From patchwork Tue Sep 29 12:01:12 2020
From: "Wei Hu (Xavier)" <huwei013@chinasoftinc.com>
Date: Tue, 29 Sep 2020 20:01:12 +0800
Subject: [dpdk-dev] [PATCH v2 4/9] net/hns3: set suitable type when
 initializing flow error struct
Message-ID: <20200929120117.50394-5-huwei013@chinasoftinc.com>
In-Reply-To: <20200929120117.50394-1-huwei013@chinasoftinc.com>
References: <20200929110945.70761-1-huwei013@chinasoftinc.com>
 <20200929120117.50394-1-huwei013@chinasoftinc.com>

The rte_flow_error_set() API is used to pass detailed error information
back to the caller. This patch sets a suitable error type in each call
to rte_flow_error_set(), instead of a generic or mismatched one, so the
caller can tell which part of the flow rule was rejected.
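[Editor's note] For context, a minimal sketch of the convention the hunks
below move to: when the unsupported part of a rule is an item's mask,
report RTE_FLOW_ERROR_TYPE_ITEM_MASK and point "cause" at the offending
item, rather than using a generic type. The helper name
example_check_udp_mask is made up for illustration and is not part of
the patch; rte_flow_error_set() fills in *error and returns the negative
errno.

    #include <errno.h>
    #include <rte_flow.h>

    /*
     * Hypothetical parse helper (illustration only). Mirrors the
     * pattern applied in hns3_parse_udp() below.
     */
    static int
    example_check_udp_mask(const struct rte_flow_item *item,
                           struct rte_flow_error *error)
    {
            const struct rte_flow_item_udp *udp_mask = item->mask;

            if (udp_mask == NULL)
                    return 0;
            /*
             * The mask is what cannot be honoured, so report ITEM_MASK
             * with the item as cause. rte_flow_error_set() fills *error
             * and returns -EINVAL here.
             */
            if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum)
                    return rte_flow_error_set(error, EINVAL,
                                              RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                              item,
                                              "Only support src & dst port in UDP");
            return 0;
    }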
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Fixes: c37ca66f2b27 ("net/hns3: support RSS")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng
Signed-off-by: Wei Hu (Xavier)
---
 drivers/net/hns3/hns3_flow.c | 54 ++++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index a6676d6..4fb129e 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -168,9 +168,9 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 	if (cnt) {
 		if (!cnt->shared || cnt->shared != shared)
 			return rte_flow_error_set(error, ENOTSUP,
-						  RTE_FLOW_ERROR_TYPE_ACTION,
-						  cnt,
-						  "Counter id is used,shared flag not match");
+						  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+						  cnt,
+						  "Counter id is used, shared flag not match");
 		cnt->ref_cnt++;
 		return 0;
 	}
@@ -178,7 +178,7 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
 	if (cnt == NULL)
 		return rte_flow_error_set(error, ENOMEM,
-					  RTE_FLOW_ERROR_TYPE_ACTION, cnt,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
 					  "Alloc mem for counter failed");
 	cnt->id = id;
 	cnt->shared = shared;
@@ -206,13 +206,13 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 	cnt = hns3_counter_lookup(dev, flow->counter_id);
 	if (cnt == NULL)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 					  "Can't find counter id");

 	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
 	if (ret) {
 		rte_flow_error_set(error, -ret,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "Read counter fail.");
 		return ret;
 	}
@@ -374,9 +374,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
 				(const struct rte_flow_action_mark *)actions->conf;
 			if (mark->id >= HNS3_MAX_FILTER_ID)
 				return rte_flow_error_set(error, EINVAL,
-							  RTE_FLOW_ERROR_TYPE_ACTION,
-							  actions,
-							  "Invalid Mark ID");
+							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+							  actions,
+							  "Invalid Mark ID");
 			rule->fd_id = mark->id;
 			rule->flags |= HNS3_RULE_FLAG_FDID;
 			break;
@@ -390,9 +390,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
 			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
 			if (act_count->id >= counter_num)
 				return rte_flow_error_set(error, EINVAL,
-							  RTE_FLOW_ERROR_TYPE_ACTION,
-							  actions,
-							  "Invalid counter id");
+							  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+							  actions,
+							  "Invalid counter id");
 			rule->act_cnt = *act_count;
 			rule->flags |= HNS3_RULE_FLAG_COUNTER;
 			break;
@@ -556,7 +556,7 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		    ipv4_mask->hdr.time_to_live ||
 		    ipv4_mask->hdr.hdr_checksum) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst ip,tos,proto in IPV4");
 		}
@@ -621,7 +621,7 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
 		    ipv6_mask->hdr.hop_limits) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst ip,proto in IPV6");
 		}
@@ -681,7 +681,7 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
 		    tcp_mask->hdr.tcp_urp) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst port in TCP");
 		}
@@ -728,7 +728,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	udp_mask = item->mask;
 	if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 					  item,
 					  "Only support src & dst port in UDP");
 	}
@@ -775,7 +775,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	sctp_mask = item->mask;
 	if (sctp_mask->hdr.cksum)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 					  item,
 					  "Only support src & dst port in SCTP");

@@ -920,14 +920,14 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,

 	if (vxlan_mask->flags)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "Flags is not supported in VxLAN");

 	/* VNI must be totally masked or not. */
 	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
 	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "VNI must be totally masked or not in VxLAN");
 	if (vxlan_mask->vni[0]) {
 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -971,14 +971,14 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,

 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "Ver/protocal is not supported in NVGRE");

 	/* TNI must be totally masked or not. */
 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
 	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "TNI must be totally masked or not in NVGRE");

 	if (nvgre_mask->tni[0]) {
@@ -1025,13 +1025,13 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,

 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "Ver/protocal is not supported in GENEVE");

 	/* VNI must be totally masked or not. */
 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "VNI must be totally masked or not in GENEVE");
 	if (geneve_mask->vni[0]) {
 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -1062,7 +1062,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		break;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_HANDLE,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  NULL, "Unsupported tunnel type!");
 	}
 	if (ret)
@@ -1116,7 +1116,7 @@ hns3_parse_normal(const struct rte_flow_item *item,
 		break;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_HANDLE,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  NULL, "Unsupported normal type!");
 	}

@@ -1132,7 +1132,7 @@ hns3_validate_item(const struct rte_flow_item *item,

 	if (item->last)
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
 					  "Not supported last point for range");

 	for (i = 0; i < step_mngr.count; i++) {
@@ -1218,7 +1218,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,

 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 					  "fdir_conf.mode isn't perfect");

 	step_mngr.items = first_items;
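[Editor's note] For completeness, a hypothetical caller-side sketch (not
from the patch) of why precise error types help: after a failed
rte_flow_validate(), an application can distinguish a rejected item mask
from a rejected action configuration by inspecting struct rte_flow_error.
The function example_validate and its reporting strings are illustrative
only.

    #include <stdint.h>
    #include <stdio.h>
    #include <rte_flow.h>

    static int
    example_validate(uint16_t port_id, const struct rte_flow_attr *attr,
                     const struct rte_flow_item pattern[],
                     const struct rte_flow_action actions[])
    {
            struct rte_flow_error error = { .type = RTE_FLOW_ERROR_TYPE_NONE };
            int ret = rte_flow_validate(port_id, attr, pattern, actions, &error);

            if (ret == 0)
                    return 0;
            /* error.type identifies which object the PMD rejected. */
            switch (error.type) {
            case RTE_FLOW_ERROR_TYPE_ITEM_MASK:
                    printf("bad item mask: %s\n",
                           error.message ? error.message : "(none)");
                    break;
            case RTE_FLOW_ERROR_TYPE_ACTION_CONF:
                    printf("bad action conf: %s\n",
                           error.message ? error.message : "(none)");
                    break;
            default:
                    printf("flow error type %d: %s\n", (int)error.type,
                           error.message ? error.message : "(none)");
                    break;
            }
            return ret;
    }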