From patchwork Wed Sep 7 05:10:36 2022
X-Patchwork-Submitter: Jie Wang
X-Patchwork-Id: 116014
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Jie Wang
To: dev@dpdk.org
Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com, stevex.yang@intel.com, Jie Wang
Subject: [PATCH v5 1/5] common/iavf: support flow subscription
Date: Wed, 7 Sep 2022 13:10:36 +0800
Message-Id: <20220907051040.119588-2-jie1x.wang@intel.com>
In-Reply-To: <20220907051040.119588-1-jie1x.wang@intel.com>
References: <20220809062122.1203281-1-jie1x.wang@intel.com>
 <20220907051040.119588-1-jie1x.wang@intel.com>

The VF is able to subscribe to a flow from the PF with
VIRTCHNL_FLOW_SUBSCRIBE. The PF is expected to offload a rule to
hardware that redirects packets matching the required pattern to this
VF. Only a flow whose destination MAC address is the PF's MAC address
can be subscribed to.

VIRTCHNL_VF_OFFLOAD_FSUB_PF is used for flow subscription capability
negotiation, and only a trusted VF can be granted this capability.

A flow can be unsubscribed with VIRTCHNL_FLOW_UNSUBSCRIBE.
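For orientation, the VF-side sequence this protocol implies is sketched
below. This is a minimal illustration built around the admin-queue helper
iavf_execute_vf_cmd() used in patch 4/5 of this series; the function name
vf_flow_subscribe_sketch and the simplified error handling are
illustrative only, not part of the patch:

static int
vf_flow_subscribe_sketch(struct iavf_adapter *adapter,
			 struct virtchnl_flow_sub *sub)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct iavf_cmd_info args;
	int err;

	/* The PF grants VIRTCHNL_VF_OFFLOAD_FSUB_PF only to a trusted VF. */
	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FSUB_PF))
		return -ENOTSUP;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
	args.in_args = (uint8_t *)sub;
	args.in_args_size = sizeof(*sub);
	args.out_buffer = vf->aq_resp;
	args.out_size = IAVF_AQ_BUF_SZ;

	err = iavf_execute_vf_cmd(adapter, &args, 0);
	if (err)
		return err;

	/* The PF answers with flow_id and status filled in. */
	sub = (struct virtchnl_flow_sub *)args.out_buffer;
	return sub->status == VIRTCHNL_FSUB_SUCCESS ? 0 : -EIO;
}

Unsubscribing follows the same shape with VIRTCHNL_OP_FLOW_UNSUBSCRIBE
and a struct virtchnl_flow_unsub carrying the returned flow_id.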
Signed-off-by: Jie Wang
Signed-off-by: Qi Zhang
---
 drivers/common/iavf/virtchnl.h | 104 +++++++++++++++++++++++++++++++--
 1 file changed, 100 insertions(+), 4 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index f123daec8e..e02eec4935 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -168,6 +168,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
 	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
 	VIRTCHNL_OP_CONFIG_QUANTA = 113,
+	VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
+	VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -282,6 +284,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
 		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -401,6 +407,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_FSUB_PF		BIT(14)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
 #define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
@@ -1503,6 +1510,7 @@ enum virtchnl_vfr_states {
 };
 
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK	16
 #define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
 #define PROTO_HDR_SHIFT	5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
@@ -1695,6 +1703,22 @@ struct virtchnl_proto_hdr {
 
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
+struct virtchnl_proto_hdr_w_msk {
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
+	u32 pad;
+	/**
+	 * binary buffer in network order for specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 * header is expected to be copied into the buffer.
+	 */
+	u8 buffer_spec[64];
+	/* binary buffer for bit-mask applied to specific header type */
+	u8 buffer_mask[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
+
 struct virtchnl_proto_hdrs {
 	u8 tunnel_level;
 	/**
@@ -1706,11 +1730,18 @@ struct virtchnl_proto_hdrs {
 	 */
 	int count;
 	/**
-	 * number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
-	 * must be 0 for a raw packet request.
+	 * count must <=
+	 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
+	 * count = 0 :					select raw
+	 * 0 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr
+	 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr_w_msk
+	 * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
 	 */
 	union {
-		struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr
+			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr_w_msk
+			proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
 		struct {
 			u16 pkt_len;
 			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
@@ -1731,7 +1762,7 @@ struct virtchnl_rss_cfg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
-/* action configuration for FDIR */
+/* action configuration for FDIR and FSUB */
 struct virtchnl_filter_action {
 	/* see enum virtchnl_action type */
 	s32 type;
@@ -1849,6 +1880,65 @@ struct virtchnl_fdir_del {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
+/* Status returned to VF after VF requests FSUB commands
+ * VIRTCHNL_FSUB_SUCCESS
+ * VF FLOW related request was successfully done by PF.
+ * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
+ * OP_FLOW_SUBSCRIBE request failed due to lack of hardware resources.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
+ * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
+ * OP_FLOW_UNSUBSCRIBE request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
+ * OP_FLOW_SUBSCRIBE request failed because parameter validation failed
+ * or the hardware doesn't support the rule.
+ */
+enum virtchnl_fsub_prgm_status {
+	VIRTCHNL_FSUB_SUCCESS = 0,
+	VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
+	VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
+};
+
+/* VIRTCHNL_OP_FLOW_SUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only, priority, proto_hdrs and actions.
+ * PF will return flow_id if the request succeeds,
+ * and will return the status to VF.
+ */
+struct virtchnl_flow_sub {
+	u16 vsi_id; /* INPUT */
+	u8 validate_only; /* INPUT */
+	u8 priority; /* INPUT */
+	u32 flow_id; /* OUTPUT */
+	struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
+	struct virtchnl_filter_action_set actions; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
+
+/* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return status to VF.
+ */
+struct virtchnl_flow_unsub {
+	u16 vsi_id; /* INPUT */
+	u16 pad;
+	u32 flow_id; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
+
 /* VIRTCHNL_OP_GET_QOS_CAPS
  * VF sends this message to get its QoS Caps, such as
  * TC number, Arbiter and Bandwidth.
@@ -2318,6 +2408,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_del);
 		break;
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_sub);
+		break;
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_unsub);
+		break;
 	case VIRTCHNL_OP_GET_QOS_CAPS:
 		break;
 	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:

From patchwork Wed Sep 7 05:10:37 2022
X-Patchwork-Submitter: Jie Wang
X-Patchwork-Id: 116015
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Jie Wang
To: dev@dpdk.org
Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com, stevex.yang@intel.com, Jie Wang
Subject: [PATCH v5 2/5] net/iavf: add flow subscription to AVF
Date: Wed, 7 Sep 2022 13:10:37 +0800
Message-Id: <20220907051040.119588-3-jie1x.wang@intel.com>
In-Reply-To: <20220907051040.119588-1-jie1x.wang@intel.com>

Add the skeleton code of flow subscription to the AVF driver.
Signed-off-by: Jie Wang
---
 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/net/iavf/iavf_fsub.c           | 112 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  17 +++-
 drivers/net/iavf/iavf_generic_flow.h   |   1 +
 drivers/net/iavf/iavf_vchnl.c          |   1 +
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 135 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 8c021cf050..bb77a03e24 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added flow subscription support.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
new file mode 100644
index 0000000000..17f9bb2976
--- /dev/null
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <iavf.h>
+#include "iavf_generic_flow.h"
+
+
+static struct iavf_flow_parser iavf_fsub_parser;
+
+static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+
+static int
+iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
+		 __rte_unused struct rte_flow *flow,
+		 __rte_unused void *meta,
+		 __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
+		  __rte_unused struct rte_flow *flow,
+		  __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+		     __rte_unused struct rte_flow *flow,
+		     __rte_unused void *meta,
+		     __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+};
+
+static int
+iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
+		__rte_unused struct iavf_pattern_match_item *array,
+		__rte_unused uint32_t array_len,
+		__rte_unused const struct rte_flow_item pattern[],
+		__rte_unused const struct rte_flow_action actions[],
+		__rte_unused void **meta,
+		__rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FSUB_PF)
+		parser = &iavf_fsub_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fsub_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fsub_parser, ad);
+}
+
+static struct
+iavf_flow_engine iavf_fsub_engine = {
+	.init = iavf_fsub_init,
+	.uninit = iavf_fsub_uninit,
+	.create = iavf_fsub_create,
+	.destroy = iavf_fsub_destroy,
+	.validation = iavf_fsub_validation,
+	.type = IAVF_FLOW_ENGINE_FSUB,
+};
+
+static struct
+iavf_flow_parser iavf_fsub_parser = {
+	.engine = &iavf_fsub_engine,
+	.array = iavf_fsub_pattern_list,
+	.array_len = RTE_DIM(iavf_fsub_pattern_list),
+	.parse_pattern_action = iavf_fsub_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fsub_engine_init)
+{
+	iavf_register_flow_engine(&iavf_fsub_engine);
+}
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index e1a611e319..b04614ba6e 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1866,6 +1866,8 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 {
 	struct iavf_parser_list *list = NULL;
 	struct iavf_flow_parser_node *parser_node;
+	struct iavf_flow_parser_node *existing_node;
+	void *temp;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
 
 	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
@@ -1880,14 +1882,26 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 		TAILQ_INSERT_TAIL(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
+		RTE_TAILQ_FOREACH_SAFE(existing_node, list, node, temp) {
+			if (existing_node->parser->engine->type ==
+			    IAVF_FLOW_ENGINE_FSUB) {
+				TAILQ_INSERT_AFTER(list, existing_node,
+						   parser_node, node);
+				goto DONE;
+			}
+		}
 		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
 		list = &vf->ipsec_crypto_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FSUB) {
+		list = &vf->dist_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
 
+DONE:
 	return 0;
 }
 
@@ -1902,7 +1916,8 @@ iavf_unregister_parser(struct iavf_flow_parser *parser,
 
 	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
 		list = &vf->rss_parser_list;
-	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
+	else if ((parser->engine->type == IAVF_FLOW_ENGINE_FDIR) ||
+		 (parser->engine->type == IAVF_FLOW_ENGINE_FSUB))
 		list = &vf->dist_parser_list;
 
 	if (list == NULL)
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 52eb1caf29..448facffa5 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -480,6 +480,7 @@ enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
+	IAVF_FLOW_ENGINE_FSUB,
 	IAVF_FLOW_ENGINE_MAX,
 };
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..6d84add423 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,6 +502,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		   VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		   VIRTCHNL_VF_OFFLOAD_FDIR_PF |
 		   VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		   VIRTCHNL_VF_OFFLOAD_FSUB_PF |
 		   VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
 		   VIRTCHNL_VF_OFFLOAD_CRC |
 		   VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 2da37de662..6df771f917 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -16,6 +16,7 @@ sources = files(
         'iavf_hash.c',
         'iavf_tm.c',
         'iavf_ipsec_crypto.c',
+        'iavf_fsub.c',
 )
 
 if arch_subdir == 'x86'

From patchwork Wed Sep 7 05:10:38 2022
X-Patchwork-Submitter: Jie Wang
X-Patchwork-Id: 116016
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Jie Wang
To: dev@dpdk.org
Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com, stevex.yang@intel.com, Jie Wang
Subject: [PATCH v5 3/5] net/iavf: support flow subscription pattern
Date: Wed, 7 Sep 2022 13:10:38 +0800
Message-Id: <20220907051040.119588-4-jie1x.wang@intel.com>
In-Reply-To: <20220907051040.119588-1-jie1x.wang@intel.com>

Add flow subscription pattern support for AVF.
The supported patterns are listed below:
 eth/vlan/ipv4
 eth/ipv4(6)
 eth/ipv4(6)/udp
 eth/ipv4(6)/tcp

Signed-off-by: Jie Wang
---
 drivers/net/iavf/iavf.h      |   7 +
 drivers/net/iavf/iavf_fsub.c | 598 ++++++++++++++++++++++++++++++++++-
 2 files changed, 597 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..f79c7f9f6e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -148,6 +148,13 @@ struct iavf_fdir_info {
 	struct iavf_fdir_conf conf;
 };
 
+struct iavf_fsub_conf {
+	struct virtchnl_flow_sub sub_fltr;
+	struct virtchnl_flow_unsub unsub_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
 struct iavf_qv_map {
 	uint16_t queue_id;
 	uint16_t vector_id;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 17f9bb2976..66e403d585 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -22,9 +22,51 @@
 #include "iavf_generic_flow.h"
 
 
+#define MAX_QGRP_NUM_TYPE	7
+#define IAVF_IPV6_ADDR_LENGTH	16
+#define MAX_INPUT_SET_BYTE	32
+
+#define IAVF_SW_INSET_ETHER ( \
+	IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)
+#define IAVF_SW_INSET_MAC_IPV4 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS)
+#define IAVF_SW_INSET_MAC_VLAN_IPV4 ( \
+	IAVF_SW_INSET_MAC_IPV4 | IAVF_INSET_VLAN_OUTER)
+#define IAVF_SW_INSET_MAC_IPV4_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV4_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_IPV6_NEXT_HDR)
+#define IAVF_SW_INSET_MAC_IPV6_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+
 static struct iavf_flow_parser iavf_fsub_parser;
 
-static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+static struct
+iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_ethertype,     IAVF_SW_INSET_ETHER,         IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,      IAVF_SW_INSET_MAC_IPV4,      IAVF_INSET_NONE},
+	{iavf_pattern_eth_vlan_ipv4, IAVF_SW_INSET_MAC_VLAN_IPV4, IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,  IAVF_SW_INSET_MAC_IPV4_UDP,  IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,  IAVF_SW_INSET_MAC_IPV4_TCP,  IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,      IAVF_SW_INSET_MAC_IPV6,      IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,  IAVF_SW_INSET_MAC_IPV6_UDP,  IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,  IAVF_SW_INSET_MAC_IPV6_TCP,  IAVF_INSET_NONE},
+};
 
 static int
 iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
@@ -53,17 +95,557 @@ iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
 };
 
 static int
-iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
-		__rte_unused struct iavf_pattern_match_item *array,
-		__rte_unused uint32_t array_len,
-		__rte_unused const struct rte_flow_item pattern[],
-		__rte_unused const struct rte_flow_action actions[],
-		__rte_unused void **meta,
-		__rte_unused struct rte_flow_error *error)
+iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
+			const uint64_t input_set_mask,
+			struct rte_flow_error *error,
+			struct iavf_fsub_conf *filter)
+{
+	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item *item = pattern;
+	struct virtchnl_proto_hdr_w_msk *hdr, *hdr1 = NULL;
+	uint64_t outer_input_set = IAVF_INSET_NONE;
+	uint64_t *input = NULL;
+	uint16_t input_set_byte = 0;
+	uint16_t j;
+	uint32_t layer = 0;
+
+	for (item = pattern; item->type !=
+	     RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item, "Not support range");
+			return false;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			hdr1 = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+
+			if (eth_spec && eth_mask) {
+				input = &outer_input_set;
+
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+					*input |= IAVF_INSET_DMAC;
+					input_set_byte += 6;
+				} else {
+					/* flow subscribe filter will add dst mac in kernel */
+					input_set_byte += 6;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+					*input |= IAVF_INSET_SMAC;
+					input_set_byte += 6;
+				}
+
+				if (eth_mask->type) {
+					*input |= IAVF_INSET_ETHERTYPE;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr1->buffer_spec, eth_spec,
+					   sizeof(struct rte_ether_hdr));
+				rte_memcpy(hdr1->buffer_mask, eth_mask,
+					   sizeof(struct rte_ether_hdr));
+			} else {
+				/* flow subscribe filter will add dst mac in kernel */
+				input_set_byte += 6;
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				input = &outer_input_set;
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+				    ipv4_mask->hdr.total_length ||
+				    ipv4_mask->hdr.packet_id ||
+				    ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item, "Invalid IPv4 mask.");
+					return false;
+				}
+
+				if (ipv4_mask->hdr.src_addr) {
+					*input |= IAVF_INSET_IPV4_SRC;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.dst_addr) {
+					*input |= IAVF_INSET_IPV4_DST;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.time_to_live) {
+					*input |= IAVF_INSET_IPV4_TTL;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.next_proto_id) {
+					*input |= IAVF_INSET_IPV4_PROTO;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.type_of_service) {
+					*input |= IAVF_INSET_IPV4_TOS;
+					input_set_byte++;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv4_mask->hdr,
+					   sizeof(ipv4_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				input = &outer_input_set;
+
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item, "Invalid IPv6 mask");
+					return false;
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
+						*input |= IAVF_INSET_IPV6_SRC;
+						break;
+					}
+				}
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						*input |= IAVF_INSET_IPV6_DST;
+						break;
+					}
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j])
+						input_set_byte++;
+
+					if (ipv6_mask->hdr.dst_addr[j])
+						input_set_byte++;
+				}
+
+				if (ipv6_mask->hdr.proto) {
+					*input |= IAVF_INSET_IPV6_NEXT_HDR;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.hop_limits) {
+					*input |= IAVF_INSET_IPV6_HOP_LIMIT;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.vtc_flow &
+				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+					*input |= IAVF_INSET_IPV6_TC;
+					input_set_byte += 4;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv6_mask->hdr,
+					   sizeof(ipv6_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				input = &outer_input_set;
+				/* Check UDP mask and update input set */
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item, "Invalid UDP mask");
+					return false;
+				}
+
+				if (udp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_UDP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (udp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_UDP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &udp_spec->hdr,
+					   sizeof(udp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &udp_mask->hdr,
+					   sizeof(udp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				input = &outer_input_set;
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+				    tcp_mask->hdr.recv_ack ||
+				    tcp_mask->hdr.data_off ||
+				    tcp_mask->hdr.tcp_flags ||
+				    tcp_mask->hdr.rx_win ||
+				    tcp_mask->hdr.cksum ||
+				    tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item, "Invalid TCP mask");
+					return false;
+				}
+
+				if (tcp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_TCP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (tcp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_TCP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &tcp_spec->hdr,
+					   sizeof(tcp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &tcp_mask->hdr,
+					   sizeof(tcp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, S_VLAN);
+
+			if (vlan_spec && vlan_mask) {
+				input = &outer_input_set;
+
+				*input |= IAVF_INSET_VLAN_OUTER;
+
+				if (vlan_mask->tci)
+					input_set_byte += 2;
+
+				if (vlan_mask->inner_type) {
+					rte_flow_error_set(error, EINVAL,
+							   RTE_FLOW_ERROR_TYPE_ITEM,
+							   item,
+							   "Invalid VLAN input set.");
+					return false;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
+					   sizeof(vlan_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &vlan_mask->hdr,
+					   sizeof(vlan_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+					   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   item, "too much input set");
+		return -rte_errno;
+	}
+
+	if (!outer_input_set || (outer_input_set & ~input_set_mask))
+		return -rte_errno;
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse_action(struct iavf_adapter *ad,
+		       const struct rte_flow_action *actions,
+		       uint32_t priority,
+		       struct rte_flow_error *error,
+		       struct iavf_fsub_conf *filter)
 {
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *act_qgrop;
+	struct virtchnl_filter_action *filter_action;
+	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
+		2, 4, 8, 16, 32, 64, 128};
+	uint16_t i, num = 0, dest_num = 0, vf_num = 0;
+	uint16_t rule_port_id;
+
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			vf_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_ethdev = action->conf;
+			rule_port_id = ad->dev_data->port_id;
+			if (rule_port_id != act_ethdev->port_id)
+				goto error1;
+
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_q = action->conf;
+			if (act_q->index >= ad->dev_data->nb_rx_queues)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_qgrop = action->conf;
+			if (act_qgrop->queue_num <= 1)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+			filter_action->act_conf.queue.index =
+				act_qgrop->queue[0];
+			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
+				if (act_qgrop->queue_num ==
+				    valid_qgrop_number[i])
+					break;
+			}
+
+			if (i == MAX_QGRP_NUM_TYPE)
+				goto error2;
+
+			if ((act_qgrop->queue[0] + act_qgrop->queue_num) >
+			    ad->dev_data->nb_rx_queues)
+				goto error3;
+
+			for (i = 0; i < act_qgrop->queue_num - 1; i++)
+				if (act_qgrop->queue[i + 1] !=
+				    act_qgrop->queue[i] + 1)
+					goto error4;
+
+			filter_action->act_conf.queue.region = act_qgrop->queue_num;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	/* 0 denotes lowest priority of recipe and highest priority
+	 * of rte_flow. Change rte_flow priority into recipe priority.
+	 */
+	filter->sub_fltr.priority = priority;
+
+	if (num > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (vf_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action, vf action must be added");
+		return -rte_errno;
+	}
+
+	if (dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+
+error1:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid ethdev_port_id");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid action type or queue number");
+	return -rte_errno;
+
+error3:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid queue region indexes");
+	return -rte_errno;
+
+error4:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Discontinuous queue region");
 	return -rte_errno;
 }
 
+static int
+iavf_fsub_check_action(const struct rte_flow_action *actions,
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+	bool vf_valid = false;
+	bool queue_valid = false;
+
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			vf_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			continue;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	if (!((actions_num == 1 && !queue_valid) ||
+	      (actions_num == 2 && vf_valid && queue_valid))) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions, "Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	struct iavf_pattern_match_item *pattern_match_item = NULL;
+	int ret = 0;
+	uint32_t priority = 0;
+
+	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
+	if (!filter) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for iavf_fsub_conf_ptr");
+		goto error;
+	}
+
+	/* search flow subscribe pattern */
+	pattern_match_item = iavf_search_pattern_match_item(pattern, array,
+							    array_len, error);
+	if (!pattern_match_item)
+		return -rte_errno;
+
+	/* parse flow subscribe pattern */
+	ret = iavf_fsub_parse_pattern(pattern,
+				      pattern_match_item->input_set_mask,
+				      error, filter);
+	if (ret)
+		goto error;
+
+	/* check flow subscribe pattern action */
+	ret = iavf_fsub_check_action(actions, error);
+	if (ret)
+		goto error;
+
+	/* parse flow subscribe pattern action */
+	ret = iavf_fsub_parse_action((void *)ad, actions, priority,
+				     error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(pattern_match_item);
+	return ret;
+}
+
 static int
 iavf_fsub_init(struct iavf_adapter *ad)
 {

From patchwork Wed Sep 7 05:10:39 2022
X-Patchwork-Submitter: Jie Wang
X-Patchwork-Id: 116017
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Jie Wang
To: dev@dpdk.org
Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com, stevex.yang@intel.com, Jie Wang
Subject: [PATCH v5 4/5] net/iavf: support flow subscription rule
Date: Wed, 7 Sep 2022 13:10:39 +0800
Message-Id: <20220907051040.119588-5-jie1x.wang@intel.com>
In-Reply-To: <20220907051040.119588-1-jie1x.wang@intel.com>

Support creating/destroying/validating flow subscription rules for AVF.

For example:
 testpmd> flow create 0 ingress pattern eth / ipv4 / udp src is 11 / end actions represented_port port_id 1 / end
 testpmd> flow validate 1 ingress pattern eth / ipv4 / tcp src is 22 / end actions represented_port port_id 1 / end
 testpmd> flow destroy 1 rule 0

A VF subscribes to a rule, which means the matching packets will be sent
to the VF instead of the PF, and only the VF will receive them. Multiple
VFs are allowed to subscribe to the same rule; the packets will then be
replicated and received by each VF. The PF will destroy all
subscriptions during VF reset.
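For reference, a rough rte_flow C-API equivalent of the first testpmd
command above is sketched below. This is illustrative only (the helper
name is invented, error handling is trimmed, and the port ids simply
mirror the testpmd example):

#include <rte_flow.h>
#include <rte_byteorder.h>

static struct rte_flow *
subscribe_udp_src11(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match UDP source port 11, as in "udp src is 11". */
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = RTE_BE16(11),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.src_port = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* "represented_port port_id 1" carries an ethdev action conf. */
	struct rte_flow_action_ethdev port = { .port_id = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
		  .conf = &port },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}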
Signed-off-by: Jie Wang
---
 drivers/net/iavf/iavf.h       |   6 ++
 drivers/net/iavf/iavf_fsub.c  |  75 +++++++++++++++----
 drivers/net/iavf/iavf_vchnl.c | 132 ++++++++++++++++++++++++++++++++++
 3 files changed, 201 insertions(+), 12 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index f79c7f9f6e..26b858f6f0 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -489,4 +489,10 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 extern const struct rte_tm_ops iavf_tm_ops;
 int iavf_get_ptp_cap(struct iavf_adapter *adapter);
 int iavf_get_phc_time(struct iavf_rx_queue *rxq);
+int iavf_flow_sub(struct iavf_adapter *adapter,
+		  struct iavf_fsub_conf *filter);
+int iavf_flow_unsub(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter);
+int iavf_flow_sub_check(struct iavf_adapter *adapter,
+			struct iavf_fsub_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 66e403d585..28857d7577 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -69,29 +69,80 @@ iavf_pattern_match_item iavf_fsub_pattern_list[] = {
 };
 
 static int
-iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
-		 __rte_unused struct rte_flow *flow,
-		 __rte_unused void *meta,
-		 __rte_unused struct rte_flow_error *error)
+iavf_fsub_create(struct iavf_adapter *ad, struct rte_flow *flow,
+		 void *meta, struct rte_flow_error *error)
 {
+	struct iavf_fsub_conf *filter = meta;
+	struct iavf_fsub_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fsub_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory for fsub rule");
+		return -rte_errno;
+	}
+
+	ret = iavf_flow_sub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to subscribe flow rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return ret;
+
+free_entry:
+	rte_free(rule);
 	return -rte_errno;
 }
 
 static int
-iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
-		  __rte_unused struct rte_flow *flow,
-		  __rte_unused struct rte_flow_error *error)
+iavf_fsub_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fsub_conf *)flow->rule;
+
+	ret = iavf_flow_unsub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to unsubscribe flow rule.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return ret;
 }
 
 static int
-iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+iavf_fsub_validation(struct iavf_adapter *ad,
 		     __rte_unused struct rte_flow *flow,
-		     __rte_unused void *meta,
-		     __rte_unused struct rte_flow_error *error)
+		     void *meta,
+		     struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter = meta;
+	int ret;
+
+	ret = iavf_flow_sub_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return ret;
 };
 
 static int
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 6d84add423..cc0db8d093 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1534,6 +1534,138 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	return 0;
 }
 
+int
+iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(*(&filter->sub_fltr));
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+			    "OP_FLOW_SUBSCRIBE");
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+	filter->flow_id = fsub_cfg->flow_id;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeeded in adding rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to no "
+			    "hardware resource");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the "
+			    "rule already exists");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the "
+			    "hardware doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to other "
+			    "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_unsub *unsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->unsub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->unsub_fltr.flow_id = filter->flow_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_UNSUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->unsub_fltr);
+	args.in_args_size = sizeof(filter->unsub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+			    "OP_FLOW_UNSUBSCRIBE");
+
+	unsub_cfg = (struct virtchnl_flow_unsub *)args.out_buffer;
+
+	if (unsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeeded in deleting rule request by PF");
+	} else if (unsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request because this "
+			    "rule doesn't exist");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to other "
+			    "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_sub_check(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(*(&filter->sub_fltr));
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to check flow subscription rule");
+		return err;
+	}
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeeded in checking rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to check rule request because "
+			    "parameter validation failed or the hardware "
+			    "doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to other "
+			    "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
 int
 iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 		     struct virtchnl_rss_cfg *rss_cfg, bool add)

From patchwork Wed Sep 7 05:10:40 2022
X-Patchwork-Submitter: Jie Wang
X-Patchwork-Id: 116018
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Jie Wang
To: dev@dpdk.org
Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com, stevex.yang@intel.com, Jie Wang
Subject: [PATCH v5 5/5] net/iavf: support priority of flow rule
Date: Wed, 7 Sep 2022 13:10:40 +0800
Message-Id: <20220907051040.119588-6-jie1x.wang@intel.com>
In-Reply-To: <20220907051040.119588-1-jie1x.wang@intel.com>

Add flow rule attribute "priority" support for AVF.

Lower values denote higher priority; the highest priority for a flow
rule is 0. All subscription rules have a lower priority than the rules
created by the host.
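As an illustration of how this surfaces in testpmd (reusing the
subscription syntax from patch 4/5; the port ids are arbitrary), a
subscription rule can be created at the lowest accepted priority:

 testpmd> flow create 0 priority 1 ingress pattern eth / ipv4 / udp src is 11 / end actions represented_port port_id 1 / end

With the parser changes below, the FDIR, RSS hash, and ipsec-crypto
parsers reject any priority other than 0, so a priority-1 rule can only
be consumed by the flow subscription engine.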
Signed-off-by: Jie Wang
---
 drivers/net/iavf/iavf_fdir.c         |  4 ++++
 drivers/net/iavf/iavf_fsub.c         |  2 +-
 drivers/net/iavf/iavf_generic_flow.c | 23 +++++++++++++----------
 drivers/net/iavf/iavf_generic_flow.h |  1 +
 drivers/net/iavf/iavf_hash.c         |  5 +++++
 drivers/net/iavf/iavf_ipsec_crypto.c | 16 ++++++++++------
 6 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index a397047fdb..8f80873925 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -1583,6 +1583,7 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
@@ -1593,6 +1594,9 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 
 	memset(filter, 0, sizeof(*filter));
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (!item)
 		return -rte_errno;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 28857d7577..3bb6c30d3c 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -649,13 +649,13 @@ iavf_fsub_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
 	struct iavf_fsub_conf *filter;
 	struct iavf_pattern_match_item *pattern_match_item = NULL;
 	int ret = 0;
-	uint32_t priority = 0;
 
 	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
 	if (!filter) {
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b04614ba6e..f33c764764 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1785,6 +1785,7 @@ enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp[] = {
 typedef struct iavf_flow_engine *
 (*parse_engine_t)(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error);
@@ -1951,11 +1952,11 @@ iavf_flow_valid_attr(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* Not supported */
-	if (attr->priority) {
+	/* support priority for flow subscribe */
+	if (attr->priority > 1) {
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
-				   attr, "Not support priority.");
+				   attr, "Only support priority 0 and 1.");
 		return -rte_errno;
 	}
 
@@ -2098,6 +2099,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_create(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2111,7 +2113,7 @@ iavf_parse_engine_create(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta, error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2127,6 +2129,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_validate(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2140,7 +2143,7 @@ iavf_parse_engine_validate(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta, error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2201,18 +2204,18 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
 	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
-				    pattern, actions, error);
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 448facffa5..60d8ab02b4 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -471,6 +471,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error);
 
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 42df7c4e48..dea4e0aa0a 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -86,6 +86,7 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error);
 
@@ -1509,6 +1510,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
@@ -1517,6 +1519,9 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 	uint64_t phint = IAVF_PHINT_NONE;
 	int ret = 0;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
 	if (!rss_meta_ptr) {
 		rte_flow_error_set(error, EINVAL,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 1d465b4419..cec1b968fe 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -1933,16 +1933,20 @@ static struct iavf_flow_engine iavf_ipsec_flow_engine = {
 
 static int
 iavf_ipsec_flow_parse(struct iavf_adapter *ad,
-		      struct iavf_pattern_match_item *array,
-		      uint32_t array_len,
-		      const struct rte_flow_item pattern[],
-		      const struct rte_flow_action actions[],
-		      void **meta,
-		      struct rte_flow_error *error)
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t priority,
+		void **meta,
+		struct rte_flow_error *error)
 {
 	struct iavf_pattern_match_item *item = NULL;
 	int ret = -1;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (item && item->meta) {
 		uint32_t type = (uint64_t)(item->meta);