From patchwork Wed Apr 13 16:09:00 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109635 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 25C97A0508; Wed, 13 Apr 2022 10:10:46 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 278D3427F6; Wed, 13 Apr 2022 10:10:43 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 19B614068B for ; Wed, 13 Apr 2022 10:10:40 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837441; x=1681373441; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=J4Wo64ItQBH4N6CEJHCXCjbSj6gRI5z9jkTbPzFPbys=; b=GbQf6PlS2IWkZa0nExl8bqnef7BeXzCctHNnsLapE7gN90KY/Hu2si1t 7MHusmIX/1F2YbJfmXAWQ3uH3COoDTj0bTkbbzkYRN4PRx2EMhMFjHrbt 8RK57+8enbri2f9cgDEOKxh+JwrBya2/4eWtC+/u22yWcSInMDW6BMEzR Kqg3ohbIwX0nO1EA283UKbxnNvpRbUF8hsTKVEmaUubXloud0D+SNaIds 8ii9Z4fjMOxebVUyvCQwGtqLFcBZqUzkHko7gvqmGLP9Buy3wIjg+jv3Q o5faaeMugTtp3i9bKOIoO+8eMz9A+vfCeTLPaiLYJ+i0ahbvj4A5s890I A==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189017" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189017" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:40 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847464" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:38 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, 
stevex.yang@intel.com, Kevin Liu Subject: [PATCH v2 01/33] net/ice: enable RSS RETA ops for DCF hardware Date: Wed, 13 Apr 2022 16:09:00 +0000 Message-Id: <20220413160932.2074781-2-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Steve Yang RSS RETA should be updated and queried by application, Add related ops ('.reta_update', '.reta_query') for DCF. Signed-off-by: Steve Yang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 2 +- drivers/net/ice/ice_dcf.h | 1 + drivers/net/ice/ice_dcf_ethdev.c | 77 ++++++++++++++++++++++++++++++++ 3 files changed, 79 insertions(+), 1 deletion(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 7f0c074b01..070d1b71ac 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -790,7 +790,7 @@ ice_dcf_configure_rss_key(struct ice_dcf_hw *hw) return err; } -static int +int ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw) { struct virtchnl_rss_lut *rss_lut; diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index 6ec766ebda..b2c6aa2684 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -122,6 +122,7 @@ int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc, int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw); int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw); void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw); +int ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw); int ice_dcf_init_rss(struct ice_dcf_hw *hw); int ice_dcf_configure_queues(struct ice_dcf_hw *hw); int 
ice_dcf_config_irq_map(struct ice_dcf_hw *hw); diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 59610e058f..1ac66ed990 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -761,6 +761,81 @@ ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev, return 0; } +static int +ice_dcf_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + uint8_t *lut; + uint16_t i, idx, shift; + int ret; + + if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) + return -ENOTSUP; + + if (reta_size != hw->vf_res->rss_lut_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)", reta_size, hw->vf_res->rss_lut_size); + return -EINVAL; + } + + lut = rte_zmalloc("rss_lut", reta_size, 0); + if (!lut) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + /* store the old lut table temporarily */ + rte_memcpy(lut, hw->rss_lut, reta_size); + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + lut[i] = reta_conf[idx].reta[shift]; + } + + rte_memcpy(hw->rss_lut, lut, reta_size); + /* send virtchnnl ops to configure rss*/ + ret = ice_dcf_configure_rss_lut(hw); + if (ret) /* revert back */ + rte_memcpy(hw->rss_lut, lut, reta_size); + rte_free(lut); + + return ret; +} + +static int +ice_dcf_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + uint16_t i, idx, shift; + + if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) + return -ENOTSUP; + + if (reta_size != 
hw->vf_res->rss_lut_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)", reta_size, hw->vf_res->rss_lut_size); + return -EINVAL; + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = hw->rss_lut[i]; + } + + return 0; +} + #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4) #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6) #define ICE_DCF_48_BIT_MASK RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t) @@ -1107,6 +1182,8 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = { .udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add, .udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del, .tm_ops_get = ice_dcf_tm_ops_get, + .reta_update = ice_dcf_dev_rss_reta_update, + .reta_query = ice_dcf_dev_rss_reta_query, }; static int From patchwork Wed Apr 13 16:09:01 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109636 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 89367A0508; Wed, 13 Apr 2022 10:10:51 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 210BA410E1; Wed, 13 Apr 2022 10:10:47 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 8D2D740694 for ; Wed, 13 Apr 2022 10:10:45 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837445; x=1681373445; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=mrR24+g49ndVilfIC2X4RY0fJTgOXLiccIJAdDUJoA8=; 
b=NIuKG+CKT10n8Sk4JxJcryBRv45G2eSqIQ8ETLo0k94yx/RYuXavqc7+ I0DRPLy2/1WtiA2RVUp0kNZXdeT+QVS4yhOPJaIw8hqP15LA+NWsjE7eV DCjPruNu1xy+0UAcNPkSjLjV4HG0vJhsZooSS28Fc6aDr0EYRs1/fuk63 USFtOR1j+laxP5LWH7iBZMutQLDkuS1K9nXogpDClraZlxKz80FL1MZiM ejugMSOSQ+tV4CHnhc9YCucC8nvPtHHz0B4b01HCjQBfRqqACodkb6XJV AduxE8MYr0r0Xdvr04Jm06pWOl30m8KvWXvxbwf8t0/I5yAl2lBy+8uW9 Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189030" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189030" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:44 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847491" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:40 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu Subject: [PATCH v2 02/33] net/ice: enable RSS HASH ops for DCF hardware Date: Wed, 13 Apr 2022 16:09:01 +0000 Message-Id: <20220413160932.2074781-3-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Steve Yang RSS HASH should be updated and queried by application, Add related ops ('.rss_hash_update', '.rss_hash_conf_get') for DCF. Because DCF doesn't support configure RSS HASH, only HASH key can be updated within ops '.rss_hash_update'. 
Signed-off-by: Steve Yang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 2 +- drivers/net/ice/ice_dcf.h | 1 + drivers/net/ice/ice_dcf_ethdev.c | 51 ++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 1 deletion(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 070d1b71ac..89c0203ba3 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -758,7 +758,7 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) hw->ets_config = NULL; } -static int +int ice_dcf_configure_rss_key(struct ice_dcf_hw *hw) { struct virtchnl_rss_key *rss_key; diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index b2c6aa2684..f0b45af5ae 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -122,6 +122,7 @@ int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc, int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw); int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw); void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw); +int ice_dcf_configure_rss_key(struct ice_dcf_hw *hw); int ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw); int ice_dcf_init_rss(struct ice_dcf_hw *hw); int ice_dcf_configure_queues(struct ice_dcf_hw *hw); diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 1ac66ed990..ccad7fc304 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -836,6 +836,55 @@ ice_dcf_dev_rss_reta_query(struct rte_eth_dev *dev, return 0; } +static int +ice_dcf_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + + if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) + return -ENOTSUP; + + /* HENA setting, it is enabled by default, no change */ + if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) { + 
PMD_DRV_LOG(DEBUG, "No key to be configured"); + return 0; + } else if (rss_conf->rss_key_len != hw->vf_res->rss_key_size) { + PMD_DRV_LOG(ERR, "The size of hash key configured " + "(%d) doesn't match the size of hardware can " + "support (%d)", rss_conf->rss_key_len, + hw->vf_res->rss_key_size); + return -EINVAL; + } + + rte_memcpy(hw->rss_key, rss_conf->rss_key, rss_conf->rss_key_len); + + return ice_dcf_configure_rss_key(hw); +} + +static int +ice_dcf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + + if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) + return -ENOTSUP; + + /* Just set it to default value now. */ + rss_conf->rss_hf = ICE_RSS_OFFLOAD_ALL; + + if (!rss_conf->rss_key) + return 0; + + rss_conf->rss_key_len = hw->vf_res->rss_key_size; + rte_memcpy(rss_conf->rss_key, hw->rss_key, rss_conf->rss_key_len); + + return 0; +} + #define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4) #define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6) #define ICE_DCF_48_BIT_MASK RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t) @@ -1184,6 +1233,8 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = { .tm_ops_get = ice_dcf_tm_ops_get, .reta_update = ice_dcf_dev_rss_reta_update, .reta_query = ice_dcf_dev_rss_reta_query, + .rss_hash_update = ice_dcf_dev_rss_hash_update, + .rss_hash_conf_get = ice_dcf_dev_rss_hash_conf_get, }; static int From patchwork Wed Apr 13 16:09:02 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109637 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2EA8AA0508; Wed, 13 Apr 2022 10:10:59 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org 
(Postfix) with ESMTP id 5AAA940694; Wed, 13 Apr 2022 10:10:52 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 4172540694 for ; Wed, 13 Apr 2022 10:10:50 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837450; x=1681373450; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=5FnmNbyjznWNp/QvhpWMhZ+N23zqp246e9Z1lWhhsmg=; b=DVX1a/4Y/4zHo9exZupZ/MFE2u0luJzVkJh2iRP1GLhAmu8ZrCb/raxC mPhC6YsW7J0i5vJz7a53On60zReUeLyjOmmYPvFlUQKODiwbNLvL7BKfj q/NR8EEYiuXkZFFbnBE3yxMK5wJRR5NWLnWz7zCbg9SgU9gbV9YmQd4iG IBeuI1nI9uLWjmjtJgicnBYZj2bJBQNeqMM+CC/nrScvDgHjhrcrsfq5Q JQ8d6Sthg0cApT2qV61ZJsLhvFfNA+xDcsUnKaoL2L2h8kCiO1573Y+Of IQYRCYf2vPkN3DCG8kjugmQ27eY9HfuGu6AbkFWV9ck5ZJWwg7RppHSQf A==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189051" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189051" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:49 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847514" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:45 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Robin Zhang , Kevin Liu Subject: [PATCH v2 03/33] net/ice: cleanup Tx buffers Date: Wed, 13 Apr 2022 16:09:02 +0000 Message-Id: <20220413160932.2074781-4-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and 
discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Robin Zhang Add support for ops rte_eth_tx_done_cleanup in dcf Signed-off-by: Robin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf_ethdev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index ccad7fc304..d8b5961514 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -1235,6 +1235,7 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = { .reta_query = ice_dcf_dev_rss_reta_query, .rss_hash_update = ice_dcf_dev_rss_hash_update, .rss_hash_conf_get = ice_dcf_dev_rss_hash_conf_get, + .tx_done_cleanup = ice_tx_done_cleanup, }; static int From patchwork Wed Apr 13 16:09:03 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109638 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 08704A0508; Wed, 13 Apr 2022 10:11:05 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 428504280B; Wed, 13 Apr 2022 10:10:54 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id AA36640694 for ; Wed, 13 Apr 2022 10:10:51 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837451; x=1681373451; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=uhH6sbpPFSDqWF8L5yx2JMPlQrbIgO+TzDOmyZdVXeM=; b=ZQyVv7FEp0s3MZtAjflNN3Eo3gyXa8cgC09eSAFYju9omr506np1F281 7uMtOwUVsR6qNKXeXlGBOdK2yCOWnx29fdcln5l+46Ate84OiqEEl3AS7 D4nmNJimLjJ55wd/HotOK1IpYFIlcyRAtRKDFCdLmkgROM0jDLTGIHRiH 
ME1lOfNr5eJ9Uq0j0OBQcoaiv1ICyYr7IcP37e6l5ZPnAAp9L5uyNVWWF XdUV+cW5kJ7zScE+xmMTUYKc7xO5WC3+sXXxw6Dy2Naudv06GKD6jaeGE GMUjkhXQ3RJlsQOdacEn+rEbBsTOBMrlbhfdUwPGzXNpnzZ4Vkq8nitdn Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189055" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189055" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:50 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847531" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:48 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Jie Wang , Kevin Liu Subject: [PATCH v2 04/33] net/ice: add ops MTU-SET to dcf Date: Wed, 13 Apr 2022 16:09:03 +0000 Message-Id: <20220413160932.2074781-5-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Jie Wang add API "mtu_set" to dcf, and it can configure the port mtu through cmdline. 
Signed-off-by: Jie Wang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf_ethdev.c | 14 ++++++++++++++ drivers/net/ice/ice_dcf_ethdev.h | 6 ++++++ 2 files changed, 20 insertions(+) diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index d8b5961514..06d752fd61 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -1081,6 +1081,19 @@ ice_dcf_link_update(struct rte_eth_dev *dev, return rte_eth_linkstatus_set(dev, &new_link); } +static int +ice_dcf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused) +{ + /* mtu setting is forbidden if port is start */ + if (dev->data->dev_started != 0) { + PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", + dev->data->port_id); + return -EBUSY; + } + + return 0; +} + bool ice_dcf_adminq_need_retry(struct ice_adapter *ad) { @@ -1236,6 +1249,7 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = { .rss_hash_update = ice_dcf_dev_rss_hash_update, .rss_hash_conf_get = ice_dcf_dev_rss_hash_conf_get, .tx_done_cleanup = ice_tx_done_cleanup, + .mtu_set = ice_dcf_dev_mtu_set, }; static int diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h index 11a1305038..f2faf26f58 100644 --- a/drivers/net/ice/ice_dcf_ethdev.h +++ b/drivers/net/ice/ice_dcf_ethdev.h @@ -15,6 +15,12 @@ #define ICE_DCF_MAX_RINGS 1 +#define ICE_DCF_FRAME_SIZE_MAX 9728 +#define ICE_DCF_VLAN_TAG_SIZE 4 +#define ICE_DCF_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_DCF_VLAN_TAG_SIZE * 2) +#define ICE_DCF_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_DCF_ETH_OVERHEAD) + struct ice_dcf_queue { uint64_t dummy; }; From patchwork Wed Apr 13 16:09:04 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109639 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by 
inbox.dpdk.org (Postfix) with ESMTP id 484F4A0508; Wed, 13 Apr 2022 10:11:11 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2F95D42810; Wed, 13 Apr 2022 10:10:56 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 970284280D for ; Wed, 13 Apr 2022 10:10:54 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837455; x=1681373455; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=HAg21Ie7KGkUTfzVHs6REN3QEDKJCuyvy8CYzTL9F7A=; b=cHeUKBPfaieI3hp/3JfF0M0mM3y5MFLM4hhowZw5JjxT/nROfxSUpFNo YawK02aykaAGzsp0DFccJSKnWklAKyWBoN+BkzeLQVETtut6dV6HIs0zQ eBjNdhwZ3OJId6Wbb6BSXejQZMJ8p5lGUQNZNEiiVADaKtd9vMxiBnWnb J+7WDdg7V4zblC9Z7I7UejIcgzts8TRAdcVHQ61V/TwkTNl70QrIN+lYN hTDcMvbRphZ6TP5JINBK63WLKPI4dHKcrXyjJT7jsUO3h3xKAS8kigmpa Rz/IBo8zJ6pyY6IDNqC67Na4eCDJwYOCLFojhEiYLAwWRgdCziH3FIDl1 g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189065" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189065" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:53 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847545" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:51 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Jie Wang , Kevin Liu Subject: [PATCH v2 05/33] net/ice: add ops dev-supported-ptypes-get to dcf Date: Wed, 13 Apr 2022 16:09:04 +0000 Message-Id: <20220413160932.2074781-6-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: 
<20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Jie Wang add API "dev_supported_ptypes_get" to dcf, that dcf pmd can get ptypes through the new API. Signed-off-by: Jie Wang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf_ethdev.c | 80 +++++++++++++++++++------------- 1 file changed, 49 insertions(+), 31 deletions(-) diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 06d752fd61..6a577a6582 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -1218,38 +1218,56 @@ ice_dcf_dev_reset(struct rte_eth_dev *dev) return ret; } +static const uint32_t * +ice_dcf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_NONFRAG, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + return ptypes; +} + static const struct eth_dev_ops ice_dcf_eth_dev_ops = { - .dev_start = ice_dcf_dev_start, - .dev_stop = ice_dcf_dev_stop, - .dev_close = ice_dcf_dev_close, - .dev_reset = ice_dcf_dev_reset, - .dev_configure = ice_dcf_dev_configure, - .dev_infos_get = ice_dcf_dev_info_get, - .rx_queue_setup = ice_rx_queue_setup, - .tx_queue_setup = ice_tx_queue_setup, - .rx_queue_release = ice_dev_rx_queue_release, - .tx_queue_release = ice_dev_tx_queue_release, - .rx_queue_start = ice_dcf_rx_queue_start, - .tx_queue_start = ice_dcf_tx_queue_start, - .rx_queue_stop = ice_dcf_rx_queue_stop, - .tx_queue_stop = ice_dcf_tx_queue_stop, - .link_update = ice_dcf_link_update, - .stats_get = ice_dcf_stats_get, - .stats_reset = ice_dcf_stats_reset, - 
.promiscuous_enable = ice_dcf_dev_promiscuous_enable, - .promiscuous_disable = ice_dcf_dev_promiscuous_disable, - .allmulticast_enable = ice_dcf_dev_allmulticast_enable, - .allmulticast_disable = ice_dcf_dev_allmulticast_disable, - .flow_ops_get = ice_dcf_dev_flow_ops_get, - .udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add, - .udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del, - .tm_ops_get = ice_dcf_tm_ops_get, - .reta_update = ice_dcf_dev_rss_reta_update, - .reta_query = ice_dcf_dev_rss_reta_query, - .rss_hash_update = ice_dcf_dev_rss_hash_update, - .rss_hash_conf_get = ice_dcf_dev_rss_hash_conf_get, - .tx_done_cleanup = ice_tx_done_cleanup, - .mtu_set = ice_dcf_dev_mtu_set, + .dev_start = ice_dcf_dev_start, + .dev_stop = ice_dcf_dev_stop, + .dev_close = ice_dcf_dev_close, + .dev_reset = ice_dcf_dev_reset, + .dev_configure = ice_dcf_dev_configure, + .dev_infos_get = ice_dcf_dev_info_get, + .dev_supported_ptypes_get = ice_dcf_dev_supported_ptypes_get, + .rx_queue_setup = ice_rx_queue_setup, + .tx_queue_setup = ice_tx_queue_setup, + .rx_queue_release = ice_dev_rx_queue_release, + .tx_queue_release = ice_dev_tx_queue_release, + .rx_queue_start = ice_dcf_rx_queue_start, + .tx_queue_start = ice_dcf_tx_queue_start, + .rx_queue_stop = ice_dcf_rx_queue_stop, + .tx_queue_stop = ice_dcf_tx_queue_stop, + .link_update = ice_dcf_link_update, + .stats_get = ice_dcf_stats_get, + .stats_reset = ice_dcf_stats_reset, + .promiscuous_enable = ice_dcf_dev_promiscuous_enable, + .promiscuous_disable = ice_dcf_dev_promiscuous_disable, + .allmulticast_enable = ice_dcf_dev_allmulticast_enable, + .allmulticast_disable = ice_dcf_dev_allmulticast_disable, + .flow_ops_get = ice_dcf_dev_flow_ops_get, + .udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add, + .udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del, + .tm_ops_get = ice_dcf_tm_ops_get, + .reta_update = ice_dcf_dev_rss_reta_update, + .reta_query = ice_dcf_dev_rss_reta_query, + .rss_hash_update = 
ice_dcf_dev_rss_hash_update, + .rss_hash_conf_get = ice_dcf_dev_rss_hash_conf_get, + .tx_done_cleanup = ice_tx_done_cleanup, + .mtu_set = ice_dcf_dev_mtu_set, }; static int From patchwork Wed Apr 13 16:09:05 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109640 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5C5D2A0508; Wed, 13 Apr 2022 10:11:17 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 162B542813; Wed, 13 Apr 2022 10:10:58 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 7259342809 for ; Wed, 13 Apr 2022 10:10:56 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837456; x=1681373456; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=WOQHVMy6I/PNV1SH4Kmb0apkriJ+8tFdVGNa6R1TKVo=; b=XOCbTZD+2y/FerSUnoiAAyzWjdcAh1naHNqKNDljxGseVeKn+xT2AQyI +0c5NuMLLHab2BDfS5ykGo1VExTBtwxL+xTrfxctHTgRCYyu8Hv7LHLz8 ClI8A1KxrpyzFiV/lBMPfrd59rGkdWWUsdU9GTntK1cOkwmGTBJ5VLfPc uA3dbRP00uu5cVSdM8qEtuxnQfK8AP6NidqR7IdlwVn4xPEkY6keLmYae 868OmkpaKP95tZ2xXXvUFaepBHQthsyofDR9JxM6OMlOaSBVr8Kblbc1C CQvI/HP1vXU6sXY2owJXxYaWVnkP5wPnx5Jbw0Egr3Cc7cNTyL4blOYse g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189071" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189071" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:55 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847556" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by 
orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:53 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Kevin Liu Subject: [PATCH v2 06/33] net/ice: support dcf promisc configuration Date: Wed, 13 Apr 2022 16:09:05 +0000 Message-Id: <20220413160932.2074781-7-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Support configuration of unicast and multicast promisc on dcf. Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf_ethdev.c | 77 ++++++++++++++++++++++++++++++-- drivers/net/ice/ice_dcf_ethdev.h | 3 ++ 2 files changed, 76 insertions(+), 4 deletions(-) diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 6a577a6582..87d281ee93 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -727,27 +727,95 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev, } static int -ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev) +dcf_config_promisc(struct ice_dcf_adapter *adapter, + bool enable_unicast, + bool enable_multicast) { + struct ice_dcf_hw *hw = &adapter->real_hw; + struct virtchnl_promisc_info promisc; + struct dcf_virtchnl_cmd args; + int err; + + promisc.flags = 0; + promisc.vsi_id = hw->vsi_res->vsi_id; + + if (enable_unicast) + promisc.flags |= FLAG_VF_UNICAST_PROMISC; + + if (enable_multicast) + promisc.flags |= FLAG_VF_MULTICAST_PROMISC; + + memset(&args, 0, sizeof(args)); + args.v_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + 
args.req_msg = (uint8_t *)&promisc; + args.req_msglen = sizeof(promisc); + + err = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (err) { + PMD_DRV_LOG(ERR, + "fail to execute command VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE"); + return err; + } + + adapter->promisc_unicast_enabled = enable_unicast; + adapter->promisc_multicast_enabled = enable_multicast; return 0; } +static int +ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + + if (adapter->promisc_unicast_enabled) { + PMD_DRV_LOG(INFO, "promiscuous has been enabled"); + return 0; + } + + return dcf_config_promisc(adapter, true, + adapter->promisc_multicast_enabled); +} + static int ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev) { - return 0; + struct ice_dcf_adapter *adapter = dev->data->dev_private; + + if (!adapter->promisc_unicast_enabled) { + PMD_DRV_LOG(INFO, "promiscuous has been disabled"); + return 0; + } + + return dcf_config_promisc(adapter, false, + adapter->promisc_multicast_enabled); } static int ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev) { - return 0; + struct ice_dcf_adapter *adapter = dev->data->dev_private; + + if (adapter->promisc_multicast_enabled) { + PMD_DRV_LOG(INFO, "allmulticast has been enabled"); + return 0; + } + + return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled, + true); } static int ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev) { - return 0; + struct ice_dcf_adapter *adapter = dev->data->dev_private; + + if (!adapter->promisc_multicast_enabled) { + PMD_DRV_LOG(INFO, "allmulticast has been disabled"); + return 0; + } + + return dcf_config_promisc(adapter, adapter->promisc_unicast_enabled, + false); } static int @@ -1299,6 +1367,7 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev) return -1; } + dcf_config_promisc(adapter, false, false); return 0; } diff --git a/drivers/net/ice/ice_dcf_ethdev.h 
b/drivers/net/ice/ice_dcf_ethdev.h index f2faf26f58..22e450527b 100644 --- a/drivers/net/ice/ice_dcf_ethdev.h +++ b/drivers/net/ice/ice_dcf_ethdev.h @@ -33,6 +33,9 @@ struct ice_dcf_adapter { struct ice_adapter parent; /* Must be first */ struct ice_dcf_hw real_hw; + bool promisc_unicast_enabled; + bool promisc_multicast_enabled; + int num_reprs; struct ice_dcf_repr_info *repr_infos; }; From patchwork Wed Apr 13 16:09:06 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109641 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id F0502A0508; Wed, 13 Apr 2022 10:11:22 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 04E4F42809; Wed, 13 Apr 2022 10:11:02 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 6E3CB4280D for ; Wed, 13 Apr 2022 10:10:59 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837459; x=1681373459; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=E+Pxz1IGnkGogeqtg63gIrWEg+zBHJGlVEYLUId0Yg0=; b=OV1ApJgwnP/8vmNKpEQhRvEUmdM9W8qrP4r+54cGrqSpgKZoeZXFUEJ7 Gd3fsEnqyzpQgP8CP1X90GDUYzdnLABk+F0ieyc7NhtJlIBOAWw/ASJxW Y0JbM4b08Wx03sW7zKk1b3Me3nwjdyb5wuz5g8I68dKnZbRR3gVsWScva wimsB24+LXSGciouGRUlXYwrkemZB6GPQcXzQ4cFTlrHsfXKTsEBVohod DMyEBHL0H+iwIKS0l8SqlhIs7vyo62LGJigvoXVvldinHicHyhuJnhcs/ hqyHVOz4ymeIHDtiVNj82z3v7Ab3flkbGGljUXXUiNKGUE2bXhNq9Vwkf w==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189082" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189082" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with 
ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:58 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847566" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:56 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu , Alvin Zhang Subject: [PATCH v2 07/33] net/ice: support dcf MAC configuration Date: Wed, 13 Apr 2022 16:09:06 +0000 Message-Id: <20220413160932.2074781-8-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Below PMD ops are supported in this patch: .mac_addr_add = dcf_dev_add_mac_addr .mac_addr_remove = dcf_dev_del_mac_addr .set_mc_addr_list = dcf_set_mc_addr_list .mac_addr_set = dcf_dev_set_default_mac_addr Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 9 +- drivers/net/ice/ice_dcf.h | 4 +- drivers/net/ice/ice_dcf_ethdev.c | 218 ++++++++++++++++++++++++++++++- drivers/net/ice/ice_dcf_ethdev.h | 5 +- 4 files changed, 226 insertions(+), 10 deletions(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 89c0203ba3..55ae68c456 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -1089,10 +1089,11 @@ ice_dcf_query_stats(struct ice_dcf_hw *hw, } int -ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add) +ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, + struct rte_ether_addr *addr, + bool add, uint8_t type) { struct virtchnl_ether_addr_list *list; - struct 
rte_ether_addr *addr; struct dcf_virtchnl_cmd args; int len, err = 0; @@ -1105,7 +1106,6 @@ ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add) } len = sizeof(struct virtchnl_ether_addr_list); - addr = hw->eth_dev->data->mac_addrs; len += sizeof(struct virtchnl_ether_addr); list = rte_zmalloc(NULL, len, 0); @@ -1116,9 +1116,10 @@ ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add) rte_memcpy(list->list[0].addr, addr->addr_bytes, sizeof(addr->addr_bytes)); + PMD_DRV_LOG(DEBUG, "add/rm mac:" RTE_ETHER_ADDR_PRT_FMT, RTE_ETHER_ADDR_BYTES(addr)); - + list->list[0].type = type; list->vsi_id = hw->vsi_res->vsi_id; list->num_elements = 1; diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index f0b45af5ae..78df202a77 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -131,7 +131,9 @@ int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on); int ice_dcf_disable_queues(struct ice_dcf_hw *hw); int ice_dcf_query_stats(struct ice_dcf_hw *hw, struct virtchnl_eth_stats *pstats); -int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add); +int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, + struct rte_ether_addr *addr, bool add, + uint8_t type); int ice_dcf_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete); void ice_dcf_tm_conf_init(struct rte_eth_dev *dev); diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 87d281ee93..0d944f9fd2 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -26,6 +26,12 @@ #include "ice_dcf_ethdev.h" #include "ice_rxtx.h" +#define DCF_NUM_MACADDR_MAX 64 + +static int dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw, + struct rte_ether_addr *mc_addrs, + uint32_t mc_addrs_num, bool add); + static int ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel); @@ -561,12 +567,22 @@ ice_dcf_dev_start(struct rte_eth_dev *dev) return ret; } 
- ret = ice_dcf_add_del_all_mac_addr(hw, true); + ret = ice_dcf_add_del_all_mac_addr(hw, hw->eth_dev->data->mac_addrs, + true, VIRTCHNL_ETHER_ADDR_PRIMARY); if (ret) { PMD_DRV_LOG(ERR, "Failed to add mac addr"); return ret; } + if (dcf_ad->mc_addrs_num) { + /* flush previous addresses */ + ret = dcf_add_del_mc_addr_list(hw, dcf_ad->mc_addrs, + dcf_ad->mc_addrs_num, true); + if (ret) + return ret; + } + + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; return 0; @@ -625,7 +641,16 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev) rte_intr_efd_disable(intr_handle); rte_intr_vec_list_free(intr_handle); - ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false); + ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, + dcf_ad->real_hw.eth_dev->data->mac_addrs, + false, VIRTCHNL_ETHER_ADDR_PRIMARY); + + if (dcf_ad->mc_addrs_num) + /* flush previous addresses */ + (void)dcf_add_del_mc_addr_list(&dcf_ad->real_hw, + dcf_ad->mc_addrs, + dcf_ad->mc_addrs_num, false); + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; ad->pf.adapter_stopped = 1; hw->tm_conf.committed = false; @@ -655,7 +680,7 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev, struct ice_dcf_adapter *adapter = dev->data->dev_private; struct ice_dcf_hw *hw = &adapter->real_hw; - dev_info->max_mac_addrs = 1; + dev_info->max_mac_addrs = DCF_NUM_MACADDR_MAX; dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs; dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs; dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN; @@ -818,6 +843,189 @@ ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev) false); } +static int +dcf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + int err; + + if (rte_is_zero_ether_addr(addr)) { + PMD_DRV_LOG(ERR, "Invalid Ethernet Address"); + return -EINVAL; + } + + err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, true, + 
VIRTCHNL_ETHER_ADDR_EXTRA); + if (err) { + PMD_DRV_LOG(ERR, "fail to add MAC address"); + return err; + } + + return 0; +} + +static void +dcf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct rte_ether_addr *addr = &dev->data->mac_addrs[index]; + int err; + + err = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, addr, false, + VIRTCHNL_ETHER_ADDR_EXTRA); + if (err) + PMD_DRV_LOG(ERR, "fail to remove MAC address"); +} + +static int +dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw, + struct rte_ether_addr *mc_addrs, + uint32_t mc_addrs_num, bool add) +{ + struct virtchnl_ether_addr_list *list; + struct dcf_virtchnl_cmd args; + uint32_t i; + int len, err = 0; + + len = sizeof(struct virtchnl_ether_addr_list); + len += sizeof(struct virtchnl_ether_addr) * mc_addrs_num; + + list = rte_zmalloc(NULL, len, 0); + if (!list) { + PMD_DRV_LOG(ERR, "fail to allocate memory"); + return -ENOMEM; + } + + for (i = 0; i < mc_addrs_num; i++) { + memcpy(list->list[i].addr, mc_addrs[i].addr_bytes, + sizeof(list->list[i].addr)); + list->list[i].type = VIRTCHNL_ETHER_ADDR_EXTRA; + } + + list->vsi_id = hw->vsi_res->vsi_id; + list->num_elements = mc_addrs_num; + + memset(&args, 0, sizeof(args)); + args.v_op = add ? VIRTCHNL_OP_ADD_ETH_ADDR : + VIRTCHNL_OP_DEL_ETH_ADDR; + args.req_msg = (uint8_t *)list; + args.req_msglen = len; + err = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_ETHER_ADDRESS" : + "OP_DEL_ETHER_ADDRESS"); + rte_free(list); + return err; +} + +static int +dcf_set_mc_addr_list(struct rte_eth_dev *dev, + struct rte_ether_addr *mc_addrs, + uint32_t mc_addrs_num) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + uint32_t i; + int ret; + + + if (mc_addrs_num > DCF_NUM_MACADDR_MAX) { + PMD_DRV_LOG(ERR, + "can't add more than a limited number (%u) of addresses.", + (uint32_t)DCF_NUM_MACADDR_MAX); + return -EINVAL; + } + + for (i = 0; i < mc_addrs_num; i++) { + if (!rte_is_multicast_ether_addr(&mc_addrs[i])) { + const uint8_t *mac = mc_addrs[i].addr_bytes; + + PMD_DRV_LOG(ERR, + "Invalid mac: %02x:%02x:%02x:%02x:%02x:%02x", + mac[0], mac[1], mac[2], mac[3], mac[4], + mac[5]); + return -EINVAL; + } + } + + if (adapter->mc_addrs_num) { + /* flush previous addresses */ + ret = dcf_add_del_mc_addr_list(hw, adapter->mc_addrs, + adapter->mc_addrs_num, false); + if (ret) + return ret; + } + if (!mc_addrs_num) { + adapter->mc_addrs_num = 0; + return 0; + } + + /* add new ones */ + ret = dcf_add_del_mc_addr_list(hw, mc_addrs, mc_addrs_num, true); + if (ret) { + /* if adding mac address list fails, should add the + * previous addresses back. 
+ */ + if (adapter->mc_addrs_num) + (void)dcf_add_del_mc_addr_list(hw, adapter->mc_addrs, + adapter->mc_addrs_num, + true); + return ret; + } + adapter->mc_addrs_num = mc_addrs_num; + memcpy(adapter->mc_addrs, + mc_addrs, mc_addrs_num * sizeof(*mc_addrs)); + + return 0; +} + +static int +dcf_dev_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + struct rte_ether_addr *old_addr; + int ret; + + old_addr = hw->eth_dev->data->mac_addrs; + if (rte_is_same_ether_addr(old_addr, mac_addr)) + return 0; + + ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, old_addr, false, + VIRTCHNL_ETHER_ADDR_PRIMARY); + if (ret) + PMD_DRV_LOG(ERR, "Fail to delete old MAC:" + " %02X:%02X:%02X:%02X:%02X:%02X", + old_addr->addr_bytes[0], + old_addr->addr_bytes[1], + old_addr->addr_bytes[2], + old_addr->addr_bytes[3], + old_addr->addr_bytes[4], + old_addr->addr_bytes[5]); + + ret = ice_dcf_add_del_all_mac_addr(&adapter->real_hw, mac_addr, true, + VIRTCHNL_ETHER_ADDR_PRIMARY); + if (ret) + PMD_DRV_LOG(ERR, "Fail to add new MAC:" + " %02X:%02X:%02X:%02X:%02X:%02X", + mac_addr->addr_bytes[0], + mac_addr->addr_bytes[1], + mac_addr->addr_bytes[2], + mac_addr->addr_bytes[3], + mac_addr->addr_bytes[4], + mac_addr->addr_bytes[5]); + + if (ret) + return -EIO; + + rte_ether_addr_copy(mac_addr, hw->eth_dev->data->mac_addrs); + return 0; +} + static int ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops) @@ -1326,6 +1534,10 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = { .promiscuous_disable = ice_dcf_dev_promiscuous_disable, .allmulticast_enable = ice_dcf_dev_allmulticast_enable, .allmulticast_disable = ice_dcf_dev_allmulticast_disable, + .mac_addr_add = dcf_dev_add_mac_addr, + .mac_addr_remove = dcf_dev_del_mac_addr, + .set_mc_addr_list = dcf_set_mc_addr_list, + .mac_addr_set = dcf_dev_set_default_mac_addr, 
.flow_ops_get = ice_dcf_dev_flow_ops_get, .udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add, .udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del, diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h index 22e450527b..27f6402786 100644 --- a/drivers/net/ice/ice_dcf_ethdev.h +++ b/drivers/net/ice/ice_dcf_ethdev.h @@ -14,7 +14,7 @@ #include "ice_dcf.h" #define ICE_DCF_MAX_RINGS 1 - +#define DCF_NUM_MACADDR_MAX 64 #define ICE_DCF_FRAME_SIZE_MAX 9728 #define ICE_DCF_VLAN_TAG_SIZE 4 #define ICE_DCF_ETH_OVERHEAD \ @@ -35,7 +35,8 @@ struct ice_dcf_adapter { bool promisc_unicast_enabled; bool promisc_multicast_enabled; - + uint32_t mc_addrs_num; + struct rte_ether_addr mc_addrs[DCF_NUM_MACADDR_MAX]; int num_reprs; struct ice_dcf_repr_info *repr_infos; }; From patchwork Wed Apr 13 16:09:07 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109642 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 90117A0508; Wed, 13 Apr 2022 10:11:30 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 6255E42824; Wed, 13 Apr 2022 10:11:04 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 5C67142815 for ; Wed, 13 Apr 2022 10:11:01 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837462; x=1681373462; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=llY6IOz8dlyDhNchvrE++z5xtL8Zh+wU7YCAs0cdukc=; b=FDlmCUej4fVdzlRd6hXqXSmd/XCrYCv32tQTz55x+dexs81VbtIBYn0U nFn+NOIGA6KiYUkDTPV+cnB5D2i7XgHm1YrlF2fQJqeT40PngM8pPE8i8 xjoNoAYENpdzx0BN/p057ZNC94tcRW9SNlZXLmhY6ltHoXo7s+qm9vT+o 
H5QX2OeNo2PuEaZ2/Tnf/2rZWiZIkxia2bJekcy+vIQ0OEk8BK4+8S9cg QDdv2EcXnAncTImM+a40WhVLTyzEWoT+jLkcK3sdLMN2ngoZIPcvJOe47 rsKrj6nQAJIT1UyGNnsfCLQG7NzXRTybdSdtawqmNsJlTxUFbO7lIR2mK Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189095" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189095" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:01 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847582" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:10:59 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Kevin Liu Subject: [PATCH v2 08/33] net/ice: support dcf VLAN filter and offload configuration Date: Wed, 13 Apr 2022 16:09:07 +0000 Message-Id: <20220413160932.2074781-9-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Below PMD ops are supported in this patch: .vlan_filter_set = dcf_dev_vlan_filter_set .vlan_offload_set = dcf_dev_vlan_offload_set Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf_ethdev.c | 101 +++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 0d944f9fd2..e58cdf47d2 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -1026,6 +1026,105 @@ 
dcf_dev_set_default_mac_addr(struct rte_eth_dev *dev, return 0; } +static int +dcf_add_del_vlan(struct ice_dcf_hw *hw, uint16_t vlanid, bool add) +{ + struct virtchnl_vlan_filter_list *vlan_list; + uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) + + sizeof(uint16_t)]; + struct dcf_virtchnl_cmd args; + int err; + + vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer; + vlan_list->vsi_id = hw->vsi_res->vsi_id; + vlan_list->num_elements = 1; + vlan_list->vlan_id[0] = vlanid; + + memset(&args, 0, sizeof(args)); + args.v_op = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN; + args.req_msg = cmd_buffer; + args.req_msglen = sizeof(cmd_buffer); + err = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? "OP_ADD_VLAN" : "OP_DEL_VLAN"); + + return err; +} + +static int +dcf_enable_vlan_strip(struct ice_dcf_hw *hw) +{ + struct dcf_virtchnl_cmd args; + int ret; + + memset(&args, 0, sizeof(args)); + args.v_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING; + ret = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (ret) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_ENABLE_VLAN_STRIPPING"); + + return ret; +} + +static int +dcf_disable_vlan_strip(struct ice_dcf_hw *hw) +{ + struct dcf_virtchnl_cmd args; + int ret; + + memset(&args, 0, sizeof(args)); + args.v_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING; + ret = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (ret) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_DISABLE_VLAN_STRIPPING"); + + return ret; +} + +static int +dcf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + int err; + + if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + return -ENOTSUP; + + err = dcf_add_del_vlan(hw, vlan_id, on); + if (err) + return -EIO; + return 0; +} + +static int +dcf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct 
ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + int err; + + if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + return -ENOTSUP; + + /* Vlan stripping setting */ + if (mask & RTE_ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping */ + if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) + err = dcf_enable_vlan_strip(hw); + else + err = dcf_disable_vlan_strip(hw); + + if (err) + return -EIO; + } + return 0; +} + static int ice_dcf_dev_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops) @@ -1538,6 +1637,8 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = { .mac_addr_remove = dcf_dev_del_mac_addr, .set_mc_addr_list = dcf_set_mc_addr_list, .mac_addr_set = dcf_dev_set_default_mac_addr, + .vlan_filter_set = dcf_dev_vlan_filter_set, + .vlan_offload_set = dcf_dev_vlan_offload_set, .flow_ops_get = ice_dcf_dev_flow_ops_get, .udp_tunnel_port_add = ice_dcf_dev_udp_tunnel_port_add, .udp_tunnel_port_del = ice_dcf_dev_udp_tunnel_port_del, From patchwork Wed Apr 13 16:09:08 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109643 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 812ACA0508; Wed, 13 Apr 2022 10:11:45 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 64A6642827; Wed, 13 Apr 2022 10:11:07 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id E1D794281E for ; Wed, 13 Apr 2022 10:11:04 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837465; x=1681373465; 
h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=czQYGmqdwsiJjeNH0C2rD08JOg7Y4c6jT9LjqLzrkTE=; b=QV7IMhd6unTLIiTJuOsKJLMf1DRyMtBpaN9zxEEpkq861nezpH/2amkJ KK9TXmrgntlpAWyvPlVGaIAeCxf8kOzMdaZ+BGx1GrxS26Jd6ym4Emg/S M81nrcf0LUZUiy85IlkgkDPhhgPm9zCQ67cBUC5RE1rA9JKK4NpHI4jN+ sgo+tDEGmoiY7uTgX2aqtwCCJcThuawmpMxSf63Xuix1vu0IadPnRisQW bgHmWwtsyZUVSR5zqMvehkF8i8yk2I1tcx3qX61KbZJOv0Ae4pMTGQ8Gj hTIxk9uRV16678KyiMUf2evP6z/JC6zEG+XRIqtjTHlkdC9hsWJrE2ME4 g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189106" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189106" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:03 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847593" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:01 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Kevin Liu Subject: [PATCH v2 09/33] net/ice: support DCF new VLAN capabilities Date: Wed, 13 Apr 2022 16:09:08 +0000 Message-Id: <20220413160932.2074781-10-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang The new VLAN virtchnl opcodes introduce new capabilities like VLAN filtering, stripping and insertion. The DCF needs to query the VLAN capabilities based on current device configuration firstly. 
DCF is able to configure inner VLAN filter when port VLAN is enabled base on negotiation; and DCF is able to configure outer VLAN (0x8100) if port VLAN is disabled to be compatible with legacy mode. When port VLAN is updated by DCF, the DCF needs to reset to query the new VLAN capabilities. Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 27 +++++ drivers/net/ice/ice_dcf.h | 1 + drivers/net/ice/ice_dcf_ethdev.c | 171 ++++++++++++++++++++++++++++--- 3 files changed, 182 insertions(+), 17 deletions(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 55ae68c456..885d58c0f4 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -587,6 +587,29 @@ ice_dcf_get_supported_rxdid(struct ice_dcf_hw *hw) return 0; } +static int +dcf_get_vlan_offload_caps_v2(struct ice_dcf_hw *hw) +{ + struct virtchnl_vlan_caps vlan_v2_caps; + struct dcf_virtchnl_cmd args; + int ret; + + memset(&args, 0, sizeof(args)); + args.v_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS; + args.rsp_msgbuf = (uint8_t *)&vlan_v2_caps; + args.rsp_buflen = sizeof(vlan_v2_caps); + + ret = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS"); + return ret; + } + + rte_memcpy(&hw->vlan_v2_caps, &vlan_v2_caps, sizeof(vlan_v2_caps)); + return 0; +} + int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) { @@ -701,6 +724,10 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) rte_intr_enable(pci_dev->intr_handle); ice_dcf_enable_irq0(hw); + if ((hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) && + dcf_get_vlan_offload_caps_v2(hw)) + goto err_rss; + return 0; err_rss: diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index 78df202a77..32e6031bd9 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -107,6 +107,7 @@ struct ice_dcf_hw { uint16_t nb_msix; uint16_t rxq_map[16]; 
struct virtchnl_eth_stats eth_stats_offset; + struct virtchnl_vlan_caps vlan_v2_caps; /* Link status */ bool link_up; diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index e58cdf47d2..d4bfa182a4 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -1026,6 +1026,46 @@ dcf_dev_set_default_mac_addr(struct rte_eth_dev *dev, return 0; } +static int +dcf_add_del_vlan_v2(struct ice_dcf_hw *hw, uint16_t vlanid, bool add) +{ + struct virtchnl_vlan_supported_caps *supported_caps = + &hw->vlan_v2_caps.filtering.filtering_support; + struct virtchnl_vlan *vlan_setting; + struct virtchnl_vlan_filter_list_v2 vlan_filter; + struct dcf_virtchnl_cmd args; + uint32_t filtering_caps; + int err; + + if (supported_caps->outer) { + filtering_caps = supported_caps->outer; + vlan_setting = &vlan_filter.filters[0].outer; + } else { + filtering_caps = supported_caps->inner; + vlan_setting = &vlan_filter.filters[0].inner; + } + + if (!(filtering_caps & VIRTCHNL_VLAN_ETHERTYPE_8100)) + return -ENOTSUP; + + memset(&vlan_filter, 0, sizeof(vlan_filter)); + vlan_filter.vport_id = hw->vsi_res->vsi_id; + vlan_filter.num_elements = 1; + vlan_setting->tpid = RTE_ETHER_TYPE_VLAN; + vlan_setting->tci = vlanid; + + memset(&args, 0, sizeof(args)); + args.v_op = add ? VIRTCHNL_OP_ADD_VLAN_V2 : VIRTCHNL_OP_DEL_VLAN_V2; + args.req_msg = (uint8_t *)&vlan_filter; + args.req_msglen = sizeof(vlan_filter); + err = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2"); + + return err; +} + static int dcf_add_del_vlan(struct ice_dcf_hw *hw, uint16_t vlanid, bool add) { @@ -1052,6 +1092,116 @@ dcf_add_del_vlan(struct ice_dcf_hw *hw, uint16_t vlanid, bool add) return err; } +static int +dcf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + int err; + + if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { + err = dcf_add_del_vlan_v2(hw, vlan_id, on); + if (err) + return -EIO; + return 0; + } + + if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + return -ENOTSUP; + + err = dcf_add_del_vlan(hw, vlan_id, on); + if (err) + return -EIO; + return 0; +} + +static void +dcf_iterate_vlan_filters_v2(struct rte_eth_dev *dev, bool enable) +{ + struct rte_vlan_filter_conf *vfc = &dev->data->vlan_filter_conf; + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + uint32_t i, j; + uint64_t ids; + + for (i = 0; i < RTE_DIM(vfc->ids); i++) { + if (vfc->ids[i] == 0) + continue; + + ids = vfc->ids[i]; + for (j = 0; ids != 0 && j < 64; j++, ids >>= 1) { + if (ids & 1) + dcf_add_del_vlan_v2(hw, 64 * i + j, enable); + } + } +} + +static int +dcf_config_vlan_strip_v2(struct ice_dcf_hw *hw, bool enable) +{ + struct virtchnl_vlan_supported_caps *stripping_caps = + &hw->vlan_v2_caps.offloads.stripping_support; + struct virtchnl_vlan_setting vlan_strip; + struct dcf_virtchnl_cmd args; + uint32_t *ethertype; + int ret; + + if ((stripping_caps->outer & VIRTCHNL_VLAN_ETHERTYPE_8100) && + (stripping_caps->outer & VIRTCHNL_VLAN_TOGGLE)) + ethertype = &vlan_strip.outer_ethertype_setting; + else if ((stripping_caps->inner & VIRTCHNL_VLAN_ETHERTYPE_8100) && + (stripping_caps->inner & VIRTCHNL_VLAN_TOGGLE)) + ethertype = &vlan_strip.inner_ethertype_setting; + else + return -ENOTSUP; + + memset(&vlan_strip, 0, sizeof(vlan_strip)); + 
vlan_strip.vport_id = hw->vsi_res->vsi_id; + *ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100; + + memset(&args, 0, sizeof(args)); + args.v_op = enable ? VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 : + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2; + args.req_msg = (uint8_t *)&vlan_strip; + args.req_msglen = sizeof(vlan_strip); + ret = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (ret) + PMD_DRV_LOG(ERR, "fail to execute command %s", + enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" : + "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2"); + + return ret; +} + +static int +dcf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + bool enable; + int err; + + if (mask & RTE_ETH_VLAN_FILTER_MASK) { + enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); + + dcf_iterate_vlan_filters_v2(dev, enable); + } + + if (mask & RTE_ETH_VLAN_STRIP_MASK) { + enable = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); + + err = dcf_config_vlan_strip_v2(hw, enable); + /* If not support, the stripping is already disabled by PF */ + if (err == -ENOTSUP && !enable) + err = 0; + if (err) + return -EIO; + } + + return 0; +} + static int dcf_enable_vlan_strip(struct ice_dcf_hw *hw) { @@ -1084,30 +1234,17 @@ dcf_disable_vlan_strip(struct ice_dcf_hw *hw) return ret; } -static int -dcf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) -{ - struct ice_dcf_adapter *adapter = dev->data->dev_private; - struct ice_dcf_hw *hw = &adapter->real_hw; - int err; - - if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) - return -ENOTSUP; - - err = dcf_add_del_vlan(hw, vlan_id, on); - if (err) - return -EIO; - return 0; -} - static int dcf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) { + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; struct ice_dcf_adapter *adapter = dev->data->dev_private; struct 
ice_dcf_hw *hw = &adapter->real_hw; - struct rte_eth_conf *dev_conf = &dev->data->dev_conf; int err; + if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) + return dcf_dev_vlan_offload_set_v2(dev, mask); + if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) return -ENOTSUP; From patchwork Wed Apr 13 16:09:09 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109644 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id DC10FA0508; Wed, 13 Apr 2022 10:11:53 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 6766642805; Wed, 13 Apr 2022 10:11:12 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 9770242805 for ; Wed, 13 Apr 2022 10:11:10 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837470; x=1681373470; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=soZEOlPaZDYWJK582B374G4S5rjpEAf/cO/hHvnfTro=; b=ZRmEiH5r4ApScxRduXgxq0CLCvM83Hx46PiT8XzXrL5qRjW4PZbEscGW fqj7DhrWKUnvfbZxZYtFitMbPRWLPrM2Hm1Xew39rjz3wzmv2hibOaDnm imGjRoKqv7wRajmvRLLcRsVEKLCvszY/aGzl+X1QFiA/tYpBa0+NB0gx5 GqgG2UG/sayJZK9z7RcLDQIgmyzC522PGhQDo7dkf6vP5USGcEqX9fOYM liIRvq+eRPmNk5fWl681dUFHKHyy6jbq+gtx1zCxwTDOC18kitlAa93FJ KpbjZQMyGpvtgLHNy0pt9DEE0vDTxin3Rsbiy5dUxYPFXh9LF9Q8k4t33 w==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189118" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189118" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:06 -0700 X-IronPort-AV: 
E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847611" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:04 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Dapeng Yu , Kevin Liu Subject: [PATCH v2 10/33] net/ice: enable CVL DCF device reset API Date: Wed, 13 Apr 2022 16:09:09 +0000 Message-Id: <20220413160932.2074781-11-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Dapeng Yu Enable CVL DCF device reset API. Signed-off-by: Dapeng Yu Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 24 ++++++++++++++++++++++++ drivers/net/ice/ice_dcf.h | 1 + 2 files changed, 25 insertions(+) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 885d58c0f4..9c2f13cf72 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -1163,3 +1163,27 @@ ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, rte_free(list); return err; } + +int +ice_dcf_cap_reset(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) +{ + int ret; + + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + ice_dcf_disable_irq0(hw); + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, ice_dcf_dev_interrupt_handler, + hw); + ret = ice_dcf_mode_disable(hw); + if (ret) + goto err; + ret = ice_dcf_get_vf_resource(hw); +err: + rte_intr_callback_register(intr_handle, ice_dcf_dev_interrupt_handler, + hw); + 
rte_intr_enable(intr_handle); + ice_dcf_enable_irq0(hw); + return ret; +} diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index 32e6031bd9..8cf17e7700 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -137,6 +137,7 @@ int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, uint8_t type); int ice_dcf_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete); +int ice_dcf_cap_reset(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw); void ice_dcf_tm_conf_init(struct rte_eth_dev *dev); void ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev); int ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id); From patchwork Wed Apr 13 16:09:10 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109645 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0D155A0508; Wed, 13 Apr 2022 10:12:02 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4C18E4282B; Wed, 13 Apr 2022 10:11:13 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 3844542805 for ; Wed, 13 Apr 2022 10:11:11 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837471; x=1681373471; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Ej+hNt6aRetaFtO6hTS6hPkvkACq5l0iXPnR186tnes=; b=dJ4f5+i9lzhgNtUbh/pcm2Wo4RVeDJs29fRiIBqjlV3Pn/wsLm9LgzQA ZU+EowclmeQKquWTr4VMDos4MHWI9eWBd7FCCMu2h4Z34+PNQkSokrjZH y1kZ6Zwd9Pq5rxAzTfjsfsPT1ii/KqiUpRn4up7XkpRahBD/wDABopFhc e5nV2aYFy1drAkwmcgMcbgk2dIIMyGhr1dIz1mdjQf7ujXhk7esPWTXq5 Pw+5SNesG3OxPGcAAMPXWfWDHYmAYYS5hFesIc0L+Zob9QwDkbH6HypH+ 
IcqV6XMEa/RczCTHiHSYUl1a1blBOz9NiUEDKvNTBJcUEsQCn7Eu3M+kF A==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189123" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189123" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:09 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847623" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:07 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Junfeng Guo , Kevin Liu Subject: [PATCH v2 11/33] net/ice/base: add VXLAN support for switch filter Date: Wed, 13 Apr 2022 16:09:10 +0000 Message-Id: <20220413160932.2074781-12-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Junfeng Guo 1. Add profile rule for VXLAN on Switch Filter, including pattern_eth_ipv4_udp_vxlan_any pattern_eth_ipv6_udp_vxlan_any pattern_eth_ipv4_udp_vxlan_eth_ipv4 pattern_eth_ipv4_udp_vxlan_eth_ipv6 pattern_eth_ipv6_udp_vxlan_eth_ipv4 pattern_eth_ipv6_udp_vxlan_eth_ipv6 2. 
Add common rule for VXLAN on Switch Filter, including +-----------------+-----------------------------------------------------+ | Pattern | Input Set | +-----------------+-----------------------------------------------------+ | ipv4_vxlan_ipv4 | vni, inner dmac, inner dst/src ip, outer dst/src ip | | ipv4_vxlan_ipv6 | vni, inner dmac, inner dst/src ip | | ipv6_vxlan_ipv4 | vni, inner dmac, inner dst/src ip | | ipv6_vxlan_ipv6 | vni, inner dmac, inner dst/src ip | +-----------------+-----------------------------------------------------+ Signed-off-by: Junfeng Guo Signed-off-by: Kevin Liu --- drivers/net/ice/base/ice_protocol_type.h | 6 + drivers/net/ice/base/ice_switch.c | 213 ++++++++++++++++++++++- drivers/net/ice/base/ice_switch.h | 12 ++ 3 files changed, 230 insertions(+), 1 deletion(-) diff --git a/drivers/net/ice/base/ice_protocol_type.h b/drivers/net/ice/base/ice_protocol_type.h index 0e6e5990be..d6332c5690 100644 --- a/drivers/net/ice/base/ice_protocol_type.h +++ b/drivers/net/ice/base/ice_protocol_type.h @@ -112,6 +112,12 @@ enum ice_sw_tunnel_type { ICE_SW_TUN_IPV6_NAT_T, ICE_SW_TUN_IPV4_L2TPV3, ICE_SW_TUN_IPV6_L2TPV3, + ICE_SW_TUN_PROFID_IPV4_VXLAN, + ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV4, + ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV6, + ICE_SW_TUN_PROFID_IPV6_VXLAN, + ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV4, + ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV6, ICE_SW_TUN_PROFID_IPV6_ESP, ICE_SW_TUN_PROFID_IPV6_AH, ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3, diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c index d4cc664ad7..b0c50c8f40 100644 --- a/drivers/net/ice/base/ice_switch.c +++ b/drivers/net/ice/base/ice_switch.c @@ -228,6 +228,117 @@ static const u8 dummy_udp_tun_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; +static const +struct ice_dummy_pkt_offsets dummy_udp_tun_ipv6_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { 
ICE_MAC_IL, 50 }, + { ICE_IPV6_IL, 64 }, + { ICE_TCP_IL, 104 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_ipv6_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x46, 0x00, 0x00, + + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x86, 0xdd, + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV4_IL 64 */ + 0x00, 0x00, 0x06, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +static const +struct ice_dummy_pkt_offsets dummy_udp_tun_ipv6_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_VXLAN, 42 }, + { ICE_GENEVE, 42 }, + { ICE_VXLAN_GPE, 42 }, + { ICE_MAC_IL, 50 }, + { ICE_IPV6_IL, 64 }, + { ICE_UDP_ILOS, 104 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_udp_tun_ipv6_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */ + 0x00, 0x3a, 0x00, 0x00, + + 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */ + 0x00, 0x00, 0x00, 0x00, + + 
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x86, 0xdd, + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */ + 0x00, 0x58, 0x11, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */ + 0x00, 0x08, 0x00, 0x00, +}; + /* offset info for MAC + IPv4 + UDP dummy packet */ static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, @@ -2001,6 +2112,10 @@ static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan) u8 gre_profile[12] = {13, 14, 15, 19, 20, 21, 28, 29, 30, 31, 32, 33}; u8 pppoe_profile[7] = {34, 35, 36, 37, 38, 39, 40}; u8 non_tun_profile[6] = {4, 5, 6, 7, 8, 9}; + bool ipv4_vxlan_ipv4_valid = false; + bool ipv4_vxlan_ipv6_valid = false; + bool ipv6_vxlan_ipv4_valid = false; + bool ipv6_vxlan_ipv6_valid = false; enum ice_sw_tunnel_type tun_type; u16 i, j, k, profile_num = 0; bool non_tun_valid = false; @@ -2022,8 +2137,17 @@ static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan) } for (i = 0; i < 12; i++) { - if (vxlan_profile[i] == j) + if (vxlan_profile[i] == j) { vxlan_valid = true; + if (i < 3) + ipv4_vxlan_ipv4_valid = true; + else if (i < 6) + ipv6_vxlan_ipv4_valid = true; + else if (i < 9) + ipv4_vxlan_ipv6_valid = true; + else if (i < 12) + ipv6_vxlan_ipv6_valid = true; + } } for (i = 0; i < 7; i++) { @@ -2083,6 +2207,20 @@ static enum ice_sw_tunnel_type ice_get_tun_type_for_recipe(u8 rid, bool vlan) break; } } + if (tun_type == ICE_SW_TUN_VXLAN) { + if (ipv4_vxlan_ipv4_valid && ipv4_vxlan_ipv6_valid) + tun_type = ICE_SW_TUN_PROFID_IPV4_VXLAN; + else if (ipv6_vxlan_ipv4_valid && ipv6_vxlan_ipv6_valid) + tun_type = ICE_SW_TUN_PROFID_IPV6_VXLAN; + else if (ipv4_vxlan_ipv4_valid) + tun_type = 
ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV4; + else if (ipv4_vxlan_ipv6_valid) + tun_type = ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV6; + else if (ipv6_vxlan_ipv4_valid) + tun_type = ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV4; + else if (ipv6_vxlan_ipv6_valid) + tun_type = ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV6; + } if (profile_num == 1 && (flag_valid || non_tun_valid || pppoe_valid)) { for (j = 0; j < ICE_MAX_NUM_PROFILES; j++) { @@ -7496,6 +7634,12 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) case ICE_SW_TUN_VXLAN_GPE: case ICE_SW_TUN_GENEVE: case ICE_SW_TUN_VXLAN: + case ICE_SW_TUN_PROFID_IPV4_VXLAN: + case ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV4: + case ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV6: + case ICE_SW_TUN_PROFID_IPV6_VXLAN: + case ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV4: + case ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV6: case ICE_SW_TUN_NVGRE: case ICE_SW_TUN_UDP: case ICE_ALL_TUNNELS: @@ -7613,6 +7757,42 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, case ICE_SW_TUN_PPPOE_IPV6_UDP: ice_set_bit(ICE_PROFID_PPPOE_IPV6_UDP, bm); return; + case ICE_SW_TUN_PROFID_IPV4_VXLAN: + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV4_TCP, bm); + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV4_UDP, bm); + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV4_OTHER, bm); + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV6_TCP, bm); + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV6_UDP, bm); + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV6_OTHER, bm); + return; + case ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV4: + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV4_TCP, bm); + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV4_UDP, bm); + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV4_OTHER, bm); + return; + case ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV6: + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV6_TCP, bm); + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV6_UDP, bm); + ice_set_bit(ICE_PROFID_IPV4_TUN_M_IPV6_OTHER, bm); + return; + case ICE_SW_TUN_PROFID_IPV6_VXLAN: + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV4_TCP, bm); + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV4_UDP, bm); + 
ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV4_OTHER, bm); + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV6_TCP, bm); + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV6_UDP, bm); + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV6_OTHER, bm); + return; + case ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV4: + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV4_TCP, bm); + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV4_UDP, bm); + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV4_OTHER, bm); + return; + case ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV6: + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV6_TCP, bm); + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV6_UDP, bm); + ice_set_bit(ICE_PROFID_IPV6_TUN_M_IPV6_OTHER, bm); + return; case ICE_SW_TUN_PROFID_IPV6_ESP: case ICE_SW_TUN_IPV6_ESP: ice_set_bit(ICE_PROFID_IPV6_ESP, bm); @@ -7780,6 +7960,12 @@ bool ice_is_prof_rule(enum ice_sw_tunnel_type type) { switch (type) { case ICE_SW_TUN_AND_NON_TUN: + case ICE_SW_TUN_PROFID_IPV4_VXLAN: + case ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV4: + case ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV6: + case ICE_SW_TUN_PROFID_IPV6_VXLAN: + case ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV4: + case ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV6: case ICE_SW_TUN_PROFID_IPV6_ESP: case ICE_SW_TUN_PROFID_IPV6_AH: case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3: @@ -8396,8 +8582,27 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, return; } + if (tun_type == ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV6 || + tun_type == ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV6) { + if (tcp) { + *pkt = dummy_udp_tun_ipv6_tcp_packet; + *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet); + *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets; + return; + } + + *pkt = dummy_udp_tun_ipv6_udp_packet; + *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet); + *offsets = dummy_udp_tun_ipv6_udp_packet_offsets; + return; + } + if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE || tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP || + tun_type == ICE_SW_TUN_PROFID_IPV4_VXLAN || + tun_type == ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV4 || + tun_type == 
ICE_SW_TUN_PROFID_IPV6_VXLAN || + tun_type == ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV4 || tun_type == ICE_SW_TUN_GENEVE_VLAN || tun_type == ICE_SW_TUN_VXLAN_VLAN) { if (tcp) { @@ -8613,6 +8818,12 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, case ICE_SW_TUN_AND_NON_TUN: case ICE_SW_TUN_VXLAN_GPE: case ICE_SW_TUN_VXLAN: + case ICE_SW_TUN_PROFID_IPV4_VXLAN: + case ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV4: + case ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV6: + case ICE_SW_TUN_PROFID_IPV6_VXLAN: + case ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV4: + case ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV6: case ICE_SW_TUN_VXLAN_VLAN: case ICE_SW_TUN_UDP: if (!ice_get_open_tunnel_port(hw, TNL_VXLAN, &open_port)) diff --git a/drivers/net/ice/base/ice_switch.h b/drivers/net/ice/base/ice_switch.h index a2b3c80107..efb9399b77 100644 --- a/drivers/net/ice/base/ice_switch.h +++ b/drivers/net/ice/base/ice_switch.h @@ -20,6 +20,18 @@ #define ICE_PROFID_IPV4_UDP 5 #define ICE_PROFID_IPV6_TCP 7 #define ICE_PROFID_IPV6_UDP 8 +#define ICE_PROFID_IPV4_TUN_M_IPV4_TCP 10 +#define ICE_PROFID_IPV4_TUN_M_IPV4_UDP 11 +#define ICE_PROFID_IPV4_TUN_M_IPV4_OTHER 12 +#define ICE_PROFID_IPV6_TUN_M_IPV4_TCP 16 +#define ICE_PROFID_IPV6_TUN_M_IPV4_UDP 17 +#define ICE_PROFID_IPV6_TUN_M_IPV4_OTHER 18 +#define ICE_PROFID_IPV4_TUN_M_IPV6_TCP 22 +#define ICE_PROFID_IPV4_TUN_M_IPV6_UDP 23 +#define ICE_PROFID_IPV4_TUN_M_IPV6_OTHER 24 +#define ICE_PROFID_IPV6_TUN_M_IPV6_TCP 25 +#define ICE_PROFID_IPV6_TUN_M_IPV6_UDP 26 +#define ICE_PROFID_IPV6_TUN_M_IPV6_OTHER 27 #define ICE_PROFID_PPPOE_PAY 34 #define ICE_PROFID_PPPOE_IPV4_TCP 35 #define ICE_PROFID_PPPOE_IPV4_UDP 36 From patchwork Wed Apr 13 16:09:11 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109653 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org 
(Postfix) with ESMTP id EB723A0508; Wed, 13 Apr 2022 10:13:00 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E3A0B4283E; Wed, 13 Apr 2022 10:11:39 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 1338142835 for ; Wed, 13 Apr 2022 10:11:36 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837497; x=1681373497; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=GypuUfHl5KQvgLq0XITj/gqSyLC9O1aub31sne0FyNQ=; b=EPvEJsOmtRiWl04s3bed3rdgXIysQhgb40PPhseIvn86gtbk2Arhk4iO MNQ5FOGOhk36X+z78bMBhNqtDDbk8UuUXUAK/3eRWW27q2aJp9rVNaOev LN6dlbw5QwE4gJPseMqzLAtHXTEIWeGl/9q3AWYe9TB0obTa56Fzf0LkE xUQNUPUyMLWasJ1itgb5lB1x4YsYb79+Q+QcVw0k7EdHmeNPpTEnN0NcM eIU6FsIf0TULT1TCipmktxpi8WgfIIt1jhcZvB04cVZxhydlH3VT+iIt6 Eiog/oqcNp/mkqeh1jYVw9eQCvZvvr67L91ADRczbqJoE2ax2JcMcH9tl Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189131" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189131" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:12 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847636" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:09 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Junfeng Guo , Kevin Liu Subject: [PATCH v2 12/33] net/ice: add VXLAN support for switch filter Date: Wed, 13 Apr 2022 16:09:11 +0000 Message-Id: <20220413160932.2074781-13-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> 
<20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Junfeng Guo 1. Add profile rule for VXLAN on Switch Filter, including pattern_eth_ipv4_udp_vxlan_any pattern_eth_ipv6_udp_vxlan_any pattern_eth_ipv4_udp_vxlan_eth_ipv4 pattern_eth_ipv4_udp_vxlan_eth_ipv6 pattern_eth_ipv6_udp_vxlan_eth_ipv4 pattern_eth_ipv6_udp_vxlan_eth_ipv6 2. Add common rule for VXLAN on Switch Filter, including +-----------------+-----------------------------------------------------+ | Pattern | Input Set | +-----------------+-----------------------------------------------------+ | ipv4_vxlan_ipv4 | vni, inner dmac, inner dst/src ip, outer dst/src ip | | ipv4_vxlan_ipv6 | vni, inner dmac, inner dst/src ip | | ipv6_vxlan_ipv4 | vni, inner dmac, inner dst/src ip | | ipv6_vxlan_ipv6 | vni, inner dmac, inner dst/src ip | +-----------------+-----------------------------------------------------+ Signed-off-by: Junfeng Guo Signed-off-by: Kevin Liu --- drivers/net/ice/ice_generic_flow.c | 20 ++++++++++ drivers/net/ice/ice_generic_flow.h | 4 ++ drivers/net/ice/ice_switch_filter.c | 59 +++++++++++++++++++++++++++-- 3 files changed, 80 insertions(+), 3 deletions(-) diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c index 53b1c0b69a..1433094ed4 100644 --- a/drivers/net/ice/ice_generic_flow.c +++ b/drivers/net/ice/ice_generic_flow.c @@ -375,6 +375,26 @@ enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_icmp[] = { RTE_FLOW_ITEM_TYPE_END, }; +/* IPv4 VXLAN ANY */ +enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_any[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ANY, + RTE_FLOW_ITEM_TYPE_END, +}; + +/* IPv6 VXLAN ANY */ +enum rte_flow_item_type 
pattern_eth_ipv6_udp_vxlan_any[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_ANY, + RTE_FLOW_ITEM_TYPE_END, +}; + /* IPv4 VXLAN MAC IPv4 */ enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_eth_ipv4[] = { RTE_FLOW_ITEM_TYPE_ETH, diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h index 11f51a5c15..def7e2d6d6 100644 --- a/drivers/net/ice/ice_generic_flow.h +++ b/drivers/net/ice/ice_generic_flow.h @@ -175,6 +175,10 @@ extern enum rte_flow_item_type pattern_eth_ipv6_icmp6[]; extern enum rte_flow_item_type pattern_eth_vlan_ipv6_icmp6[]; extern enum rte_flow_item_type pattern_eth_qinq_ipv6_icmp6[]; +/* IPv4/IPv6 VXLAN ANY */ +extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_any[]; +extern enum rte_flow_item_type pattern_eth_ipv6_udp_vxlan_any[]; + /* IPv4 VXLAN IPv4 */ extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4[]; extern enum rte_flow_item_type pattern_eth_ipv4_udp_vxlan_ipv4_udp[]; diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index 36c9bffb73..e90e109eca 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -85,6 +85,19 @@ #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \ ICE_INSET_VXLAN_VNI) +#define ICE_SW_INSET_DIST_IPV4_VXLAN_IPV4 ( \ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI | \ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST) +#define ICE_SW_INSET_DIST_IPV4_VXLAN_IPV6 ( \ + ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI | \ + ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST) +#define ICE_SW_INSET_DIST_IPV6_VXLAN_IPV4 ( \ + ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI | \ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST) +#define ICE_SW_INSET_DIST_IPV6_VXLAN_IPV6 ( \ + ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI | \ + ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST) #define 
ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \ @@ -112,6 +125,9 @@ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \ ICE_INSET_IPV4_TOS) +#define ICE_SW_INSET_PERM_TUNNEL_IPV6 ( \ + ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \ + ICE_INSET_IPV6_NEXT_HDR | ICE_INSET_IPV6_TC) #define ICE_SW_INSET_MAC_PPPOE ( \ ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION) @@ -217,9 +233,14 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = { {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_any, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_vxlan_any, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv6, ICE_SW_INSET_DIST_IPV4_VXLAN_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_vxlan_eth_ipv4, ICE_SW_INSET_DIST_IPV6_VXLAN_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_vxlan_eth_ipv6, ICE_SW_INSET_DIST_IPV6_VXLAN_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, 
ICE_INSET_NONE}, @@ -301,9 +322,14 @@ ice_pattern_match_item ice_switch_pattern_perm_list[] = { {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_any, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_vxlan_any, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv6, ICE_SW_INSET_DIST_IPV4_VXLAN_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_vxlan_eth_ipv4, ICE_SW_INSET_DIST_IPV6_VXLAN_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_vxlan_eth_ipv6, ICE_SW_INSET_DIST_IPV6_VXLAN_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, @@ -566,6 +592,11 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], bool inner_ipv6_valid = 0; bool inner_tcp_valid = 0; bool inner_udp_valid = 0; + bool ipv4_ipv4_valid = 0; + bool ipv4_ipv6_valid = 0; + bool ipv6_ipv4_valid = 0; + bool ipv6_ipv6_valid = 0; + bool any_valid = 0; uint16_t j, k, t = 0; if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ || @@ -586,6 +617,7 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], switch (item_type) { case RTE_FLOW_ITEM_TYPE_ANY: *tun_type = ICE_SW_TUN_AND_NON_TUN; + 
any_valid = 1; break; case RTE_FLOW_ITEM_TYPE_ETH: @@ -654,6 +686,10 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], case RTE_FLOW_ITEM_TYPE_IPV4: ipv4_spec = item->spec; ipv4_mask = item->mask; + if (ipv4_valid) + ipv4_ipv4_valid = 1; + if (ipv6_valid) + ipv6_ipv4_valid = 1; if (tunnel_valid) { inner_ipv4_valid = 1; input = &inner_input_set; @@ -734,6 +770,10 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], case RTE_FLOW_ITEM_TYPE_IPV6: ipv6_spec = item->spec; ipv6_mask = item->mask; + if (ipv4_valid) + ipv4_ipv6_valid = 1; + if (ipv6_valid) + ipv6_ipv6_valid = 1; if (tunnel_valid) { inner_ipv6_valid = 1; input = &inner_input_set; @@ -1577,9 +1617,7 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], } if (*tun_type == ICE_NON_TUN) { - if (vxlan_valid) - *tun_type = ICE_SW_TUN_VXLAN; - else if (nvgre_valid) + if (nvgre_valid) *tun_type = ICE_SW_TUN_NVGRE; else if (ipv4_valid && tcp_valid) *tun_type = ICE_SW_IPV4_TCP; @@ -1591,6 +1629,21 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], *tun_type = ICE_SW_IPV6_UDP; } + if (vxlan_valid) { + if (ipv4_ipv4_valid) + *tun_type = ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV4; + else if (ipv4_ipv6_valid) + *tun_type = ICE_SW_TUN_PROFID_IPV4_VXLAN_IPV6; + else if (ipv6_ipv4_valid) + *tun_type = ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV4; + else if (ipv6_ipv6_valid) + *tun_type = ICE_SW_TUN_PROFID_IPV6_VXLAN_IPV6; + else if (ipv6_valid && any_valid) + *tun_type = ICE_SW_TUN_PROFID_IPV6_VXLAN; + else if (ipv4_valid && any_valid) + *tun_type = ICE_SW_TUN_PROFID_IPV4_VXLAN; + } + if (input_set_byte > MAX_INPUT_SET_BYTE) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, From patchwork Wed Apr 13 16:09:12 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109654 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from 
mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7AD14A0508; Wed, 13 Apr 2022 10:13:10 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0528C42847; Wed, 13 Apr 2022 10:11:41 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id 0A2E042802 for ; Wed, 13 Apr 2022 10:11:37 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837498; x=1681373498; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Eg+W1/VI+8uwinFV5UxZmP882/27WuMKR2L8a+fQOsw=; b=howE8CZbiEUsE5qOedsbySnEzTT1Gcfwb+dbH5EWpG67Icrc+JEv2Ra6 jlSeWOnnpemqadhJjqElqcOIuj6yp/ec4CdMk0OQpU5YUh6Svkf4jVNbl hJGlU00seMJXvRefbqK4egA2QWFE4r309tgZrfx3eFqf/D9D0a2Yx1yN7 l3i0kYkaLwDVD7a6LeDNHErrJwTW627dz/pUeqKt+417rXcL3mPvv4ZqM jcTPbH/MCZSI5BEVf/qFFJSyY1EZABx5iMBg7kexdw0pilkhq76awboCR cI9sbkuOE7D8EJBhKE7TAKTwoInvdKKByYWkYjnTH5URMnRLTliipRfWp w==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="243189138" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="243189138" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:15 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847649" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:12 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Steven Zou , Kevin Liu Subject: [PATCH v2 13/33] common/iavf: support flushing rules and reporting DCF id Date: Wed, 13 Apr 2022 16:09:12 +0000 Message-Id: <20220413160932.2074781-14-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: 
<20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Add virtual channel opcode for DCF flushing rules. Add virtual channel event for PF reporting DCF id. Signed-off-by: Steven Zou Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/common/iavf/virtchnl.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h index 3e44eca7d8..6e2a24b281 100644 --- a/drivers/common/iavf/virtchnl.h +++ b/drivers/common/iavf/virtchnl.h @@ -164,6 +164,12 @@ enum virtchnl_ops { VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107, VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108, VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111, + + /** + * To reduce the risk for future combability issue, + * set VIRTCHNL_OP_DCF_RULE_FLUSH carefully by using a special value. + */ + VIRTCHNL_OP_DCF_RULE_FLUSH = 6000, VIRTCHNL_OP_MAX, }; @@ -1424,6 +1430,12 @@ enum virtchnl_event_codes { VIRTCHNL_EVENT_RESET_IMPENDING, VIRTCHNL_EVENT_PF_DRIVER_CLOSE, VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE, + + /** + * To reduce the risk for future combability issue, + * set VIRTCHNL_EVENT_DCF_VSI_INFO carefully by using a special value. 
+ */ + VIRTCHNL_EVENT_DCF_VSI_INFO = 1000, }; #define PF_EVENT_SEVERITY_INFO 0 @@ -2200,6 +2212,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, */ valid_len = msglen; break; + case VIRTCHNL_OP_DCF_RULE_FLUSH: case VIRTCHNL_OP_DCF_DISABLE: case VIRTCHNL_OP_DCF_GET_VSI_MAP: case VIRTCHNL_OP_DCF_GET_PKG_INFO: From patchwork Wed Apr 13 16:09:13 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109646 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0C3E7A0508; Wed, 13 Apr 2022 10:12:13 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 975204068B; Wed, 13 Apr 2022 10:11:20 +0200 (CEST) Received: from mga18.intel.com (mga18.intel.com [134.134.136.126]) by mails.dpdk.org (Postfix) with ESMTP id B0635410DD for ; Wed, 13 Apr 2022 10:11:18 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837478; x=1681373478; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=LV+q9yR0xTP+mFXIzBaBeeG/9OhtLKY9m3tE6qltvrM=; b=XcUGHXOwvTK+Dbr/oic8WWwpUVe86FmL/+p85zVEqF7KqTt2I0Ie2nZu +QOW75XvkxYBz+NEQVreMkKxO3yVlqHvA8+Sbwy9rJFOuDTfhzsjebYMY JNRyANdFp+NHWQYKrzZx8gXVLptYrmnhjUMkZNc0VjwXtu5lmYh211ZLw XDL2KOdx+O8xcTvra4e1u9D1ctoThWldyC0rjpSysds8Yhc7sJcopEEMI tyzhTfEksHc3+bM99Il7Xb22AIG1rQ3p06Adn/gj7e+j7pdehcP/J0vW9 rr+sC8HPAKUEo7K4gTJJyGaJAvnNjHmT0LB1AzQMoNHuJcW2AL8yJ+fM8 Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="244490824" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="244490824" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 
01:11:17 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847667" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:15 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Kevin Liu Subject: [PATCH v2 14/33] net/ice/base: fix ethertype filter input set Date: Wed, 13 Apr 2022 16:09:13 +0000 Message-Id: <20220413160932.2074781-15-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Add destination and source MAC as the input sets to ethertype filter. For example: flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 type is 0x802 / end actions queue index 2 / end This flow will result in all the matched ingress packets be forwarded to queue 2. 
Fixes: 1f70fb3e958a ("net/ice/base: support flow director for non-IP packets") Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/base/ice_fdir.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ice/base/ice_fdir.c b/drivers/net/ice/base/ice_fdir.c index ae76361102..0a1d45a9d7 100644 --- a/drivers/net/ice/base/ice_fdir.c +++ b/drivers/net/ice/base/ice_fdir.c @@ -3935,6 +3935,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, input->ip.v6.dst_port); break; case ICE_FLTR_PTYPE_NON_IP_L2: + ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, + input->ext_data.src_mac); ice_pkt_insert_u16(loc, ICE_MAC_ETHTYPE_OFFSET, input->ext_data.ether_type); break; From patchwork Wed Apr 13 16:09:14 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109647 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9977BA0508; Wed, 13 Apr 2022 10:12:20 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 7A08A42801; Wed, 13 Apr 2022 10:11:22 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 826D442801 for ; Wed, 13 Apr 2022 10:11:21 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837481; x=1681373481; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=3rmbh29fN3RU4dPLHE6OjOHImGxTUIo8SR2xHknHuBQ=; b=Vf2m7MNEoAO6DsF4zuMiTYxu1VJvglS0pJB7TBb/9l8igixMIfRdbDUx Knsu6SP/XXSQc2JFP/UT0w7mVjMGsK94WQ+DtleZn6jBFm2FSyOg8ElR8 7SBFwtYn0/hn9uaYH78DnJ6j/B2OgwYMqNK9PrKLFhZQYpGtWHdWUoZEP 
6BtUrugk7fwMab+34/pxHYR/z/PTbZGt9RIN5AtkYXPDV/Dmds4CPTJM1 nuGZj0igMtRmwLzTt+Jh0u1gnTZNRhdrrHLLqwdjaFSS5HQr+1uy7d2LE Cese9aL6ukbzHPZnRyPc1qzfnXYggFbopkNw5hn80Kd+Id6djnzjSB+Gk g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630024" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630024" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:20 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847678" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:17 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Junfeng Guo , Kevin Liu Subject: [PATCH v2 15/33] net/ice/base: support IPv6 GRE UDP pattern Date: Wed, 13 Apr 2022 16:09:14 +0000 Message-Id: <20220413160932.2074781-16-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Add supports(trainer packet and it's offsets, definitions, pattern matching) for IPv6 GRE UDP pattern. 
Signed-off-by: Junfeng Guo Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/base/ice_protocol_type.h | 1 + drivers/net/ice/base/ice_switch.c | 43 +++++++++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/drivers/net/ice/base/ice_protocol_type.h b/drivers/net/ice/base/ice_protocol_type.h index d6332c5690..eec9f27823 100644 --- a/drivers/net/ice/base/ice_protocol_type.h +++ b/drivers/net/ice/base/ice_protocol_type.h @@ -44,6 +44,7 @@ enum ice_protocol_type { ICE_GENEVE, ICE_VXLAN_GPE, ICE_NVGRE, + ICE_GRE, ICE_GTP, ICE_PPPOE, ICE_PFCP, diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c index b0c50c8f40..f444a2da07 100644 --- a/drivers/net/ice/base/ice_switch.c +++ b/drivers/net/ice/base/ice_switch.c @@ -12,6 +12,7 @@ #define ICE_MAX_VLAN_ID 0xFFF #define ICE_IPV6_ETHER_ID 0x86DD #define ICE_IPV4_NVGRE_PROTO_ID 0x002F +#define ICE_IPV6_GRE_PROTO_ID 0x002F #define ICE_PPP_IPV6_PROTO_ID 0x0057 #define ICE_TCP_PROTO_ID 0x06 #define ICE_GTPU_PROFILE 24 @@ -129,6 +130,34 @@ static const u8 dummy_gre_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; +static const struct ice_dummy_pkt_offsets +dummy_ipv6_gre_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_GRE, 54 }, + { ICE_IPV6_IL, 58 }, + { ICE_UDP_ILOS, 98 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_ipv6_gre_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00, + 0x00, 0x00, 0x00, 0x36, 0x2f, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x86, 0xdd, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x11, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0xff, 0xd8, 0x00, 0x00, +}; + static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, @@ -8207,8 +8236,13 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, udp = true; else if (lkups[i].type == ICE_TCP_IL) tcp = true; - else if (lkups[i].type == ICE_IPV6_OFOS) + else if (lkups[i].type == ICE_IPV6_OFOS) { ipv6 = true; + if (lkups[i].h_u.ipv6_hdr.next_hdr == + ICE_IPV6_GRE_PROTO_ID && + lkups[i].m_u.ipv6_hdr.next_hdr == 0xFF) + gre = true; + } else if (lkups[i].type == ICE_VLAN_OFOS) vlan = true; else if (lkups[i].type == ICE_ETYPE_OL && @@ -8568,6 +8602,13 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, return; } + if (ipv6 && gre) { + *pkt = dummy_ipv6_gre_udp_packet; + *pkt_len = sizeof(dummy_ipv6_gre_udp_packet); + *offsets = dummy_ipv6_gre_udp_packet_offsets; + return; + } + if (tun_type == ICE_SW_TUN_NVGRE || gre) { if (tcp) { *pkt = dummy_gre_tcp_packet; From patchwork Wed Apr 13 16:09:15 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109648 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E7095A0508; Wed, 13 Apr 2022 10:12:27 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 539184282A; Wed, 13 Apr 2022 10:11:24 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 9AD774282A for ; Wed, 13 Apr 2022 10:11:23 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837483; x=1681373483; 
h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=HGAH1fi4ZYs5JA57wTnOrPNTkUE7AC7qqNIiEoQpqAI=; b=Ub4SQIJ8P4RnkDB9qEoJcvoExqYt2BBHaGbxYIctP2oyUJNbeXR1ogGS M+qEnapXNpqMA+LbqsKGXlSHP4vSPP02ifMBsI/GExEJTZWbcP24wvpNw uQEw6CbwCn5MN8pafifhjadyRHqgBMM2xf1ShKkiTxXPdOEYrsE3JKgcg Ah+EH3sqWOCdfiXb/3COV41Q2gNOtvZr9M1p+ViaclL+CpDXNOitUBb5e gDTLio/inlx0HhF8y4PIb0D2YhRQgSzBKDOrk2LMwYWj7W1RxiZ36bBDN M/tTl1Hsrjd6MuGHf7cJBSbKy00ugZnoAwPnKEba9jIShfTkMeijluhVP A==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630034" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630034" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:23 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847685" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:20 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Junfeng Guo , Kevin Liu Subject: [PATCH v2 16/33] net/ice: support IPv6 NVGRE tunnel Date: Wed, 13 Apr 2022 16:09:15 +0000 Message-Id: <20220413160932.2074781-17-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Add protocol definition and pattern matching for IPv6 NVGRE tunnel. 
Signed-off-by: Junfeng Guo Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_switch_filter.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index e90e109eca..4e9c85aed4 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -31,6 +31,7 @@ #define ICE_PPP_IPV4_PROTO 0x0021 #define ICE_PPP_IPV6_PROTO 0x0057 #define ICE_IPV4_PROTO_NVGRE 0x002F +#define ICE_IPV6_PROTO_NVGRE 0x002F #define ICE_SW_PRI_BASE 6 #define ICE_SW_INSET_ETHER ( \ @@ -803,6 +804,10 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], break; } } + if ((ipv6_spec->hdr.proto & + ipv6_mask->hdr.proto) == + ICE_IPV6_PROTO_NVGRE) + *tun_type = ICE_SW_TUN_AND_NON_TUN; if (ipv6_mask->hdr.proto) *input |= ICE_INSET_IPV6_NEXT_HDR; if (ipv6_mask->hdr.hop_limits) From patchwork Wed Apr 13 16:09:16 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109649 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9B850A0508; Wed, 13 Apr 2022 10:12:34 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 53406427F6; Wed, 13 Apr 2022 10:11:29 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id ABA69410DD for ; Wed, 13 Apr 2022 10:11:27 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837487; x=1681373487; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=2br0CrSJ69J1ja9v/yKNQzBX1RuWsI7i5lX+K5EKcq4=; b=IhD4G4ou/DAmOWANJW+jt/D0Rtmt1IYqPmRy8s+akoXhX04lNV0ON7Ds 
TMu3YLp2j6LMqGj+EZ80AoYmpmEGcCPqM4l5I1nR/YWorbVLlfCRcWUf7 fvBsbqcpmTuvdcVyFDjjFDaZKH3Wyz5NdA7llx94USrFDJeLb2zvvtF+3 ZQ9LQBLSvLT8M0txTV+Mm9UM1pVGO2yGlutXpQA1e0yZ49GtJjoPCyEfL R9InOd71o5/smVg4tvumzP8HkwERvFIW+Cq4vpqooLsJLjouVlEn96/2c jWxK8rt1UgbIIlyVq6G/tGWVgfYSnLDJvuhN5VgtGqtxYzLPoU2u4RYv/ w==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630044" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630044" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:27 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847709" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:23 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Junfeng Guo , Kevin Liu Subject: [PATCH v2 17/33] net/ice: support new pattern of IPv4 Date: Wed, 13 Apr 2022 16:09:16 +0000 Message-Id: <20220413160932.2074781-18-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Add definition and pattern entry for IPv4 pattern: MAC/VLAN/IPv4 Signed-off-by: Junfeng Guo Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_switch_filter.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index 4e9c85aed4..a8cb70ee0c 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ 
b/drivers/net/ice/ice_switch_filter.c @@ -38,6 +38,8 @@ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE) #define ICE_SW_INSET_MAC_VLAN ( \ ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER) +#define ICE_SW_INSET_MAC_VLAN_IPV4 ( \ + ICE_SW_INSET_MAC_VLAN | ICE_SW_INSET_MAC_IPV4) #define ICE_SW_INSET_MAC_QINQ ( \ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \ ICE_INSET_VLAN_OUTER) @@ -231,6 +233,7 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = { {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_ipv4, ICE_SW_INSET_MAC_VLAN_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, From patchwork Wed Apr 13 16:09:17 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109650 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9A7F7A0508; Wed, 13 Apr 2022 10:12:40 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 3990C410E1; Wed, 13 Apr 2022 10:11:33 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id C0F2040E2D for ; Wed, 13 Apr 2022 10:11:30 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837490; x=1681373490; h=from:to:cc:subject:date:message-id:in-reply-to: 
references:mime-version:content-transfer-encoding; bh=gnym2WRTAAtHqye+S7YpwHniEXJezVknPJvvlsGJ5j8=; b=VvtpguIo/tCGrqJh30RHroz9W9czmrS2vm1yzmN910XZT0WsaJpen27G dnixFKF9Vw2B2bU1Ab/Xe4QK0sR1aijRV4KKEM+uGUrzXqjHxW+O7zGr3 TLSfk2zKg7DiHp/3JyNRAd/2OhbkXr5XZL/1dku33FK9qNmyzXNDjxQi6 CDze7JEX0C2TirWM9BMlA1gOEILbXSipeBV2YJjv5atEcojQoKnRPxeZt H7Ysm9TAswtPGVE1aVg9BAosrQB34dKfiWRXfok9d9z1hkvwH7I6RzZAu dXr8NfbMZ5pufEMXOo73ItW7YDyQQ9AElOC48/O3wZ9JYWIlpTNFSxKSG Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630052" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630052" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:29 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847723" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:27 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Junfeng Guo , Kevin Liu Subject: [PATCH v2 18/33] net/ice/base: support new patterns of TCP and UDP Date: Wed, 13 Apr 2022 16:09:17 +0000 Message-Id: <20220413160932.2074781-19-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Find training packets for below TCP and UDP patterns: MAC/VLAN/IPv4/TCP MAC/VLAN/IPv4/UDP Signed-off-by: Junfeng Guo Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/base/ice_switch.c | 12 ++++++++++++ 1 file changed, 12 
insertions(+) diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c index f444a2da07..c742dba138 100644 --- a/drivers/net/ice/base/ice_switch.c +++ b/drivers/net/ice/base/ice_switch.c @@ -8568,6 +8568,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } if (tun_type == ICE_SW_IPV4_TCP) { + if (vlan && tcp) { + *pkt = dummy_vlan_tcp_packet; + *pkt_len = sizeof(dummy_vlan_tcp_packet); + *offsets = dummy_vlan_tcp_packet_offsets; + return; + } *pkt = dummy_tcp_packet; *pkt_len = sizeof(dummy_tcp_packet); *offsets = dummy_tcp_packet_offsets; @@ -8575,6 +8581,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } if (tun_type == ICE_SW_IPV4_UDP) { + if (vlan && udp) { + *pkt = dummy_vlan_udp_packet; + *pkt_len = sizeof(dummy_vlan_udp_packet); + *offsets = dummy_vlan_udp_packet_offsets; + return; + } *pkt = dummy_udp_packet; *pkt_len = sizeof(dummy_udp_packet); *offsets = dummy_udp_packet_offsets; From patchwork Wed Apr 13 16:09:18 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109651 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 13801A0508; Wed, 13 Apr 2022 10:12:47 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 27BAA42838; Wed, 13 Apr 2022 10:11:35 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 5164542811 for ; Wed, 13 Apr 2022 10:11:33 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837493; x=1681373493; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; 
bh=rPki7LM4lZ8ZSBqMaBam3rsY2yYXGCMWtpr8cL4jZuU=; b=ICPs4WnomQnZMFxO6zpmAHgIhLGcdZ/gvlWlVI7CN8T14mLgniDBXdu1 Q5RywOiYgftxNQmohGVE9dJUFqKp6xz+GqIlvFmuhEQo5WPAsFaGVXXV4 Q5aq5RPQNfM4YVYLg2ELZTjYwpf9O4GxTK6ZOkVqqfXAT+vv0hJbzmN1M 0jwrYlmWlM6nURGTc/8qIImfrfE5n4GwbXDBKt848s2eeetIY3xyOp3bu 2UMXLU2gzpCW2FCNn8M3sZoBW63B/8fFsYLPRz31VukszVJ4I65wapyq/ H1SH8p8LLK1UNPKJw8Ehxhl9YkcjApW2PQFaHkApYhQAv9uqAtYpeDK7v Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630056" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630056" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:32 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847734" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:30 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Junfeng Guo , Kevin Liu Subject: [PATCH v2 19/33] net/ice: support new patterns of TCP and UDP Date: Wed, 13 Apr 2022 16:09:18 +0000 Message-Id: <20220413160932.2074781-20-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Add definitions and pattern entries for below TCP and UDP patterns: MAC/VLAN/IPv4/TCP MAC/VLAN/IPv4/UDP Signed-off-by: Junfeng Guo Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_switch_filter.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index a8cb70ee0c..44046f803c 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -62,6 +62,10 @@ ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \ ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT) +#define ICE_SW_INSET_MAC_VLAN_IPV4_TCP ( \ + ICE_SW_INSET_MAC_VLAN | ICE_SW_INSET_MAC_IPV4_TCP) +#define ICE_SW_INSET_MAC_VLAN_IPV4_UDP ( \ + ICE_SW_INSET_MAC_VLAN | ICE_SW_INSET_MAC_IPV4_UDP) #define ICE_SW_INSET_MAC_IPV6 ( \ ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \ @@ -234,6 +238,8 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = { {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_vlan_ipv4, ICE_SW_INSET_MAC_VLAN_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_ipv4_tcp, ICE_SW_INSET_MAC_VLAN_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_ipv4_udp, ICE_SW_INSET_MAC_VLAN_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, From patchwork Wed Apr 13 16:09:19 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109652 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D80BFA0508; Wed, 13 Apr 2022 10:12:53 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP 
id 1346C42835; Wed, 13 Apr 2022 10:11:39 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 7958042802 for ; Wed, 13 Apr 2022 10:11:36 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837496; x=1681373496; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=e3DAAmQzR/BbODjS4ihK3J3G75SEspdVFZqp2V/eqf4=; b=Q38llHzxnMaW8sX9t6WtgpQxq4lp4ynrZKBLSG7+ENZpgknR/+6wNDLQ 8VMNR6PVcNPTQXzIT8pYEVEP6ftvmSiN566HrRbnxx4VsDO9cDCm+RL45 rSEvYSF5ZN8DxHelZO1vvmz1PcGg4E5+9bFYC/H4luDc1fKq3G5I7Ynq/ EVe0CPF/FkNaAnaQ33ryMpU0v8WtG+gzfmneKxKx4AFY/sjSUFSB1tGfz Rc8XiU7wHesGhbgtUgXJt2N365tteoslBxHvwKmV+x62SvP9j8fOy8LAD 9+KwH3kBukQETJJVeT3RLjcNFv/6Rrh3gWPVzH8v0Zv3ftuuTkWmfz3wr g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630064" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630064" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:35 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847758" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:32 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Junfeng Guo , Kevin Liu Subject: [PATCH v2 20/33] net/ice/base: support IPv4 GRE tunnel Date: Wed, 13 Apr 2022 16:09:19 +0000 Message-Id: <20220413160932.2074781-21-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and 
discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Add definitions, trainer packets and routine path for IPv4 GRE tunnel. Ref: https://www.ietf.org/rfc/rfc1701.html Signed-off-by: Junfeng Guo Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/base/ice_flex_pipe.c | 37 ++- drivers/net/ice/base/ice_flex_pipe.h | 3 +- drivers/net/ice/base/ice_protocol_type.h | 15 ++ drivers/net/ice/base/ice_switch.c | 304 ++++++++++++++++++++++- 4 files changed, 332 insertions(+), 27 deletions(-) diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c index f6a29f87c5..8672c41c69 100644 --- a/drivers/net/ice/base/ice_flex_pipe.c +++ b/drivers/net/ice/base/ice_flex_pipe.c @@ -1851,6 +1851,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, * @ids_cnt: lookup/protocol count * @bm: bitmap of field vectors to consider * @fv_list: Head of a list + * @lkup_exts: lookup elements * * Finds all the field vector entries from switch block that contain * a given protocol ID and returns a list of structures of type @@ -1861,7 +1862,8 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, */ enum ice_status ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, - ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list, + struct ice_prot_lkup_ext *lkup_exts) { struct ice_sw_fv_list_entry *fvl; struct ice_sw_fv_list_entry *tmp; @@ -1892,29 +1894,26 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, if (!ice_is_bit_set(bm, (u16)offset)) continue; - for (i = 0; i < ids_cnt; i++) { + int found = 1; + for (i = 0; i < lkup_exts->n_val_words; i++) { int j; - /* This code assumes that if a switch field vector line - * has a matching protocol, then this line will contain - * the entries necessary to represent every field in - * that protocol header. 
- */ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id == prot_ids[i]) + if (fv->ew[j].prot_id == + lkup_exts->fv_words[i].prot_id && + fv->ew[j].off == lkup_exts->fv_words[i].off) break; if (j >= hw->blk[ICE_BLK_SW].es.fvw) - break; - if (i + 1 == ids_cnt) { - fvl = (struct ice_sw_fv_list_entry *) - ice_malloc(hw, sizeof(*fvl)); - if (!fvl) - goto err; - fvl->fv_ptr = fv; - fvl->profile_id = offset; - LIST_ADD(&fvl->list_entry, fv_list); - break; - } + found = 0; + } + if (found) { + fvl = (struct ice_sw_fv_list_entry *) + ice_malloc(hw, sizeof(*fvl)); + if (!fvl) + goto err; + fvl->fv_ptr = fv; + fvl->profile_id = offset; + LIST_ADD(&fvl->list_entry, fv_list); } } while (fv); if (LIST_EMPTY(fv_list)) diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h index 23ba45564a..a22d66f3cf 100644 --- a/drivers/net/ice/base/ice_flex_pipe.h +++ b/drivers/net/ice/base/ice_flex_pipe.h @@ -37,7 +37,8 @@ void ice_init_prof_result_bm(struct ice_hw *hw); enum ice_status ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, - ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list); + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list, + struct ice_prot_lkup_ext *lkup_exts); enum ice_status ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); diff --git a/drivers/net/ice/base/ice_protocol_type.h b/drivers/net/ice/base/ice_protocol_type.h index eec9f27823..ffd34606e0 100644 --- a/drivers/net/ice/base/ice_protocol_type.h +++ b/drivers/net/ice/base/ice_protocol_type.h @@ -67,6 +67,7 @@ enum ice_sw_tunnel_type { ICE_SW_TUN_VXLAN, /* VXLAN matches only non-VLAN pkts */ ICE_SW_TUN_VXLAN_VLAN, /* VXLAN matches both VLAN and non-VLAN pkts */ ICE_SW_TUN_NVGRE, + ICE_SW_TUN_GRE, ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN * and GENEVE */ @@ -231,6 +232,10 @@ enum ice_prot_id { #define ICE_TUN_FLAG_VLAN_MASK 0x01 #define 
ICE_TUN_FLAG_FV_IND 2 +#define ICE_GRE_FLAG_MDID 22 +#define ICE_GRE_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_GRE_FLAG_MDID) +#define ICE_GRE_FLAG_MASK 0x01C0 + #define ICE_PROTOCOL_MAX_ENTRIES 16 /* Mapping of software defined protocol ID to hardware defined protocol ID */ @@ -371,6 +376,15 @@ struct ice_nvgre { __be32 tni_flow; }; +struct ice_gre { + __be16 flags; + __be16 protocol; + __be16 chksum; + __be16 offset; + __be32 key; + __be32 seqnum; +}; + union ice_prot_hdr { struct ice_ether_hdr eth_hdr; struct ice_ethtype_hdr ethertype; @@ -381,6 +395,7 @@ union ice_prot_hdr { struct ice_sctp_hdr sctp_hdr; struct ice_udp_tnl_hdr tnl_hdr; struct ice_nvgre nvgre_hdr; + struct ice_gre gre_hdr; struct ice_udp_gtp_hdr gtp_hdr; struct ice_pppoe_hdr pppoe_hdr; struct ice_pfcp_hdr pfcp_hdr; diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c index c742dba138..1b51cd4321 100644 --- a/drivers/net/ice/base/ice_switch.c +++ b/drivers/net/ice/base/ice_switch.c @@ -12,6 +12,7 @@ #define ICE_MAX_VLAN_ID 0xFFF #define ICE_IPV6_ETHER_ID 0x86DD #define ICE_IPV4_NVGRE_PROTO_ID 0x002F +#define ICE_IPV4_GRE_PROTO_ID 0x002F #define ICE_IPV6_GRE_PROTO_ID 0x002F #define ICE_PPP_IPV6_PROTO_ID 0x0057 #define ICE_TCP_PROTO_ID 0x06 @@ -158,6 +159,188 @@ static const u8 dummy_ipv6_gre_udp_packet[] = { 0xff, 0xd8, 0x00, 0x00, }; +static const struct ice_dummy_pkt_offsets +dummy_gre_rfc1701_c1k1_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_GRE, 34 }, + { ICE_IPV4_IL, 50 }, + { ICE_TCP_IL, 70 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_rfc1701_c1k1_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x2f, 0x7c, 0x7e, + 0x7f, 0x00, 0x00, 0x01, + 0x7f, 0x00, 0x00, 0x01, + + 0xb0, 0x00, 0x08, 0x00, /* ICE_GRE 34 
*/ + 0x46, 0x1e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x45, 0x00, 0x00, 0x2a, /* ICE_IPV4_IL 50 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x06, 0x7c, 0xcb, + 0x7f, 0x00, 0x00, 0x01, + 0x7f, 0x00, 0x00, 0x01, + + 0x00, 0x14, 0x00, 0x50, /* ICE_TCP_IL 70 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x50, 0x02, 0x20, 0x00, + 0x91, 0x7a, 0x00, 0x00, + + 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets +dummy_gre_rfc1701_c1k1_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_GRE, 34 }, + { ICE_IPV4_IL, 50 }, + { ICE_UDP_ILOS, 70 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_rfc1701_c1k1_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x42, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x2f, 0x7c, 0x8a, + 0x7f, 0x00, 0x00, 0x01, + 0x7f, 0x00, 0x00, 0x01, + + 0xb0, 0x00, 0x08, 0x00, /* ICE_GRE 34 */ + 0x46, 0x1d, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x45, 0x00, 0x00, 0x1e, /* ICE_IPV4_IL 50 */ + 0x00, 0x01, 0x00, 0x00, + 0x40, 0x11, 0x7c, 0xcc, + 0x7f, 0x00, 0x00, 0x01, + 0x7f, 0x00, 0x00, 0x01, + + 0x00, 0x35, 0x00, 0x35, /* ICE_UDP_ILOS 70 */ + 0x00, 0x0a, 0x01, 0x6e, + + 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets +dummy_gre_rfc1701_c0k1_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_GRE, 34 }, + { ICE_IPV4_IL, 46 }, + { ICE_TCP_IL, 66 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_rfc1701_c0k1_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x4a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x82, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x30, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x45, 0x00, + 0x00, 0x2a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, + 0x7c, 0xcb, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, + 0x20, 0x00, 0x91, 0x7a, 0x00, 0x00, 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets +dummy_gre_rfc1701_c0k1_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_GRE, 34 }, + { ICE_IPV4_IL, 46 }, + { ICE_UDP_ILOS, 66 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_rfc1701_c0k1_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x3e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x8e, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x30, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00, + 0x00, 0x1e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, + 0x7c, 0xcc, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x00, 0x35, 0x00, 0x35, 0x00, 0x0a, + 0x01, 0x6e, 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets +dummy_gre_rfc1701_c0k0_tcp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_GRE, 34 }, + { ICE_IPV4_IL, 42 }, + { ICE_TCP_IL, 62 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_rfc1701_c0k0_tcp_packet[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x46, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x86, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x10, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x2a, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcb, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x14, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0x91, 0x7a, + 0x00, 0x00, 0x00, 0x00, +}; + +static const struct ice_dummy_pkt_offsets 
+dummy_gre_rfc1701_c0k0_udp_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_GRE, 34 }, + { ICE_IPV4_IL, 42 }, + { ICE_UDP_ILOS, 62 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +static const u8 dummy_gre_rfc1701_c0k0_udp_packet[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x3a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x2f, + 0x7c, 0x92, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, + 0x00, 0x01, 0x10, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x45, 0x00, 0x00, 0x1e, 0x00, 0x01, + 0x00, 0x00, 0x40, 0x11, 0x7c, 0xcc, 0x7f, 0x00, + 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01, 0x00, 0x35, + 0x00, 0x35, 0x00, 0x0a, 0x01, 0x6e, 0x00, 0x00, +}; + static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, @@ -173,7 +356,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { }; static const u8 dummy_udp_tun_tcp_packet[] = { - 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -224,7 +407,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { }; static const u8 dummy_udp_tun_udp_packet[] = { - 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -6892,6 +7075,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { { ICE_GENEVE, { 8, 10, 12, 14 } }, { ICE_VXLAN_GPE, { 8, 10, 12, 14 } }, { ICE_NVGRE, { 0, 2, 4, 6 } }, + { ICE_GRE, { 0, 2, 4, 6, 8, 10, 12, 14 } }, { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } }, { ICE_PPPOE, { 0, 2, 4, 6 } }, { ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } }, @@ -6927,6 +7111,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_GENEVE, ICE_UDP_OF_HW }, { ICE_VXLAN_GPE, ICE_UDP_OF_HW }, { ICE_NVGRE, 
ICE_GRE_OF_HW }, + { ICE_GRE, ICE_GRE_OF_HW }, { ICE_GTP, ICE_UDP_OF_HW }, { ICE_PPPOE, ICE_PPPOE_HW }, { ICE_PFCP, ICE_UDP_ILOS_HW }, @@ -7065,6 +7250,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, struct ice_prot_lkup_ext *lkup_exts) { u8 j, word, prot_id, ret_val; + u8 extra_byte = 0; if (!ice_prot_type_to_id(rule->type, &prot_id)) return 0; @@ -7077,8 +7263,15 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, /* No more space to accommodate */ if (word >= ICE_MAX_CHAIN_WORDS) return 0; + if (rule->type == ICE_GRE) { + if (ice_prot_ext[rule->type].offs[j] == 0) { + if (((u16 *)&rule->h_u)[j] == 0x20) + extra_byte = 4; + continue; + } + } lkup_exts->fv_words[word].off = - ice_prot_ext[rule->type].offs[j]; + ice_prot_ext[rule->type].offs[j] - extra_byte; lkup_exts->fv_words[word].prot_id = ice_prot_id_tbl[rule->type].protocol_id; lkup_exts->field_mask[word] = @@ -7622,10 +7815,12 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, * @lkups_cnt: number of protocols * @bm: bitmap of field vectors to consider * @fv_list: pointer to a list that holds the returned field vectors + * @lkup_exts: lookup elements */ static enum ice_status ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, - ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list, + struct ice_prot_lkup_ext *lkup_exts) { enum ice_status status; u8 *prot_ids; @@ -7645,7 +7840,8 @@ ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } /* Find field vectors that include all specified protocol types */ - status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list); + status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list, + lkup_exts); free_mem: ice_free(hw, prot_ids); @@ -7681,6 +7877,10 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) *mask = ICE_TUN_FLAG_MASK; return true; + case ICE_SW_TUN_GRE: + *mask = ICE_GRE_FLAG_MASK; + 
return true; + case ICE_SW_TUN_GENEVE_VLAN: case ICE_SW_TUN_VXLAN_VLAN: *mask = ICE_TUN_FLAG_MASK & ~ICE_TUN_FLAG_VLAN_MASK; @@ -7702,6 +7902,12 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo, struct ice_prot_lkup_ext *lkup_exts) { u16 mask; + u8 has_gre_key = 0; + u8 i; + + for (i = 0; i < lkup_exts->n_val_words; i++) + if (lkup_exts->fv_words[i].prot_id == 0x40) + has_gre_key = 1; /* If this is a tunneled packet, then add recipe index to match the * tunnel bit in the packet metadata flags. @@ -7713,6 +7919,13 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo, lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW; lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; lkup_exts->field_mask[word] = mask; + + if (rinfo->tun_type == ICE_SW_TUN_GRE) + lkup_exts->fv_words[word].off = + ICE_GRE_FLAG_MDID_OFF; + + if (!has_gre_key) + lkup_exts->field_mask[word] = 0x0140; } else { return ICE_ERR_MAX_LIMIT; } @@ -7754,6 +7967,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, case ICE_SW_TUN_NVGRE: prof_type = ICE_PROF_TUN_GRE; break; + case ICE_SW_TUN_GRE: + prof_type = ICE_PROF_TUN_GRE; + break; case ICE_SW_TUN_PPPOE: case ICE_SW_TUN_PPPOE_QINQ: prof_type = ICE_PROF_TUN_PPPOE; @@ -8079,7 +8295,8 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, */ ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); - status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list); + status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list, + lkup_exts); if (status) goto err_unroll; @@ -8228,6 +8445,8 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, const struct ice_dummy_pkt_offsets **offsets) { bool tcp = false, udp = false, ipv6 = false, vlan = false; + bool gre_c_bit = false; + bool gre_k_bit = false; bool gre = false, mpls = false; u16 i; @@ -8245,6 +8464,17 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } else if (lkups[i].type == ICE_VLAN_OFOS) vlan = 
true; + else if (lkups[i].type == ICE_GRE) { + if (lkups[i].h_u.gre_hdr.flags & 0x20) + gre_k_bit = true; + if (lkups[i].h_u.gre_hdr.flags & 0x80) + gre_c_bit = true; + } else if (lkups[i].type == ICE_IPV4_OFOS && + lkups[i].h_u.ipv4_hdr.protocol == + ICE_IPV4_GRE_PROTO_ID && + lkups[i].m_u.ipv4_hdr.protocol == + 0xFF) + gre = true; else if (lkups[i].type == ICE_ETYPE_OL && lkups[i].h_u.ethertype.ethtype_id == CPU_TO_BE16(ICE_IPV6_ETHER_ID) && @@ -8650,6 +8880,46 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, return; } + if (tun_type == ICE_SW_TUN_GRE && tcp) { + if (gre_c_bit && gre_k_bit) { + *pkt = dummy_gre_rfc1701_c1k1_tcp_packet; + *pkt_len = sizeof(dummy_gre_rfc1701_c1k1_tcp_packet); + *offsets = dummy_gre_rfc1701_c1k1_tcp_packet_offsets; + return; + } + if (!gre_c_bit && gre_k_bit) { + *pkt = dummy_gre_rfc1701_c0k1_tcp_packet; + *pkt_len = sizeof(dummy_gre_rfc1701_c0k1_tcp_packet); + *offsets = dummy_gre_rfc1701_c0k1_tcp_packet_offsets; + return; + } + + *pkt = dummy_gre_rfc1701_c0k0_tcp_packet; + *pkt_len = sizeof(dummy_gre_rfc1701_c0k0_tcp_packet); + *offsets = dummy_gre_rfc1701_c0k0_tcp_packet_offsets; + return; + } + + if (tun_type == ICE_SW_TUN_GRE) { + if (gre_c_bit && gre_k_bit) { + *pkt = dummy_gre_rfc1701_c1k1_udp_packet; + *pkt_len = sizeof(dummy_gre_rfc1701_c1k1_udp_packet); + *offsets = dummy_gre_rfc1701_c1k1_udp_packet_offsets; + return; + } + if (!gre_c_bit && gre_k_bit) { + *pkt = dummy_gre_rfc1701_c0k1_udp_packet; + *pkt_len = sizeof(dummy_gre_rfc1701_c0k1_udp_packet); + *offsets = dummy_gre_rfc1701_c0k1_udp_packet_offsets; + return; + } + + *pkt = dummy_gre_rfc1701_c0k0_udp_packet; + *pkt_len = sizeof(dummy_gre_rfc1701_c0k0_udp_packet); + *offsets = dummy_gre_rfc1701_c0k0_udp_packet_offsets; + return; + } + if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE || tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP || tun_type == ICE_SW_TUN_PROFID_IPV4_VXLAN || @@ -8800,6 +9070,9 @@ 
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, case ICE_NVGRE: len = sizeof(struct ice_nvgre); break; + case ICE_GRE: + len = sizeof(struct ice_gre); + break; case ICE_VXLAN: case ICE_GENEVE: case ICE_VXLAN_GPE: @@ -8833,6 +9106,20 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, if (len % ICE_BYTES_PER_WORD) return ICE_ERR_CFG; + if (lkups[i].type == ICE_GRE) { + if (lkups[i].h_u.gre_hdr.flags == 0x20) + offset -= 4; + + for (j = 1; j < len / sizeof(u16); j++) + if (((u16 *)&lkups[i].m_u)[j]) + ((u16 *)(pkt + offset))[j] = + (((u16 *)(pkt + offset))[j] & + ~((u16 *)&lkups[i].m_u)[j]) | + (((u16 *)&lkups[i].h_u)[j] & + ((u16 *)&lkups[i].m_u)[j]); + continue; + } + /* We have the offset to the header start, the length, the * caller's header values and mask. Use this information to * copy the data into the dummy packet appropriately based on @@ -9420,8 +9707,11 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, return ICE_ERR_CFG; count = ice_fill_valid_words(&lkups[i], &lkup_exts); - if (!count) + if (!count) { + if (lkups[i].type == ICE_GRE) + continue; return ICE_ERR_CFG; + } } /* Create any special protocol/offset pairs, such as looking at tunnel From patchwork Wed Apr 13 16:09:20 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109655 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D5709A0508; Wed, 13 Apr 2022 10:13:17 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 13A6142812; Wed, 13 Apr 2022 10:11:42 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 7404540694 for ; Wed, 13 Apr 2022 10:11:39 +0200 (CEST) 
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837499; x=1681373499; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Z0opagSknuRIvze615JycuONGeoeSUg9ZgewTpQEK+U=; b=SHR87mTvdTqjSZRYk7U6RkVSxPL/0lYtG+dAzJDmNwB+Iyo4RulzW0qL WagFP21caHmzxCNkED32Qz4oTZv6YfQ8nUI5/T7U5Y3Sh3HekBzYzrtw6 XXD2v623zxnxzbg0oE/wlfSdnQHvtHrMHxYnTotJ6ARpXK2W73i3z8e68 +0x5BhwkPwyPZcEgnh/Eonby8+wOGluXyg29QPHKjOJltVyitt/9cC4lU fm+MD6nvj6bD78auqWVawoikO1ANuqKKjuCmqh7L+kjj7wwnNFVEHF+zz NB57tqlxB1X656nX2GUQ7puZrvk/zdGWydwEiF+TOTNQwQwB/kw2txapk g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630070" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630070" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:39 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847805" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:36 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Steven Zou , Kevin Liu Subject: [PATCH v2 21/33] net/ice: support IPv4 GRE raw pattern type Date: Wed, 13 Apr 2022 16:09:20 +0000 Message-Id: <20220413160932.2074781-22-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang Add definitions, matching entries, parsers for below patterns: 
ETH/IPV4/GRE/RAW/IPV4 ETH/IPV4/GRE/RAW/IPV4/UDP ETH/IPV4/GRE/RAW/IPV4/TCP Signed-off-by: Steven Zou Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_generic_flow.c | 27 +++++++++ drivers/net/ice/ice_generic_flow.h | 9 +++ drivers/net/ice/ice_switch_filter.c | 90 +++++++++++++++++++++++++++++ 3 files changed, 126 insertions(+) diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c index 1433094ed4..6663a85ed0 100644 --- a/drivers/net/ice/ice_generic_flow.c +++ b/drivers/net/ice/ice_generic_flow.c @@ -1084,6 +1084,33 @@ enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_icmp6[] = { RTE_FLOW_ITEM_TYPE_ICMP6, RTE_FLOW_ITEM_TYPE_END, }; +/* IPv4 GRE RAW IPv4 */ +enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_GRE, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_GRE, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END, +}; +enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_GRE, + RTE_FLOW_ITEM_TYPE_RAW, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END, +}; /*IPv4 GTPU (EH) */ enum rte_flow_item_type pattern_eth_ipv4_gtpu[] = { diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h index def7e2d6d6..12193cbd9d 100644 --- a/drivers/net/ice/ice_generic_flow.h +++ b/drivers/net/ice/ice_generic_flow.h @@ -27,6 +27,7 @@ #define ICE_PROT_L2TPV3OIP BIT_ULL(16) #define ICE_PROT_PFCP BIT_ULL(17) #define ICE_PROT_NAT_T_ESP BIT_ULL(18) +#define ICE_PROT_GRE BIT_ULL(19) /* field */ @@ -54,6 +55,7 @@ #define ICE_PFCP_SEID BIT_ULL(42) #define ICE_PFCP_S_FIELD BIT_ULL(41) 
#define ICE_IP_PK_ID BIT_ULL(40) +#define ICE_RAW_PATTERN BIT_ULL(39) /* input set */ @@ -104,6 +106,8 @@ (ICE_PROT_GTPU | ICE_GTPU_TEID) #define ICE_INSET_GTPU_QFI \ (ICE_PROT_GTPU | ICE_GTPU_QFI) +#define ICE_INSET_RAW \ + (ICE_PROT_GRE | ICE_RAW_PATTERN) #define ICE_INSET_PPPOE_SESSION \ (ICE_PROT_PPPOE_S | ICE_PPPOE_SESSION) #define ICE_INSET_PPPOE_PROTO \ @@ -291,6 +295,11 @@ extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_udp[]; extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_sctp[]; extern enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_icmp6[]; +/* IPv4 GRE RAW IPv4 */ +extern enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4_udp[]; +extern enum rte_flow_item_type pattern_eth_ipv4_gre_raw_ipv4_tcp[]; + /* IPv4 GTPU (EH) */ extern enum rte_flow_item_type pattern_eth_ipv4_gtpu[]; extern enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh[]; diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index 44046f803c..435ca5a05c 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -196,6 +196,22 @@ #define ICE_SW_INSET_GTPU_IPV6_TCP ( \ ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \ ICE_INSET_TCP_DST_PORT) +#define ICE_SW_INSET_DIST_GRE_RAW_IPV4 ( \ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_RAW) +#define ICE_SW_INSET_DIST_GRE_RAW_IPV4_TCP ( \ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \ + ICE_INSET_RAW) +#define ICE_SW_INSET_DIST_GRE_RAW_IPV4_UDP ( \ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \ + ICE_INSET_RAW) + +#define CUSTOM_GRE_KEY_OFFSET 4 +#define GRE_CFLAG 0x80 +#define GRE_KFLAG 0x20 +#define GRE_SFLAG 0x10 struct sw_meta { struct ice_adv_lkup_elem *list; @@ -317,6 +333,9 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = { 
{pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gre_raw_ipv4, ICE_SW_INSET_DIST_GRE_RAW_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_gre_raw_ipv4_tcp, ICE_SW_INSET_DIST_GRE_RAW_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_gre_raw_ipv4_udp, ICE_SW_INSET_DIST_GRE_RAW_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, }; static struct @@ -608,6 +627,11 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], bool ipv6_ipv6_valid = 0; bool any_valid = 0; uint16_t j, k, t = 0; + uint16_t c_rsvd0_ver = 0; + bool gre_valid = 0; + +#define set_cur_item_einval(msg) \ + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, (msg)) if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ || *tun_type == ICE_NON_TUN_QINQ) @@ -1100,6 +1124,70 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], } break; + case RTE_FLOW_ITEM_TYPE_GRE: { + const struct rte_flow_item_gre *gre_spec = item->spec; + const struct rte_flow_item_gre *gre_mask = item->mask; + + gre_valid = 1; + tunnel_valid = 1; + if (gre_spec && gre_mask) { + list[t].type = ICE_GRE; + if (gre_mask->c_rsvd0_ver) { + /* GRE RFC1701 */ + list[t].h_u.gre_hdr.flags = + gre_spec->c_rsvd0_ver; + list[t].m_u.gre_hdr.flags = + gre_mask->c_rsvd0_ver; + c_rsvd0_ver = gre_spec->c_rsvd0_ver & + gre_mask->c_rsvd0_ver; + } + } + break; + } + + case RTE_FLOW_ITEM_TYPE_RAW: { + const struct rte_flow_item_raw *raw_spec; + char *endp = NULL; + unsigned long key; + char s[sizeof("0x12345678")]; + + raw_spec = item->spec; + + if (list[t].type != ICE_GRE) + return set_cur_item_einval("RAW must follow GRE."); + + if (!(c_rsvd0_ver & GRE_KFLAG)) { + if (!raw_spec) + break; + + return 
set_cur_item_einval("Invalid pattern! k_bit is 0 while raw pattern exists."); + } + + if (!raw_spec) + return set_cur_item_einval("Invalid pattern! k_bit is 1 while raw pattern doesn't exist."); + + if ((c_rsvd0_ver & GRE_CFLAG) == GRE_CFLAG && + raw_spec->offset != CUSTOM_GRE_KEY_OFFSET) + return set_cur_item_einval("Invalid pattern! c_bit is 1 while offset is not 4."); + + if (raw_spec->length >= sizeof(s)) + return set_cur_item_einval("Invalid key"); + + memcpy(s, raw_spec->pattern, raw_spec->length); + s[raw_spec->length] = '\0'; + key = strtol(s, &endp, 16); + if (*endp != '\0' || key > UINT32_MAX) + return set_cur_item_einval("Invalid key"); + + list[t].h_u.gre_hdr.key = (uint32_t)key; + list[t].m_u.gre_hdr.key = UINT32_MAX; + *input |= ICE_INSET_RAW; + input_set_byte += 2; + t++; + + break; + } + case RTE_FLOW_ITEM_TYPE_VLAN: vlan_spec = item->spec; vlan_mask = item->mask; @@ -1633,6 +1721,8 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[], if (*tun_type == ICE_NON_TUN) { if (nvgre_valid) *tun_type = ICE_SW_TUN_NVGRE; + else if (gre_valid) + *tun_type = ICE_SW_TUN_GRE; else if (ipv4_valid && tcp_valid) *tun_type = ICE_SW_IPV4_TCP; else if (ipv4_valid && udp_valid) From patchwork Wed Apr 13 16:09:21 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109656 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E4FD0A0508; Wed, 13 Apr 2022 10:13:24 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E9E4042849; Wed, 13 Apr 2022 10:11:43 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id DAA8642849 for ; Wed, 13 Apr 2022 10:11:41 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; 
c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837502; x=1681373502; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=EX5z1L9Py0aRxNU4gUXbHDsejPvnhgFet+y7t9iFhk4=; b=Pa6lXsgFLxv2ZNsnCzNfuW1gvMWi7VPkQeLuOJmDgEOQTiiBHpg/Gwej IcSv9K9AlZ+G0dAg+ppTS+dH4Fiwi0JfdmAahAHSOhzbz2a4oQHAB5U1N ZbT8mNRSgsiX9eE91PYKUlhqGEJDvwzPAXuRclGFbjmj78HWsNsA+EgX4 2RmHKzF5q+AjReCe3MhoNpw0ppF4CB6u6JeUQvlOqRhw9Cf552z7otbFI Ax+HPwPb+qJ+4Wc1MQh7Iy29j2Jzg/u1VaQVvUFLwTBu+m9DXTwRqxTh7 9ld0i5tE7frLo7Rut0/TIvI4mWKNvKJmN6j6EngCoLNLbnM1IWIl5FWHv g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630080" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630080" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:41 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847824" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:39 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Kevin Liu Subject: [PATCH v2 22/33] net/ice: treat unknown package as OS default package Date: Wed, 13 Apr 2022 16:09:21 +0000 Message-Id: <20220413160932.2074781-23-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang In order to use custom package, unknown package should be treated as OS default package. 
Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_ethdev.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index 73e550f5fb..ad9b09d081 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -1710,13 +1710,16 @@ ice_load_pkg_type(struct ice_hw *hw) /* store the activated package type (OS default or Comms) */ if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME, - ICE_PKG_NAME_SIZE)) + ICE_PKG_NAME_SIZE)) { package_type = ICE_PKG_TYPE_OS_DEFAULT; - else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME, - ICE_PKG_NAME_SIZE)) + } else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME, + ICE_PKG_NAME_SIZE)) { package_type = ICE_PKG_TYPE_COMMS; - else - package_type = ICE_PKG_TYPE_UNKNOWN; + } else { + PMD_INIT_LOG(WARNING, + "The package type is not identified, treaded as OS default type"); + package_type = ICE_PKG_TYPE_OS_DEFAULT; + } PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)", hw->active_pkg_ver.major, hw->active_pkg_ver.minor, From patchwork Wed Apr 13 16:09:22 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109657 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 82676A0508; Wed, 13 Apr 2022 10:13:33 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1629C42851; Wed, 13 Apr 2022 10:11:46 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 574094284F for ; Wed, 13 Apr 2022 10:11:44 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; 
t=1649837504; x=1681373504; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=YfyqBPReywlBNta2D9Qoy3i5xa2INQkEib+o31XfKd4=; b=TNEVhiY3hCDygLFsEnz+CL6BknUqRjbOyTcA0FKBhflu4y0p1ilUffns wpIrwyMT+HVN+xG+AxRYC1LoStBIMt/nVb9Txq82gQXBrp+BhdpDuH2oB bD5K18ftLWzGs4D27nRS7XkPCNKZ+gAC3qL09qN74iDKUahloK5XdsBY9 eeKGLrOYF76QjR0KVnU99kskXRroG3LynIaybMQbbCqx46/0BoGiHnapW fR9/l72oF+okRwsgiwH+/rteDw71JFzCf1Edzhg4TARyAHf1zQMNH1VdS oz2NrabTFI9HIefh59gdxcms0SoVDVSJBOM5oy6DBJKa0Z46p+Q6d//LP A==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630089" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630089" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:43 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847840" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:41 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Junfeng Guo , Kevin Liu Subject: [PATCH v2 23/33] net/ice/base: update Profile ID table for VXLAN Date: Wed, 13 Apr 2022 16:09:22 +0000 Message-Id: <20220413160932.2074781-24-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Junfeng Guo Update Profile ID table for VXLAN to align with Tencent customed DDP. 
Signed-off-by: Junfeng Guo Signed-off-by: Kevin Liu --- drivers/net/ice/base/ice_switch.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/ice/base/ice_switch.h b/drivers/net/ice/base/ice_switch.h index efb9399b77..c8071aa50d 100644 --- a/drivers/net/ice/base/ice_switch.h +++ b/drivers/net/ice/base/ice_switch.h @@ -23,15 +23,15 @@ #define ICE_PROFID_IPV4_TUN_M_IPV4_TCP 10 #define ICE_PROFID_IPV4_TUN_M_IPV4_UDP 11 #define ICE_PROFID_IPV4_TUN_M_IPV4_OTHER 12 -#define ICE_PROFID_IPV6_TUN_M_IPV4_TCP 16 -#define ICE_PROFID_IPV6_TUN_M_IPV4_UDP 17 -#define ICE_PROFID_IPV6_TUN_M_IPV4_OTHER 18 -#define ICE_PROFID_IPV4_TUN_M_IPV6_TCP 22 -#define ICE_PROFID_IPV4_TUN_M_IPV6_UDP 23 -#define ICE_PROFID_IPV4_TUN_M_IPV6_OTHER 24 -#define ICE_PROFID_IPV6_TUN_M_IPV6_TCP 25 -#define ICE_PROFID_IPV6_TUN_M_IPV6_UDP 26 -#define ICE_PROFID_IPV6_TUN_M_IPV6_OTHER 27 +#define ICE_PROFID_IPV6_TUN_M_IPV4_TCP 34 +#define ICE_PROFID_IPV6_TUN_M_IPV4_UDP 35 +#define ICE_PROFID_IPV6_TUN_M_IPV4_OTHER 36 +#define ICE_PROFID_IPV4_TUN_M_IPV6_TCP 40 +#define ICE_PROFID_IPV4_TUN_M_IPV6_UDP 41 +#define ICE_PROFID_IPV4_TUN_M_IPV6_OTHER 42 +#define ICE_PROFID_IPV6_TUN_M_IPV6_TCP 43 +#define ICE_PROFID_IPV6_TUN_M_IPV6_UDP 44 +#define ICE_PROFID_IPV6_TUN_M_IPV6_OTHER 45 #define ICE_PROFID_PPPOE_PAY 34 #define ICE_PROFID_PPPOE_IPV4_TCP 35 #define ICE_PROFID_PPPOE_IPV4_UDP 36 From patchwork Wed Apr 13 16:09:23 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109658 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 942DFA0508; Wed, 13 Apr 2022 10:13:39 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id EE1C64283D; Wed, 13 Apr 2022 10:11:48 +0200 (CEST) Received: 
from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id CBE2F4284F for ; Wed, 13 Apr 2022 10:11:46 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837507; x=1681373507; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=LSCqbl5aDHh6+V4X5Ndg3y1FuFAJ2piQ49GMd3ps9qQ=; b=HN3/qFC++bB+5xqJisVsVMa49Msw18GYd93uhH4lxZtYxvbDbhrrmRyZ C0GR4J5hJ1NqqBYVxehSRLTvH9kNXbXSjejBqRaGR/YVusxXWuHaM9/Im xywvP53LP0DBhhcVGQ/mS0AXMZRP7D2F5ivwpPKPshy3bFe6eGrOvYPD7 Bkw4uEutccYF3AEY4mjNG8JHLNoBPMmAuAWbgWzY/5V51ahpm5Rk30jET dkEEo4+vW4jsUj+gMp/81RLE0VYlWx5bOkIwgvHVq2NydmzcVuIMQI5GW Pg4oq90rKFGP2869UaHgIDiG6qDy3zyZ5lp0qEEB4hUEwJJKLA/agRDDd g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630097" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630097" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:46 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847850" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:44 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Junfeng Guo , Kevin Liu Subject: [PATCH v2 24/33] net/ice/base: update Protocol ID table to match DVM DDP Date: Wed, 13 Apr 2022 16:09:23 +0000 Message-Id: <20220413160932.2074781-25-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: 
List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Junfeng Guo The ice kernel driver and DDP is working in Double VLAN Mode (DVM), but the DVM is not supported on this PMD. Thus update the SW to HW Protocol ID table for VLAN to support common switch filtering with single VLAN layer. Signed-off-by: Junfeng Guo Signed-off-by: Kevin Liu --- drivers/net/ice/base/ice_switch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c index 1b51cd4321..64302b1617 100644 --- a/drivers/net/ice/base/ice_switch.c +++ b/drivers/net/ice/base/ice_switch.c @@ -7098,7 +7098,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_MAC_OFOS, ICE_MAC_OFOS_HW }, { ICE_MAC_IL, ICE_MAC_IL_HW }, { ICE_ETYPE_OL, ICE_ETYPE_OL_HW }, - { ICE_VLAN_OFOS, ICE_VLAN_OL_HW }, + { ICE_VLAN_OFOS, ICE_VLAN_OF_HW }, { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW }, { ICE_IPV4_IL, ICE_IPV4_IL_HW }, { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW }, From patchwork Wed Apr 13 16:09:24 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109659 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9BD1CA0508; Wed, 13 Apr 2022 10:13:45 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id D56AE42858; Wed, 13 Apr 2022 10:11:50 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 779EF4285A for ; Wed, 13 Apr 2022 10:11:49 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837509; x=1681373509; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; 
bh=vx3KvaMnEFzaeQ20Xa1BkP9Xre0h7AyMATPkcVMuFNY=; b=C6RWu+HFl9qh+KJvRAjve3cb7FcYjXB/mFnvuqsfw53yi/MmXwxG+fhN Tld8AqWrWXkxv4TquyVDHFpcZqvn+WWd5Rwmnf7hwG7u6vwTjPtP0CYS0 r6q2K3ZYrIvQ/1eoEwQ46Cvo87Y97LmLuEDp02ZwDz/ADcLoI73fHfqfm o2rcSmS3nkwbWoCttK2u5t5dRF0EvlVXbGvTbcN43yplZqC9v6ewDDyEB I/1wUmVfgLCgcc64OO+7sms/IRbGGLq3DLkMb+G9/AnFYuXKU/4XpjzR/ Wlbk7pas6p8hiM+g3KAQHAJZ9mS5BXGkeVy/AvisQYJt25OunaHLfIVhd A==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630101" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630101" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:48 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847862" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:46 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu Subject: [PATCH v2 25/33] net/ice: handle virtchnl event message without interrupt Date: Wed, 13 Apr 2022 16:09:24 +0000 Message-Id: <20220413160932.2074781-26-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Steve Yang Currently, VF can only handle virtchnl event message by calling interrupt. It is not available in two cases: 1. If the event message comes during VF initialization before interrupt is enabled, this message will not be handled correctly. 2. 
Some virtchnl commands need to receive the event message and handle it with interrupt disabled. To solve this issue, we add the virtchnl event message handling in the process of reading vitchnl messages in adminq from PF. Signed-off-by: Steve Yang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 9c2f13cf72..1415f26ac3 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -63,11 +63,32 @@ ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op, goto again; v_op = rte_le_to_cpu_32(event.desc.cookie_high); - if (v_op != op) - goto again; + + if (v_op == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = + (struct virtchnl_pf_event *)event.msg_buf; + switch (vpe->event) { + case VIRTCHNL_EVENT_RESET_IMPENDING: + hw->resetting = true; + if (rsp_msglen) + *rsp_msglen = 0; + return IAVF_SUCCESS; + default: + goto again; + } + } else { + /* async reply msg on command issued by vf previously */ + if (v_op != op) { + PMD_DRV_LOG(WARNING, + "command mismatch, expect %u, get %u", + op, v_op); + goto again; + } + } if (rsp_msglen != NULL) *rsp_msglen = event.msg_len; + return rte_le_to_cpu_32(event.desc.cookie_low); again: From patchwork Wed Apr 13 16:09:25 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109660 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 933EFA0508; Wed, 13 Apr 2022 10:13:51 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id B4C504285C; Wed, 13 Apr 2022 10:11:52 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org 
(Postfix) with ESMTP id 5B48E4285C for ; Wed, 13 Apr 2022 10:11:51 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837511; x=1681373511; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=7TAsBqQ3YNcnG1Z7KL7RLjVjf3ECygbjVKiDqccpqxU=; b=GxQRXtgPW5ZwEKnynRz0H3cZMhBTCkRKsNpEeWIv3j2Ir9X3scCx5IQl JAJ7j337yJjYPEEudx2wlfA0JVNDyrKVxe9sGtI9cpowu7z1P6Fkvfau4 zXBvSF0rMNSG92XBp696qg4FEifyzWtowN6A3iXgKrpxqoxYIDRvE8UH/ g8qaqwHBJ5Wvuw6Wp3dBIuzna3xOquYFVGORrscyrqbeU8/Ud/BNhI/cS 1nqvoZuLD1gPJM73liXuW2D4ji86OLHXJojKoiXEqsgnP3a5VjU/4hoMr 9WH5RMSK0qEsTzWlwnt0ciMhs9AWEGCe5i9crsl9AeKbUdjM3BPQKV8aX g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630105" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630105" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:50 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847878" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:48 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu Subject: [PATCH v2 26/33] net/ice: add DCF request queues function Date: Wed, 13 Apr 2022 16:09:25 +0000 Message-Id: <20220413160932.2074781-27-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Steve Yang Add a new virtchnl function 
to request additional queues from PF. Current default queue pairs number is 16. In order to support up to 256 queue pairs DCF port, enable this request queues function. Signed-off-by: Steve Yang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 98 +++++++++++++++++++++++++++++++++------ drivers/net/ice/ice_dcf.h | 1 + 2 files changed, 86 insertions(+), 13 deletions(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 1415f26ac3..6aeafa6681 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -257,7 +257,7 @@ ice_dcf_get_vf_resource(struct ice_dcf_hw *hw) VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF | VIRTCHNL_VF_OFFLOAD_VLAN_V2 | VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC | - VIRTCHNL_VF_OFFLOAD_QOS; + VIRTCHNL_VF_OFFLOAD_QOS | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES, (uint8_t *)&caps, sizeof(caps)); @@ -468,18 +468,38 @@ ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw, goto ret; } - do { - if (!cmd->pending) - break; - - rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME); - } while (i++ < ICE_DCF_ARQ_MAX_RETRIES); - - if (cmd->v_ret != IAVF_SUCCESS) { - err = -1; - PMD_DRV_LOG(ERR, - "No response (%d times) or return failure (%d) for cmd %d", - i, cmd->v_ret, cmd->v_op); + switch (cmd->v_op) { + case VIRTCHNL_OP_REQUEST_QUEUES: + err = ice_dcf_recv_cmd_rsp_no_irq(hw, + VIRTCHNL_OP_REQUEST_QUEUES, + cmd->rsp_msgbuf, + cmd->rsp_buflen, + NULL); + if (err != IAVF_SUCCESS || !hw->resetting) { + err = -1; + PMD_DRV_LOG(ERR, + "Failed to get response of " + "VIRTCHNL_OP_REQUEST_QUEUES %d", + err); + } + break; + default: + /* For other virtchnl ops in running time, + * wait for the cmd done flag. 
+ */ + do { + if (!cmd->pending) + break; + rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME); + } while (i++ < ICE_DCF_ARQ_MAX_RETRIES); + + if (cmd->v_ret != IAVF_SUCCESS) { + err = -1; + PMD_DRV_LOG(ERR, + "No response (%d times) or " + "return failure (%d) for cmd %d", + i, cmd->v_ret, cmd->v_op); + } } ret: @@ -1011,6 +1031,58 @@ ice_dcf_configure_queues(struct ice_dcf_hw *hw) return err; } +int +ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num) +{ + struct virtchnl_vf_res_request vfres; + struct dcf_virtchnl_cmd args; + uint16_t num_queue_pairs; + int err; + + if (!(hw->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { + PMD_DRV_LOG(ERR, "request queues not supported"); + return -1; + } + + if (num == 0) { + PMD_DRV_LOG(ERR, "queue number cannot be zero"); + return -1; + } + vfres.num_queue_pairs = num; + + memset(&args, 0, sizeof(args)); + args.v_op = VIRTCHNL_OP_REQUEST_QUEUES; + + args.req_msg = (u8 *)&vfres; + args.req_msglen = sizeof(vfres); + + args.rsp_msgbuf = hw->arq_buf; + args.rsp_msglen = ICE_DCF_AQ_BUF_SZ; + args.rsp_buflen = ICE_DCF_AQ_BUF_SZ; + + /* + * disable interrupt to avoid the admin queue message to be read + * before iavf_read_msg_from_pf. 
+ */ + rte_intr_disable(hw->eth_dev->intr_handle); + err = ice_dcf_execute_virtchnl_cmd(hw, &args); + rte_intr_enable(hw->eth_dev->intr_handle); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES"); + return err; + } + + /* request additional queues failed, return available number */ + num_queue_pairs = ((struct virtchnl_vf_res_request *) + args.rsp_msgbuf)->num_queue_pairs; + PMD_DRV_LOG(ERR, + "request queues failed, only %u queues available", + num_queue_pairs); + + return -1; +} + int ice_dcf_config_irq_map(struct ice_dcf_hw *hw) { diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index 8cf17e7700..99498e2184 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -127,6 +127,7 @@ int ice_dcf_configure_rss_key(struct ice_dcf_hw *hw); int ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw); int ice_dcf_init_rss(struct ice_dcf_hw *hw); int ice_dcf_configure_queues(struct ice_dcf_hw *hw); +int ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num); int ice_dcf_config_irq_map(struct ice_dcf_hw *hw); int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on); int ice_dcf_disable_queues(struct ice_dcf_hw *hw); From patchwork Wed Apr 13 16:09:26 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109661 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1DC3FA0508; Wed, 13 Apr 2022 10:13:58 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id AD4D342864; Wed, 13 Apr 2022 10:11:54 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 93BB64284A for ; Wed, 13 Apr 2022 10:11:53 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; 
c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837513; x=1681373513; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=fIjotZh2MROq4VuuVjbsamRCTCM56b64epyDL+0hz18=; b=Cx1Bo1v+Fl1oDrcwaoXmj4hCQX0lJricmrBBb51xpUrmywghWT5y6S8B cOjRMDSDzVPMXKbOsucoGmQCDFQL0uaeq5x9bKBKPv/Bcxmlu8l0R6GSX +1dykRb7wpcTBSktOdtp+FNq7TS5/jtPDLDT646ZiUKTMbhsOUntOpEvp quny9JVJvXrKf4nEh2J36ofd7gzyn9NsP0nGJqZlayFwXqh8QnCOembWM Kfr84nTT765swZvKnTxbHhfKgHkg9/jp4QVzUMuEsNK3jIdtvfhGJASLK +OEYWJHv/GMRRAWx8tddDpLIu5i2yzajQgOdXJeM5ZYoKACZkBCvd2f7E g==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630113" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630113" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:53 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847892" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:51 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu Subject: [PATCH v2 27/33] net/ice: negotiate large VF and request more queues Date: Wed, 13 Apr 2022 16:09:26 +0000 Message-Id: <20220413160932.2074781-28-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Steve Yang Negotiate large VF capability with PF during VF initialization. 
If large VF is supported and the number of queues larger than 16 is required, VF requests additional queues from PF. Mark the state that large VF is supported. If the allocated queues number is larger than 16, the max RSS queue region cannot be 16 anymore. Add the function to query max RSS queue region from PF, use it in the RSS initialization and future filters configuration. Signed-off-by: Steve Yang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 34 +++++++++++++++- drivers/net/ice/ice_dcf.h | 4 ++ drivers/net/ice/ice_dcf_ethdev.c | 69 +++++++++++++++++++++++++++++++- drivers/net/ice/ice_dcf_ethdev.h | 2 + 4 files changed, 106 insertions(+), 3 deletions(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 6aeafa6681..7091658841 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -257,7 +257,8 @@ ice_dcf_get_vf_resource(struct ice_dcf_hw *hw) VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF | VIRTCHNL_VF_OFFLOAD_VLAN_V2 | VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC | - VIRTCHNL_VF_OFFLOAD_QOS | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + VIRTCHNL_VF_OFFLOAD_QOS | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | + VIRTCHNL_VF_LARGE_NUM_QPAIRS; err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES, (uint8_t *)&caps, sizeof(caps)); @@ -1083,6 +1084,37 @@ ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num) return -1; } +int +ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw) +{ + struct dcf_virtchnl_cmd args; + uint16_t qregion_width; + int err; + + memset(&args, 0, sizeof(args)); + args.v_op = VIRTCHNL_OP_GET_MAX_RSS_QREGION; + args.req_msg = NULL; + args.req_msglen = 0; + args.rsp_msgbuf = hw->arq_buf; + args.rsp_msglen = ICE_DCF_AQ_BUF_SZ; + args.rsp_buflen = ICE_DCF_AQ_BUF_SZ; + + err = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of " + "VIRTCHNL_OP_GET_MAX_RSS_QREGION"); + return err; + } + + qregion_width = ((struct 
virtchnl_max_rss_qregion *) + args.rsp_msgbuf)->qregion_width; + hw->max_rss_qregion = (uint16_t)(1 << qregion_width); + + return 0; +} + + int ice_dcf_config_irq_map(struct ice_dcf_hw *hw) { diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index 99498e2184..05ea91d2a5 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -105,6 +105,7 @@ struct ice_dcf_hw { uint16_t msix_base; uint16_t nb_msix; + uint16_t max_rss_qregion; /* max RSS queue region supported by PF */ uint16_t rxq_map[16]; struct virtchnl_eth_stats eth_stats_offset; struct virtchnl_vlan_caps vlan_v2_caps; @@ -114,6 +115,8 @@ struct ice_dcf_hw { uint32_t link_speed; bool resetting; + /* Indicate large VF support enabled or not */ + bool lv_enabled; }; int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw, @@ -128,6 +131,7 @@ int ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw); int ice_dcf_init_rss(struct ice_dcf_hw *hw); int ice_dcf_configure_queues(struct ice_dcf_hw *hw); int ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num); +int ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw); int ice_dcf_config_irq_map(struct ice_dcf_hw *hw); int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on); int ice_dcf_disable_queues(struct ice_dcf_hw *hw); diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index d4bfa182a4..a43c5a320d 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -39,6 +39,8 @@ static int ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel); +static int ice_dcf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num); + static int ice_dcf_dev_init(struct rte_eth_dev *eth_dev); @@ -663,6 +665,11 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev) { struct ice_dcf_adapter *dcf_ad = dev->data->dev_private; struct ice_adapter *ad = &dcf_ad->parent; + struct ice_dcf_hw *hw = &dcf_ad->real_hw; + int ret; + + uint16_t 
num_queue_pairs = + RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues); ad->rx_bulk_alloc_allowed = true; ad->tx_simple_allowed = true; @@ -670,6 +677,47 @@ ice_dcf_dev_configure(struct rte_eth_dev *dev) if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; + /* Large VF setting */ + if (num_queue_pairs > ICE_DCF_MAX_NUM_QUEUES_DFLT) { + if (!(hw->vf_res->vf_cap_flags & + VIRTCHNL_VF_LARGE_NUM_QPAIRS)) { + PMD_DRV_LOG(ERR, "large VF is not supported"); + return -1; + } + + if (num_queue_pairs > ICE_DCF_MAX_NUM_QUEUES_LV) { + PMD_DRV_LOG(ERR, + "queue pairs number cannot be larger than %u", + ICE_DCF_MAX_NUM_QUEUES_LV); + return -1; + } + + ret = ice_dcf_queues_req_reset(dev, num_queue_pairs); + if (ret) + return ret; + + ret = ice_dcf_get_max_rss_queue_region(hw); + if (ret) { + PMD_INIT_LOG(ERR, "get max rss queue region failed"); + return ret; + } + + hw->lv_enabled = true; + } else { + /* Check if large VF is already enabled. If so, disable and + * release redundant queue resource. 
+ */ + if (hw->lv_enabled) { + ret = ice_dcf_queues_req_reset(dev, num_queue_pairs); + if (ret) + return ret; + + hw->lv_enabled = false; + } + /* if large VF is not required, use default rss queue region */ + hw->max_rss_qregion = ICE_DCF_MAX_NUM_QUEUES_DFLT; + } + return 0; } @@ -681,8 +729,8 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev, struct ice_dcf_hw *hw = &adapter->real_hw; dev_info->max_mac_addrs = DCF_NUM_MACADDR_MAX; - dev_info->max_rx_queues = hw->vsi_res->num_queue_pairs; - dev_info->max_tx_queues = hw->vsi_res->num_queue_pairs; + dev_info->max_rx_queues = ICE_DCF_MAX_NUM_QUEUES_LV; + dev_info->max_tx_queues = ICE_DCF_MAX_NUM_QUEUES_LV; dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN; dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX; dev_info->hash_key_size = hw->vf_res->rss_key_size; @@ -1829,6 +1877,23 @@ ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev) return 0; } +static int ice_dcf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num) +{ + struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_hw *hw = &adapter->real_hw; + int ret; + + ret = ice_dcf_request_queues(hw, num); + if (ret) { + PMD_DRV_LOG(ERR, "request queues from PF failed"); + return ret; + } + PMD_DRV_LOG(INFO, "change queue pairs from %u to %u", + hw->vsi_res->num_queue_pairs, num); + + return ice_dcf_dev_reset(dev); +} + static int ice_dcf_cap_check_handler(__rte_unused const char *key, const char *value, __rte_unused void *opaque) diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h index 27f6402786..4a08d32e0c 100644 --- a/drivers/net/ice/ice_dcf_ethdev.h +++ b/drivers/net/ice/ice_dcf_ethdev.h @@ -20,6 +20,8 @@ #define ICE_DCF_ETH_OVERHEAD \ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_DCF_VLAN_TAG_SIZE * 2) #define ICE_DCF_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_DCF_ETH_OVERHEAD) +#define ICE_DCF_MAX_NUM_QUEUES_DFLT 16 +#define ICE_DCF_MAX_NUM_QUEUES_LV 256 struct ice_dcf_queue { uint64_t dummy; From patchwork Wed Apr 13 16:09:27 2022 
Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109662 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 014A1A0508; Wed, 13 Apr 2022 10:14:04 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 7D78E42842; Wed, 13 Apr 2022 10:11:57 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 4B9EC427FC for ; Wed, 13 Apr 2022 10:11:56 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837516; x=1681373516; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=NIbIicjzmCp9S2UjNaODMmdaSeJOBCRfYx5rHYYnZyc=; b=HJImLa5upGwb3e2GsUIX0MMyzGYzg7DpPqXKBLT4KaKNTOvtX7Hsr/Qj T/FaNEBFW3P1f+/+kxUL4SllDLpTh8Lvr2OMOpkOiC2pU5E2pnZCTPDVU Fd5SWIl26iqTplX6XN2ZuBHo9Z667f1lbdfjd8ObVPb3JCukdwRf3FmXr 4v99sJMmuL6Xqt7VoEKSpt5JuXEZlTlzjyCSvGtwSwZlnLEM4D7TMcpOW wyCFJ6w7XFFmqw6DmXL5U0H6UtDyGWyQvnsj2MS8BB39/B8gR7y8MWqf0 BgBfAXuvZXqk1fnbiRM3meLdG25ul3fWRTLv5gqEyjhBr+BHtQrqHaE+u A==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630123" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630123" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:55 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847903" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:53 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu Subject: 
[PATCH v2 28/33] net/ice: enable multiple queues configurations for large VF Date: Wed, 13 Apr 2022 16:09:27 +0000 Message-Id: <20220413160932.2074781-29-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Steve Yang Since the adminq buffer size has a 4K limitation, the current virtchnl command VIRTCHNL_OP_CONFIG_VSI_QUEUES cannot send the message only once to configure up to 256 queues. In this patch, we send the messages multiple times to make sure that the buffer size is less than 4K each time. Signed-off-by: Steve Yang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 11 ++++++----- drivers/net/ice/ice_dcf.h | 3 ++- drivers/net/ice/ice_dcf_ethdev.c | 20 ++++++++++++++++++-- drivers/net/ice/ice_dcf_ethdev.h | 1 + 4 files changed, 27 insertions(+), 8 deletions(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 7091658841..7004c00f1c 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -949,7 +949,8 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw) #define IAVF_RXDID_COMMS_OVS_1 22 int -ice_dcf_configure_queues(struct ice_dcf_hw *hw) +ice_dcf_configure_queues(struct ice_dcf_hw *hw, + uint16_t num_queue_pairs, uint16_t index) { struct ice_rx_queue **rxq = (struct ice_rx_queue **)hw->eth_dev->data->rx_queues; @@ -962,16 +963,16 @@ ice_dcf_configure_queues(struct ice_dcf_hw *hw) int err; size = sizeof(*vc_config) + - sizeof(vc_config->qpair[0]) * hw->num_queue_pairs; + sizeof(vc_config->qpair[0]) * num_queue_pairs; vc_config = rte_zmalloc("cfg_queue", size, 0); if (!vc_config) return -ENOMEM; vc_config->vsi_id = 
hw->vsi_res->vsi_id; - vc_config->num_queue_pairs = hw->num_queue_pairs; + vc_config->num_queue_pairs = num_queue_pairs; - for (i = 0, vc_qp = vc_config->qpair; - i < hw->num_queue_pairs; + for (i = index, vc_qp = vc_config->qpair; + i < index + num_queue_pairs; i++, vc_qp++) { vc_qp->txq.vsi_id = hw->vsi_res->vsi_id; vc_qp->txq.queue_id = i; diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index 05ea91d2a5..e36428a92a 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -129,7 +129,8 @@ void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw); int ice_dcf_configure_rss_key(struct ice_dcf_hw *hw); int ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw); int ice_dcf_init_rss(struct ice_dcf_hw *hw); -int ice_dcf_configure_queues(struct ice_dcf_hw *hw); +int ice_dcf_configure_queues(struct ice_dcf_hw *hw, + uint16_t num_queue_pairs, uint16_t index); int ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num); int ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw); int ice_dcf_config_irq_map(struct ice_dcf_hw *hw); diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index a43c5a320d..78df82d5b5 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -513,6 +513,8 @@ ice_dcf_dev_start(struct rte_eth_dev *dev) struct rte_intr_handle *intr_handle = dev->intr_handle; struct ice_adapter *ad = &dcf_ad->parent; struct ice_dcf_hw *hw = &dcf_ad->real_hw; + uint16_t num_queue_pairs; + uint16_t index = 0; int ret; if (hw->resetting) { @@ -531,6 +533,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev) hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues); + num_queue_pairs = hw->num_queue_pairs; ret = ice_dcf_init_rx_queues(dev); if (ret) { @@ -546,7 +549,20 @@ ice_dcf_dev_start(struct rte_eth_dev *dev) } } - ret = ice_dcf_configure_queues(hw); + /* If needed, send configure queues msg multiple times to make the + * adminq buffer length 
smaller than the 4K limitation. + */ + while (num_queue_pairs > ICE_DCF_CFG_Q_NUM_PER_BUF) { + if (ice_dcf_configure_queues(hw, + ICE_DCF_CFG_Q_NUM_PER_BUF, index) != 0) { + PMD_DRV_LOG(ERR, "configure queues failed"); + goto err_queue; + } + num_queue_pairs -= ICE_DCF_CFG_Q_NUM_PER_BUF; + index += ICE_DCF_CFG_Q_NUM_PER_BUF; + } + + ret = ice_dcf_configure_queues(hw, num_queue_pairs, index); if (ret) { PMD_DRV_LOG(ERR, "Fail to config queues"); return ret; @@ -586,7 +602,7 @@ ice_dcf_dev_start(struct rte_eth_dev *dev) dev->data->dev_link.link_status = RTE_ETH_LINK_UP; - +err_queue: return 0; } diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h index 4a08d32e0c..2fac1e5b21 100644 --- a/drivers/net/ice/ice_dcf_ethdev.h +++ b/drivers/net/ice/ice_dcf_ethdev.h @@ -22,6 +22,7 @@ #define ICE_DCF_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_DCF_ETH_OVERHEAD) #define ICE_DCF_MAX_NUM_QUEUES_DFLT 16 #define ICE_DCF_MAX_NUM_QUEUES_LV 256 +#define ICE_DCF_CFG_Q_NUM_PER_BUF 32 struct ice_dcf_queue { uint64_t dummy; From patchwork Wed Apr 13 16:09:28 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109663 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 595A8A0508; Wed, 13 Apr 2022 10:14:14 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id A64B64286F; Wed, 13 Apr 2022 10:12:00 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id C5A2E4286E for ; Wed, 13 Apr 2022 10:11:58 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837519; x=1681373519; h=from:to:cc:subject:date:message-id:in-reply-to: 
references:mime-version:content-transfer-encoding; bh=gEEGeqX8JUaOwgf5NT8uyy75ilpyVaI3wVYaYUB7elg=; b=OvcwvBS1a6y8wKeyM6YgU40DvqR8K36+Pe1mL51WCBD+IuldbXyIiRnS 0aFxgIvPv6zcB5gpowI/OlrdJDuuMl0yPcd6n4uXZ0DfBQfub/B1t0stB UJPiBKfRR/jYwrCXYsrlO7jgUQJHc00QSAT23blPA4WmT4hASGZdzwCDW x1/W5RjrRE/SYGJp1AW/L9azHxkTGMcVF43MU8JQstYd3HSxWriufdHjn FfRb66qjB+bi/bhQwofdxzZQHPHFceaf8dfeKJ5F+TFVfFaX9iV4yckG9 QgE4OzsEAiwarmFZT8JecHkDZ+uDgsf/rCjYGaDZdzib9DbtooJsgjf8J Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630134" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630134" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:58 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847912" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:56 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu Subject: [PATCH v2 29/33] net/ice: enable IRQ mapping configuration for large VF Date: Wed, 13 Apr 2022 16:09:28 +0000 Message-Id: <20220413160932.2074781-30-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Steve Yang The current IRQ mapping configuration only supports max 16 queues and 16 MSIX vectors. Change the queue vector mapping structure to indicate up to 256 queues. A new opcode is used to handle the case with large number of queues. 
To avoid adminq buffer size limitation, we support to send the virtchnl message multiple times if needed. Signed-off-by: Steve Yang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 50 +++++++++++++++++++++++++++---- drivers/net/ice/ice_dcf.h | 10 ++++++- drivers/net/ice/ice_dcf_ethdev.c | 51 +++++++++++++++++++++++++++----- drivers/net/ice/ice_dcf_ethdev.h | 1 + 4 files changed, 99 insertions(+), 13 deletions(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 7004c00f1c..290f754049 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -1115,7 +1115,6 @@ ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw) return 0; } - int ice_dcf_config_irq_map(struct ice_dcf_hw *hw) { @@ -1132,13 +1131,14 @@ ice_dcf_config_irq_map(struct ice_dcf_hw *hw) return -ENOMEM; map_info->num_vectors = hw->nb_msix; - for (i = 0; i < hw->nb_msix; i++) { - vecmap = &map_info->vecmap[i]; + for (i = 0; i < hw->eth_dev->data->nb_rx_queues; i++) { + vecmap = + &map_info->vecmap[hw->qv_map[i].vector_id - hw->msix_base]; vecmap->vsi_id = hw->vsi_res->vsi_id; vecmap->rxitr_idx = 0; - vecmap->vector_id = hw->msix_base + i; + vecmap->vector_id = hw->qv_map[i].vector_id; vecmap->txq_map = 0; - vecmap->rxq_map = hw->rxq_map[hw->msix_base + i]; + vecmap->rxq_map |= 1 << hw->qv_map[i].queue_id; } memset(&args, 0, sizeof(args)); @@ -1154,6 +1154,46 @@ ice_dcf_config_irq_map(struct ice_dcf_hw *hw) return err; } +int +ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw, + uint16_t num, uint16_t index) +{ + struct virtchnl_queue_vector_maps *map_info; + struct virtchnl_queue_vector *qv_maps; + struct dcf_virtchnl_cmd args; + int len, i, err; + int count = 0; + + len = sizeof(struct virtchnl_queue_vector_maps) + + sizeof(struct virtchnl_queue_vector) * (num - 1); + + map_info = rte_zmalloc("map_info", len, 0); + if (!map_info) + return -ENOMEM; + + map_info->vport_id = hw->vsi_res->vsi_id; + map_info->num_qv_maps = num; + for (i = index; i < index + 
map_info->num_qv_maps; i++) { + qv_maps = &map_info->qv_maps[count++]; + qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0; + qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX; + qv_maps->queue_id = hw->qv_map[i].queue_id; + qv_maps->vector_id = hw->qv_map[i].vector_id; + } + + args.v_op = VIRTCHNL_OP_MAP_QUEUE_VECTOR; + args.req_msg = (u8 *)map_info; + args.req_msglen = len; + args.rsp_msgbuf = hw->arq_buf; + args.req_msglen = ICE_DCF_AQ_BUF_SZ; + err = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR"); + + rte_free(map_info); + return err; +} + int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on) { diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index e36428a92a..ce57a687ab 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -74,6 +74,11 @@ struct ice_dcf_tm_conf { bool committed; }; +struct ice_dcf_qv_map { + uint16_t queue_id; + uint16_t vector_id; +}; + struct ice_dcf_hw { struct iavf_hw avf; @@ -106,7 +111,8 @@ struct ice_dcf_hw { uint16_t msix_base; uint16_t nb_msix; uint16_t max_rss_qregion; /* max RSS queue region supported by PF */ - uint16_t rxq_map[16]; + + struct ice_dcf_qv_map *qv_map; /* queue vector mapping */ struct virtchnl_eth_stats eth_stats_offset; struct virtchnl_vlan_caps vlan_v2_caps; @@ -134,6 +140,8 @@ int ice_dcf_configure_queues(struct ice_dcf_hw *hw, int ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num); int ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw); int ice_dcf_config_irq_map(struct ice_dcf_hw *hw); +int ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw, + uint16_t num, uint16_t index); int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on); int ice_dcf_disable_queues(struct ice_dcf_hw *hw); int ice_dcf_query_stats(struct ice_dcf_hw *hw, diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 78df82d5b5..1ddba02ebb 100644 --- 
a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -143,6 +143,7 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev, { struct ice_dcf_adapter *adapter = dev->data->dev_private; struct ice_dcf_hw *hw = &adapter->real_hw; + struct ice_dcf_qv_map *qv_map; uint16_t interval, i; int vec; @@ -161,6 +162,14 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev, } } + qv_map = rte_zmalloc("qv_map", + dev->data->nb_rx_queues * sizeof(struct ice_dcf_qv_map), 0); + if (!qv_map) { + PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map", + dev->data->nb_rx_queues); + return -1; + } + if (!dev->data->dev_conf.intr_conf.rxq || !rte_intr_dp_is_en(intr_handle)) { /* Rx interrupt disabled, Map interrupt only for writeback */ @@ -196,17 +205,22 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev, } IAVF_WRITE_FLUSH(&hw->avf); /* map all queues to the same interrupt */ - for (i = 0; i < dev->data->nb_rx_queues; i++) - hw->rxq_map[hw->msix_base] |= 1 << i; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + qv_map[i].queue_id = i; + qv_map[i].vector_id = hw->msix_base; + } + hw->qv_map = qv_map; } else { if (!rte_intr_allow_others(intr_handle)) { hw->nb_msix = 1; hw->msix_base = IAVF_MISC_VEC_ID; for (i = 0; i < dev->data->nb_rx_queues; i++) { - hw->rxq_map[hw->msix_base] |= 1 << i; + qv_map[i].queue_id = i; + qv_map[i].vector_id = hw->msix_base; rte_intr_vec_list_index_set(intr_handle, i, IAVF_MISC_VEC_ID); } + hw->qv_map = qv_map; PMD_DRV_LOG(DEBUG, "vector %u are mapping to all Rx queues", hw->msix_base); @@ -219,21 +233,44 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev, hw->msix_base = IAVF_MISC_VEC_ID; vec = IAVF_MISC_VEC_ID; for (i = 0; i < dev->data->nb_rx_queues; i++) { - hw->rxq_map[vec] |= 1 << i; + qv_map[i].queue_id = i; + qv_map[i].vector_id = vec; rte_intr_vec_list_index_set(intr_handle, i, vec++); if (vec >= hw->nb_msix) vec = IAVF_RX_VEC_START; } + hw->qv_map = qv_map; PMD_DRV_LOG(DEBUG, "%u vectors are mapping to %u 
Rx queues", hw->nb_msix, dev->data->nb_rx_queues); } } - if (ice_dcf_config_irq_map(hw)) { - PMD_DRV_LOG(ERR, "config interrupt mapping failed"); - return -1; + if (!hw->lv_enabled) { + if (ice_dcf_config_irq_map(hw)) { + PMD_DRV_LOG(ERR, "config interrupt mapping failed"); + return -1; + } + } else { + uint16_t num_qv_maps = dev->data->nb_rx_queues; + uint16_t index = 0; + + while (num_qv_maps > ICE_DCF_IRQ_MAP_NUM_PER_BUF) { + if (ice_dcf_config_irq_map_lv(hw, + ICE_DCF_IRQ_MAP_NUM_PER_BUF, index)) { + PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed"); + return -1; + } + num_qv_maps -= ICE_DCF_IRQ_MAP_NUM_PER_BUF; + index += ICE_DCF_IRQ_MAP_NUM_PER_BUF; + } + + if (ice_dcf_config_irq_map_lv(hw, num_qv_maps, index)) { + PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed"); + return -1; + } + } return 0; } diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h index 2fac1e5b21..9ef524c97c 100644 --- a/drivers/net/ice/ice_dcf_ethdev.h +++ b/drivers/net/ice/ice_dcf_ethdev.h @@ -23,6 +23,7 @@ #define ICE_DCF_MAX_NUM_QUEUES_DFLT 16 #define ICE_DCF_MAX_NUM_QUEUES_LV 256 #define ICE_DCF_CFG_Q_NUM_PER_BUF 32 +#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128 struct ice_dcf_queue { uint64_t dummy; From patchwork Wed Apr 13 16:09:29 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109664 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 24B3EA0508; Wed, 13 Apr 2022 10:14:20 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9C61642810; Wed, 13 Apr 2022 10:12:03 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 0CAE14286E for ; Wed, 13 Apr 2022 10:12:01 +0200 (CEST) 
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837522; x=1681373522; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=9he4SI3BOgx/8v3pcNkRUBde9SioYkxnwTjlp+0lFNY=; b=LLCBg1E0J/ajsLHRSXuFooAfxEC5ifOsi7Zgk5Aok+VnGa2hxxOy+NC1 8mAjr7KhqlNmfAFcJx9iyfxJygXnEudyRTEMhwpgyuWtW1wqJJMsmMPhQ x1LeUoqMTdYOqNxh+MFRBNOKRcazYQ8o05H8UJ+shoJpGEtJsQdqe+Cg3 IAbtaY15VyqV2z2PEEzB9wVVZNEn2Pgcpl+CP1dishPTvGASvgxpOzVCB 6wrHOE9YEdybRG8dxuvHuwFYZqMIWMdQIZkTGxbAs7TuYfKopursbPqbR lUgGHSgys33Xt5Iv1i7IcQwbd+xV6jVLVhOjGDVxsa4I41vhXsqswWaIV Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630144" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630144" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:12:00 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847923" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:11:58 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu Subject: [PATCH v2 30/33] net/ice: add enable/disable queues for DCF large VF Date: Wed, 13 Apr 2022 16:09:29 +0000 Message-Id: <20220413160932.2074781-31-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The current virtchnl structure for enable/disable queues only supports max 32 queue pairs. 
Use a new opcode and structure to indicate up to 256 queue pairs, in order to enable/disable queues in large VF case. Signed-off-by: Steve Yang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_dcf.c | 99 +++++++++++++++++++++++++++++++- drivers/net/ice/ice_dcf.h | 5 ++ drivers/net/ice/ice_dcf_ethdev.c | 26 +++++++-- drivers/net/ice/ice_dcf_ethdev.h | 8 +-- 4 files changed, 125 insertions(+), 13 deletions(-) diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 290f754049..23edfd09b1 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -90,7 +90,6 @@ ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op, *rsp_msglen = event.msg_len; return rte_le_to_cpu_32(event.desc.cookie_low); - again: rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME); } while (i++ < ICE_DCF_ARQ_MAX_RETRIES); @@ -896,7 +895,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw) { struct rte_eth_dev *dev = hw->eth_dev; struct rte_eth_rss_conf *rss_conf; - uint8_t i, j, nb_q; + uint16_t i, j, nb_q; int ret; rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf; @@ -1075,6 +1074,12 @@ ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num) return err; } + /* request queues succeeded, vf is resetting */ + if (hw->resetting) { + PMD_DRV_LOG(INFO, "vf is resetting"); + return 0; + } + /* request additional queues failed, return available number */ num_queue_pairs = ((struct virtchnl_vf_res_request *) args.rsp_msgbuf)->num_queue_pairs; @@ -1185,7 +1190,8 @@ ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw, args.req_msg = (u8 *)map_info; args.req_msglen = len; args.rsp_msgbuf = hw->arq_buf; - args.req_msglen = ICE_DCF_AQ_BUF_SZ; + args.rsp_msglen = ICE_DCF_AQ_BUF_SZ; + args.rsp_buflen = ICE_DCF_AQ_BUF_SZ; err = ice_dcf_execute_virtchnl_cmd(hw, &args); if (err) PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR"); @@ -1225,6 +1231,50 @@ ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on) return err; } +int 
+ice_dcf_switch_queue_lv(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on) +{ + struct virtchnl_del_ena_dis_queues *queue_select; + struct virtchnl_queue_chunk *queue_chunk; + struct dcf_virtchnl_cmd args; + int err, len; + + len = sizeof(struct virtchnl_del_ena_dis_queues); + queue_select = rte_zmalloc("queue_select", len, 0); + if (!queue_select) + return -ENOMEM; + + queue_chunk = queue_select->chunks.chunks; + queue_select->chunks.num_chunks = 1; + queue_select->vport_id = hw->vsi_res->vsi_id; + + if (rx) { + queue_chunk->type = VIRTCHNL_QUEUE_TYPE_RX; + queue_chunk->start_queue_id = qid; + queue_chunk->num_queues = 1; + } else { + queue_chunk->type = VIRTCHNL_QUEUE_TYPE_TX; + queue_chunk->start_queue_id = qid; + queue_chunk->num_queues = 1; + } + + if (on) + args.v_op = VIRTCHNL_OP_ENABLE_QUEUES_V2; + else + args.v_op = VIRTCHNL_OP_DISABLE_QUEUES_V2; + args.req_msg = (u8 *)queue_select; + args.req_msglen = len; + args.rsp_msgbuf = hw->arq_buf; + args.rsp_msglen = ICE_DCF_AQ_BUF_SZ; + args.rsp_buflen = ICE_DCF_AQ_BUF_SZ; + err = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of %s", + on ? 
"OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2"); + rte_free(queue_select); + return err; +} + int ice_dcf_disable_queues(struct ice_dcf_hw *hw) { @@ -1254,6 +1304,49 @@ ice_dcf_disable_queues(struct ice_dcf_hw *hw) return err; } +int +ice_dcf_disable_queues_lv(struct ice_dcf_hw *hw) +{ + struct virtchnl_del_ena_dis_queues *queue_select; + struct virtchnl_queue_chunk *queue_chunk; + struct dcf_virtchnl_cmd args; + int err, len; + + len = sizeof(struct virtchnl_del_ena_dis_queues) + + sizeof(struct virtchnl_queue_chunk) * + (ICE_DCF_RXTX_QUEUE_CHUNKS_NUM - 1); + queue_select = rte_zmalloc("queue_select", len, 0); + if (!queue_select) + return -ENOMEM; + + queue_chunk = queue_select->chunks.chunks; + queue_select->chunks.num_chunks = ICE_DCF_RXTX_QUEUE_CHUNKS_NUM; + queue_select->vport_id = hw->vsi_res->vsi_id; + + queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX; + queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0; + queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues = + hw->eth_dev->data->nb_tx_queues; + + queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX; + queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0; + queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues = + hw->eth_dev->data->nb_rx_queues; + + args.v_op = VIRTCHNL_OP_DISABLE_QUEUES_V2; + args.req_msg = (u8 *)queue_select; + args.req_msglen = len; + args.rsp_msgbuf = hw->arq_buf; + args.rsp_msglen = ICE_DCF_AQ_BUF_SZ; + args.rsp_buflen = ICE_DCF_AQ_BUF_SZ; + err = ice_dcf_execute_virtchnl_cmd(hw, &args); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_DISABLE_QUEUES_V2"); + rte_free(queue_select); + return err; +} + int ice_dcf_query_stats(struct ice_dcf_hw *hw, struct virtchnl_eth_stats *pstats) diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index ce57a687ab..78ab23aaa6 100644 --- a/drivers/net/ice/ice_dcf.h +++ b/drivers/net/ice/ice_dcf.h @@ -15,6 +15,8 @@ #include "base/ice_type.h" #include "ice_logs.h" +#define 
ICE_DCF_RXTX_QUEUE_CHUNKS_NUM 2 + struct dcf_virtchnl_cmd { TAILQ_ENTRY(dcf_virtchnl_cmd) next; @@ -143,7 +145,10 @@ int ice_dcf_config_irq_map(struct ice_dcf_hw *hw); int ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw, uint16_t num, uint16_t index); int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on); +int ice_dcf_switch_queue_lv(struct ice_dcf_hw *hw, + uint16_t qid, bool rx, bool on); int ice_dcf_disable_queues(struct ice_dcf_hw *hw); +int ice_dcf_disable_queues_lv(struct ice_dcf_hw *hw); int ice_dcf_query_stats(struct ice_dcf_hw *hw, struct virtchnl_eth_stats *pstats); int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index 1ddba02ebb..e46c8405aa 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -317,6 +317,7 @@ static int ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ice_dcf_adapter *ad = dev->data->dev_private; + struct ice_dcf_hw *dcf_hw = &ad->real_hw; struct iavf_hw *hw = &ad->real_hw.avf; struct ice_rx_queue *rxq; int err = 0; @@ -339,7 +340,11 @@ ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) IAVF_WRITE_FLUSH(hw); /* Ready to switch the queue on */ - err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true); + if (!dcf_hw->lv_enabled) + err = ice_dcf_switch_queue(dcf_hw, rx_queue_id, true, true); + else + err = ice_dcf_switch_queue_lv(dcf_hw, rx_queue_id, true, true); + if (err) { PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", rx_queue_id); @@ -448,6 +453,7 @@ static int ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { struct ice_dcf_adapter *ad = dev->data->dev_private; + struct ice_dcf_hw *dcf_hw = &ad->real_hw; struct iavf_hw *hw = &ad->real_hw.avf; struct ice_tx_queue *txq; int err = 0; @@ -463,7 +469,10 @@ ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) IAVF_WRITE_FLUSH(hw); /* Ready to 
switch the queue on */ - err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true); + if (!dcf_hw->lv_enabled) + err = ice_dcf_switch_queue(dcf_hw, tx_queue_id, false, true); + else + err = ice_dcf_switch_queue_lv(dcf_hw, tx_queue_id, false, true); if (err) { PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", @@ -650,12 +659,17 @@ ice_dcf_stop_queues(struct rte_eth_dev *dev) struct ice_dcf_hw *hw = &ad->real_hw; struct ice_rx_queue *rxq; struct ice_tx_queue *txq; - int ret, i; + int i; /* Stop All queues */ - ret = ice_dcf_disable_queues(hw); - if (ret) - PMD_DRV_LOG(WARNING, "Fail to stop queues"); + if (!hw->lv_enabled) { + if (ice_dcf_disable_queues(hw)) + PMD_DRV_LOG(WARNING, "Fail to stop queues"); + } else { + if (ice_dcf_disable_queues_lv(hw)) + PMD_DRV_LOG(WARNING, + "Fail to stop queues for large VF"); + } for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h index 9ef524c97c..3f740e2c7b 100644 --- a/drivers/net/ice/ice_dcf_ethdev.h +++ b/drivers/net/ice/ice_dcf_ethdev.h @@ -20,10 +20,10 @@ #define ICE_DCF_ETH_OVERHEAD \ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_DCF_VLAN_TAG_SIZE * 2) #define ICE_DCF_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_DCF_ETH_OVERHEAD) -#define ICE_DCF_MAX_NUM_QUEUES_DFLT 16 -#define ICE_DCF_MAX_NUM_QUEUES_LV 256 -#define ICE_DCF_CFG_Q_NUM_PER_BUF 32 -#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128 +#define ICE_DCF_MAX_NUM_QUEUES_DFLT 16 +#define ICE_DCF_MAX_NUM_QUEUES_LV 256 +#define ICE_DCF_CFG_Q_NUM_PER_BUF 32 +#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128 struct ice_dcf_queue { uint64_t dummy; From patchwork Wed Apr 13 16:09:30 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109665 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by 
inbox.dpdk.org (Postfix) with ESMTP id B9BB1A0508; Wed, 13 Apr 2022 10:14:25 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9ED7942855; Wed, 13 Apr 2022 10:12:06 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 29F4142869 for ; Wed, 13 Apr 2022 10:12:03 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837524; x=1681373524; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=nQQnY1ahkr9CHA4kMGvk++J2qLxA6EMDciLvNmNIlsc=; b=lBVqMNgtcNPXaWU7bo6ttWpbfc2iKtMQXjtLjtNF5rvyGgsshQWlAJqA XRBjITW9qWg+b6v19UzuaRTZRRcTlVm6qnf2qOnojbck3+NXGggTPKR77 DySlqD6xu+8fMvdhnZwgcYj3GldMfbyuSgYxCf2L6Pyg/6DCvGlmZksRs CNI8xsZf1E2AI85bOnw300OX1NwyD8X1hdP6jgHar7gK9ZHGlnn4XnolO BcvEcbquOJjbCEwntHu3h3z8nSJX7hFVYSI9/U5mTwW4N681aAfwfskM+ mw7PN/4cwODgwvZ8haRTY1XzVm10lashVkCvjr1QqTa3cVi4RMbIjI+dT Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630151" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630151" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:12:03 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847933" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:12:00 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Alvin Zhang , Kevin Liu Subject: [PATCH v2 31/33] net/ice: fix DCF ACL flow engine Date: Wed, 13 Apr 2022 16:09:30 +0000 Message-Id: <20220413160932.2074781-32-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: 
<20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Alvin Zhang ACL is not a necessary feature for DCF, it may not be supported by the ice kernel driver, so in this patch the program does not return the ACL initiation fails to high level functions, as substitute it prints some error logs, cleans the related resources and unregisters the ACL engine. Fixes: 40d466fa9f76 ("net/ice: support ACL filter in DCF") Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/ice_acl_filter.c | 20 ++++++++++++++---- drivers/net/ice/ice_generic_flow.c | 34 +++++++++++++++++++++++------- 2 files changed, 42 insertions(+), 12 deletions(-) diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c index 8fe6f5aeb0..20a1f86c43 100644 --- a/drivers/net/ice/ice_acl_filter.c +++ b/drivers/net/ice/ice_acl_filter.c @@ -56,6 +56,8 @@ ice_pattern_match_item ice_acl_pattern[] = { {pattern_eth_ipv4_sctp, ICE_ACL_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE, ICE_INSET_NONE}, }; +static void ice_acl_prof_free(struct ice_hw *hw); + static int ice_acl_prof_alloc(struct ice_hw *hw) { @@ -1007,17 +1009,27 @@ ice_acl_init(struct ice_adapter *ad) ret = ice_acl_setup(pf); if (ret) - return ret; + goto deinit_acl; ret = ice_acl_bitmap_init(pf); if (ret) - return ret; + goto deinit_acl; ret = ice_acl_prof_init(pf); if (ret) - return ret; + goto deinit_acl; - return ice_register_parser(parser, ad); + ret = ice_register_parser(parser, ad); + if (ret) + goto deinit_acl; + + return 0; + +deinit_acl: + ice_deinit_acl(pf); + ice_acl_prof_free(hw); + PMD_DRV_LOG(ERR, "ACL init failed, may not supported!"); + return ret; } static void diff --git a/drivers/net/ice/ice_generic_flow.c 
b/drivers/net/ice/ice_generic_flow.c index 6663a85ed0..e9e4d776b2 100644 --- a/drivers/net/ice/ice_generic_flow.c +++ b/drivers/net/ice/ice_generic_flow.c @@ -1864,6 +1864,12 @@ ice_register_flow_engine(struct ice_flow_engine *engine) TAILQ_INSERT_TAIL(&engine_list, engine, node); } +static void +ice_unregister_flow_engine(struct ice_flow_engine *engine) +{ + TAILQ_REMOVE(&engine_list, engine, node); +} + int ice_flow_init(struct ice_adapter *ad) { @@ -1887,9 +1893,18 @@ ice_flow_init(struct ice_adapter *ad) ret = engine->init(ad); if (ret) { - PMD_INIT_LOG(ERR, "Failed to initialize engine %d", - engine->type); - return ret; + /** + * ACL may not supported in kernel driver, + * so just unregister the engine. + */ + if (engine->type == ICE_FLOW_ENGINE_ACL) { + ice_unregister_flow_engine(engine); + } else { + PMD_INIT_LOG(ERR, + "Failed to initialize engine %d", + engine->type); + return ret; + } } } return 0; @@ -1976,7 +1991,7 @@ ice_register_parser(struct ice_flow_parser *parser, list = ice_get_parser_list(parser, ad); if (list == NULL) - return -EINVAL; + goto err; if (ad->devargs.pipe_mode_support) { TAILQ_INSERT_TAIL(list, parser_node, node); @@ -1988,7 +2003,7 @@ ice_register_parser(struct ice_flow_parser *parser, ICE_FLOW_ENGINE_ACL) { TAILQ_INSERT_AFTER(list, existing_node, parser_node, node); - goto DONE; + return 0; } } TAILQ_INSERT_HEAD(list, parser_node, node); @@ -1999,7 +2014,7 @@ ice_register_parser(struct ice_flow_parser *parser, ICE_FLOW_ENGINE_SWITCH) { TAILQ_INSERT_AFTER(list, existing_node, parser_node, node); - goto DONE; + return 0; } } TAILQ_INSERT_HEAD(list, parser_node, node); @@ -2008,11 +2023,14 @@ ice_register_parser(struct ice_flow_parser *parser, } else if (parser->engine->type == ICE_FLOW_ENGINE_ACL) { TAILQ_INSERT_HEAD(list, parser_node, node); } else { - return -EINVAL; + goto err; } } -DONE: return 0; +err: + rte_free(parser_node); + PMD_DRV_LOG(ERR, "%s failed.", __func__); + return -EINVAL; } void From patchwork Wed Apr 13 
16:09:31 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109666 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 8C0B0A0508; Wed, 13 Apr 2022 10:14:31 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9D29542823; Wed, 13 Apr 2022 10:12:09 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id A4DFB42855 for ; Wed, 13 Apr 2022 10:12:05 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837525; x=1681373525; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=6916rklRMOIDhdIxoTtjqeyYdrSxqKUKN4C0RUzgDyE=; b=lysNo3xc1qtKvY5qV4mlGf4NuJAJxzLAWTXZr7vO91i8m2I1/zKse6nh YXhapsi/yKMQqokQIKanrArbjrZcwfN+tYhz2dtwq5f1JUB9h455AqqAd Gy5XKsiFhq0Ii2VrpbfzRoK271huL+SGuUyTPMAMaalwjYj26EPsOWpZo /Gmys+oI5mppoeT9T+LVA766YilmLWVbmBP9NpFiRSjUHBmCyLLEhbVxX KD2uEslw3ZPStYdy59kKOsj1E9zyN89RMffPiE7sWc+q7FSwvvvJcgLl0 fkaeiHEk3hJ/fjucJ00jZKZOSSmBQ6AVppc/2jmU/cqU5rHGKsWKeXc6F A==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630159" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630159" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:12:05 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847943" Received: from intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:12:03 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu 
Subject: [PATCH v2 32/33] testpmd: force flow flush Date: Wed, 13 Apr 2022 16:09:31 +0000 Message-Id: <20220413160932.2074781-33-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Qi Zhang For mdcf, rte_flow_flush still needs to be invoked even if there are no flows created in the current instance. Signed-off-by: Qi Zhang Signed-off-by: Kevin Liu --- app/test-pmd/config.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index cc8e7aa138..3d40e3e43d 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -2923,15 +2923,15 @@ port_flow_flush(portid_t port_id) port = &ports[port_id]; - if (port->flow_list == NULL) - return ret; - /* Poisoning to make sure PMDs update it in case of error. 
*/ memset(&error, 0x44, sizeof(error)); if (rte_flow_flush(port_id, &error)) { port_flow_complain(&error); } + if (port->flow_list == NULL) + return ret; + while (port->flow_list) { struct port_flow *pf = port->flow_list->next; From patchwork Wed Apr 13 16:09:32 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Liu X-Patchwork-Id: 109667 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 869E3A0508; Wed, 13 Apr 2022 10:14:37 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9D6DD42874; Wed, 13 Apr 2022 10:12:10 +0200 (CEST) Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by mails.dpdk.org (Postfix) with ESMTP id 1800D42871 for ; Wed, 13 Apr 2022 10:12:07 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1649837528; x=1681373528; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=dAIbwqYxVmw5fWOkgq11u1+5kzjeVixuJse3T9of4uI=; b=YjyVa6yDkLnJWK+9AuQiiUWc1AmLLHBBAmjgiReFKcDVAsHN2VNqzZse VRdLKTe8C2mrlUSzmzHCAtFd1Atf/1ROj8DB6dLzUXfaMqpfbCM3J4Dct kBah1TNGOpvNCX6Si+PzKd7b1BXy4S8kOZ1uw/PybuvVWLBDqO6lZInd4 cz+28eoQX5Jj8618zxDWJSnKX1jSf3oHRqgtbqwbEbollV9eM4atg08b6 vqTjjmviH/aaSgWz6qOCvzsxy/V0SXMhQXAvcuh3CkhQExYvjdpuEwqLq WZPuiP5T8UM19uu2+zHANChuQfSPzWX3K8LjsV02+udAWD/qzIq9RljTK Q==; X-IronPort-AV: E=McAfee;i="6400,9594,10315"; a="287630163" X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="287630163" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:12:07 -0700 X-IronPort-AV: E=Sophos;i="5.90,256,1643702400"; d="scan'208";a="526847963" Received: from 
intel-cd-odc-kevin.cd.intel.com ([10.240.178.195]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Apr 2022 01:12:05 -0700 From: Kevin Liu To: dev@dpdk.org Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com, Kevin Liu , Alvin Zhang Subject: [PATCH v2 33/33] net/ice: fix DCF reset Date: Wed, 13 Apr 2022 16:09:32 +0000 Message-Id: <20220413160932.2074781-34-kevinx.liu@intel.com> X-Mailer: git-send-email 2.33.1 In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com> References: <20220407105706.18889-1-kevinx.liu@intel.com> <20220413160932.2074781-1-kevinx.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org After the PF triggers the VF reset, before the VF PMD can perform any operations on the hardware, it must reinitialize all the resources. This patch adds a flag to indicate whether the VF has been reset by PF, and updates the DCF resetting operations according to this flag.
Fixes: 1a86f4dbdf42 ("net/ice: support DCF device reset") Signed-off-by: Alvin Zhang Signed-off-by: Kevin Liu --- drivers/net/ice/base/ice_common.c | 4 +++- drivers/net/ice/ice_dcf.c | 2 +- drivers/net/ice/ice_dcf_ethdev.c | 17 ++++++++++++++++- drivers/net/ice/ice_dcf_parent.c | 3 +++ 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c index db87bacd97..13feb55469 100644 --- a/drivers/net/ice/base/ice_common.c +++ b/drivers/net/ice/base/ice_common.c @@ -755,6 +755,7 @@ enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list); if (status) { ice_free(hw, hw->switch_info); + hw->switch_info = NULL; return status; } return ICE_SUCCESS; @@ -823,7 +824,6 @@ ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw) } ice_rm_sw_replay_rule_info(hw, sw); ice_free(hw, sw->recp_list); - ice_free(hw, sw); } /** @@ -833,6 +833,8 @@ ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw) void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) { ice_cleanup_fltr_mgmt_single(hw, hw->switch_info); + ice_free(hw, hw->switch_info); + hw->switch_info = NULL; } /** diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index 23edfd09b1..35773e2acd 100644 --- a/drivers/net/ice/ice_dcf.c +++ b/drivers/net/ice/ice_dcf.c @@ -1429,7 +1429,7 @@ ice_dcf_cap_reset(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) int ret; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; ice_dcf_disable_irq0(hw); rte_intr_disable(intr_handle); diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c index e46c8405aa..0315e694d7 100644 --- a/drivers/net/ice/ice_dcf_ethdev.c +++ b/drivers/net/ice/ice_dcf_ethdev.c @@ -1004,6 +1004,15 @@ 
dcf_add_del_mc_addr_list(struct ice_dcf_hw *hw, uint32_t i; int len, err = 0; + if (hw->resetting) { + if (!add) + return 0; + + PMD_DRV_LOG(ERR, + "fail to add multicast MACs for VF resetting"); + return -EIO; + } + len = sizeof(struct virtchnl_ether_addr_list); len += sizeof(struct virtchnl_ether_addr) * mc_addrs_num; @@ -1642,7 +1651,13 @@ ice_dcf_dev_close(struct rte_eth_dev *dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - (void)ice_dcf_dev_stop(dev); + if (adapter->parent.pf.adapter_stopped) + (void)ice_dcf_dev_stop(dev); + + if (adapter->real_hw.resetting) { + ice_dcf_uninit_hw(dev, &adapter->real_hw); + ice_dcf_init_hw(dev, &adapter->real_hw); + } ice_free_queues(dev); diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c index 2f96dedcce..7f7ed796e2 100644 --- a/drivers/net/ice/ice_dcf_parent.c +++ b/drivers/net/ice/ice_dcf_parent.c @@ -240,6 +240,9 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw, case VIRTCHNL_EVENT_RESET_IMPENDING: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event"); dcf_hw->resetting = true; + rte_eth_dev_callback_process(dcf_hw->eth_dev, + RTE_ETH_EVENT_INTR_RESET, + NULL); break; case VIRTCHNL_EVENT_LINK_CHANGE: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");