From patchwork Mon Feb 13 02:19:54 2023
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Liu, Mingxia" <mingxia.liu@intel.com>
X-Patchwork-Id: 123755
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org, beilei.xing@intel.com, yuying.zhang@intel.com
Cc: Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH v6 19/21] net/cpfl: add RSS set/get ops
Date: Mon, 13 Feb 2023 02:19:54 +0000
Message-Id: <20230213021956.2953088-20-mingxia.liu@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230213021956.2953088-1-mingxia.liu@intel.com>
References: <20230209084541.2712723-1-mingxia.liu@intel.com>
 <20230213021956.2953088-1-mingxia.liu@intel.com>

Add support for these device ops:
- rss_reta_update
- rss_reta_query
- rss_hash_update
- rss_hash_conf_get

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 270 ++++++++++++++++++++++++++++++++-
 1 file changed, 269 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 0fb9f0455b..d2387b9a39 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -30,6 +30,56 @@ static const char * const cpfl_valid_args[] = {
 	NULL
 };
 
+static const uint64_t cpfl_map_hena_rss[] = {
+	[IDPF_HASH_NONF_UNICAST_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_MULTICAST_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK] =
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	[IDPF_HASH_NONF_IPV4_TCP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	[IDPF_HASH_NONF_IPV4_SCTP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
+	[IDPF_HASH_NONF_IPV4_OTHER] =
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+	[IDPF_HASH_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
+
+	/* IPv6 */
+	[IDPF_HASH_NONF_UNICAST_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_MULTICAST_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK] =
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+	[IDPF_HASH_NONF_IPV6_TCP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+	[IDPF_HASH_NONF_IPV6_SCTP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
+	[IDPF_HASH_NONF_IPV6_OTHER] =
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+	[IDPF_HASH_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
+
+	/* L2 Payload */
+	[IDPF_HASH_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
+};
+
+static const uint64_t cpfl_ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+			RTE_ETH_RSS_FRAG_IPV4;
+
+static const uint64_t cpfl_ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+			RTE_ETH_RSS_FRAG_IPV6;
+
 static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
 		     __rte_unused int wait_to_complete)
@@ -97,6 +147,9 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = vport->max_mtu;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
+	dev_info->hash_key_size = vport->rss_key_size;
+	dev_info->reta_size = vport->rss_lut_size;
+
 	dev_info->flow_type_rss_offloads = CPFL_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
@@ -259,6 +312,36 @@ cpfl_dev_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int cpfl_config_rss_hf(struct idpf_vport *vport, uint64_t rss_hf)
+{
+	uint64_t hena = 0;
+	uint16_t i;
+
+	/**
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
+	 * generalizations of all other IPv4 and IPv6 RSS types.
+	 */
+	if (rss_hf & RTE_ETH_RSS_IPV4)
+		rss_hf |= cpfl_ipv4_rss;
+
+	if (rss_hf & RTE_ETH_RSS_IPV6)
+		rss_hf |= cpfl_ipv6_rss;
+
+	for (i = 0; i < RTE_DIM(cpfl_map_hena_rss); i++) {
+		if (cpfl_map_hena_rss[i] & rss_hf)
+			hena |= BIT_ULL(i);
+	}
+
+	/**
+	 * At present, the CP doesn't process the virtual channel message for
+	 * rss_hf configuration, so only a warning is logged below.
+	 */
+	if (hena != vport->rss_hf)
+		PMD_DRV_LOG(WARNING, "Updating RSS Hash Function is not supported at present.");
+
+	return 0;
+}
+
 static int
 cpfl_init_rss(struct idpf_vport *vport)
 {
@@ -279,7 +362,7 @@ cpfl_init_rss(struct idpf_vport *vport)
 			     vport->rss_key_size);
 		return -EINVAL;
 	} else {
-		rte_memcpy(vport->rss_key, rss_conf->rss_key,
+		memcpy(vport->rss_key, rss_conf->rss_key,
 			   vport->rss_key_size);
 	}
 
@@ -295,6 +378,187 @@ cpfl_init_rss(struct idpf_vport *vport)
 	return ret;
 }
 
+static int
+cpfl_rss_reta_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_reta_entry64 *reta_conf,
+		     uint16_t reta_size)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	uint16_t idx, shift;
+	int ret = 0;
+	uint16_t i;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (reta_size != vport->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			    "(%d) doesn't match the number the hardware can "
+			    "support (%d)",
+			    reta_size, vport->rss_lut_size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			vport->rss_lut[i] = reta_conf[idx].reta[shift];
+	}
+
+	/* send virtchnl ops to configure RSS */
+	ret = idpf_vc_rss_lut_set(vport);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
+
+	return ret;
+}
+
+static int
+cpfl_rss_reta_query(struct rte_eth_dev *dev,
+		    struct rte_eth_rss_reta_entry64 *reta_conf,
+		    uint16_t reta_size)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	uint16_t idx, shift;
+	int ret = 0;
+	uint16_t i;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (reta_size != vport->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			    "(%d) doesn't match the number the hardware can "
+			    "support (%d)", reta_size, vport->rss_lut_size);
+		return -EINVAL;
+	}
+
+	ret = idpf_vc_rss_lut_get(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS LUT");
+		return ret;
+	}
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			reta_conf[idx].reta[shift] = vport->rss_lut[i];
+	}
+
+	return 0;
+}
+
+static int
+cpfl_rss_hash_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_conf *rss_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	int ret = 0;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
+		PMD_DRV_LOG(DEBUG, "No key to be configured");
+		goto skip_rss_key;
+	} else if (rss_conf->rss_key_len != vport->rss_key_size) {
+		PMD_DRV_LOG(ERR, "The size of hash key configured "
+			    "(%d) doesn't match the size the hardware can "
+			    "support (%d)",
+			    rss_conf->rss_key_len,
+			    vport->rss_key_size);
+		return -EINVAL;
+	}
+
+	memcpy(vport->rss_key, rss_conf->rss_key,
+	       vport->rss_key_size);
+	ret = idpf_vc_rss_key_set(vport);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS key");
+		return ret;
+	}
+
+skip_rss_key:
+	ret = cpfl_config_rss_hf(vport, rss_conf->rss_hf);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
+		return ret;
+	}
+
+	return 0;
+}
+
+static uint64_t
+cpfl_map_general_rss_hf(uint64_t config_rss_hf, uint64_t last_general_rss_hf)
+{
+	uint64_t valid_rss_hf = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DIM(cpfl_map_hena_rss); i++) {
+		uint64_t bit = BIT_ULL(i);
+
+		if (bit & config_rss_hf)
+			valid_rss_hf |= cpfl_map_hena_rss[i];
+	}
+
+	if (valid_rss_hf & cpfl_ipv4_rss)
+		valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV4;
+
+	if (valid_rss_hf & cpfl_ipv6_rss)
+		valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV6;
+
+	return valid_rss_hf;
+}
+
+static int
+cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
+		       struct rte_eth_rss_conf *rss_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	int ret = 0;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	ret = idpf_vc_rss_hash_get(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS hf");
+		return ret;
+	}
+
+	rss_conf->rss_hf = cpfl_map_general_rss_hf(vport->rss_hf, vport->last_general_rss_hf);
+
+	if (!rss_conf->rss_key)
+		return 0;
+
+	ret = idpf_vc_rss_key_get(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS key");
+		return ret;
+	}
+
+	if (rss_conf->rss_key_len > vport->rss_key_size)
+		rss_conf->rss_key_len = vport->rss_key_size;
+
+	memcpy(rss_conf->rss_key, vport->rss_key, rss_conf->rss_key_len);
+
+	return 0;
+}
+
 static int
 cpfl_dev_configure(struct rte_eth_dev *dev)
 {
@@ -852,6 +1116,10 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.dev_supported_ptypes_get	= cpfl_dev_supported_ptypes_get,
 	.stats_get			= cpfl_dev_stats_get,
 	.stats_reset			= cpfl_dev_stats_reset,
+	.reta_update			= cpfl_rss_reta_update,
+	.reta_query			= cpfl_rss_reta_query,
+	.rss_hash_update		= cpfl_rss_hash_update,
+	.rss_hash_conf_get		= cpfl_rss_hash_conf_get,
 };
 
 static uint16_t
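
For reference (not part of the patch): a minimal sketch of how an application could
exercise the four new ops through the generic ethdev API, where
rte_eth_dev_rss_hash_update()/rte_eth_dev_rss_hash_conf_get() land in
.rss_hash_update/.rss_hash_conf_get and rte_eth_dev_rss_reta_update()/
rte_eth_dev_rss_reta_query() land in .reta_update/.reta_query. The helper name
example_rss_setup(), the port id and the queue count are made up for illustration,
and the RETA buffer assumes a lookup table of at most 512 entries.

#include <string.h>
#include <rte_ethdev.h>

static int
example_rss_setup(uint16_t port_id, uint16_t nb_rxq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_conf rss_conf = { 0 };
	struct rte_eth_rss_reta_entry64 reta_conf[8]; /* 8 * 64 = 512 LUT entries */
	uint16_t i, idx, shift;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if (dev_info.reta_size > RTE_DIM(reta_conf) * RTE_ETH_RETA_GROUP_SIZE)
		return -EINVAL;

	/* rss_hash_update: hash on IPv4/IPv6 TCP and UDP, keep the default key. */
	rss_conf.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP |
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP;
	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	if (ret != 0)
		return ret;

	/* reta_update: spread the redirection table round-robin over nb_rxq queues. */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;
		reta_conf[idx].reta[shift] = i % nb_rxq;
	}
	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
	if (ret != 0)
		return ret;

	/* rss_hash_conf_get / reta_query: read the configuration back. */
	ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rss_reta_query(port_id, reta_conf, dev_info.reta_size);
}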