From patchwork Tue Feb 7 09:56:57 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Liu, Mingxia" X-Patchwork-Id: 123226 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 46D2B41C2D; Tue, 7 Feb 2023 11:54:18 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0018442D2D; Tue, 7 Feb 2023 11:54:10 +0100 (CET) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id 7FB90427F5 for ; Tue, 7 Feb 2023 11:54:08 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1675767248; x=1707303248; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=FRWbLg8Rn71nGdtc7VJzYosirgILTnjJxpzmRuZejgY=; b=iWcgpy/OwmbVTv5WVdG8Ur1wI4qUlWPX6aWYSYdFUvpHFjowCzoEio+Q 5VolMw+uqloaWnfCa5jNrWoxTx1J2Q/AQ2O/NyTJ6f5iLC1NZVaz17Jtn HR6fp06XrG5k+0T9AAYqmfSLSRtq3Z3BaZfZAS02Z07GLz8H55IdkI5Jn BQWHZ8m5XfVJ594K9q4ee4vjOghdQyMKvi7z/h57nKb0zXQPXoWHRhdvV hmpHnoDXZHoxEgIdEX5Pyth0LJ5AvSoqCK2nzv6dJowzN56myUZA9gb7o bKuDYy7GNGHiawLEKb5EHzPt0wMpBxQLet91mfgvB6KuIMPQkYsKN0GlG Q==; X-IronPort-AV: E=McAfee;i="6500,9779,10613"; a="391873453" X-IronPort-AV: E=Sophos;i="5.97,278,1669104000"; d="scan'208";a="391873453" Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 07 Feb 2023 02:54:07 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6500,9779,10613"; a="730388576" X-IronPort-AV: E=Sophos;i="5.97,278,1669104000"; d="scan'208";a="730388576" Received: from dpdk-mingxial-01.sh.intel.com ([10.67.119.167]) by fmsmga008.fm.intel.com with ESMTP; 07 Feb 2023 02:54:06 -0800 From: Mingxia 
Liu To: dev@dpdk.org, qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com Cc: Mingxia Liu Subject: [PATCH v4 2/6] common/idpf: add RSS set/get ops Date: Tue, 7 Feb 2023 09:56:57 +0000 Message-Id: <20230207095701.2400179-3-mingxia.liu@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230207095701.2400179-1-mingxia.liu@intel.com> References: <20230118071440.902155-1-mingxia.liu@intel.com> <20230207095701.2400179-1-mingxia.liu@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add support for these device ops: - rss_reta_update - rss_reta_query - rss_hash_update - rss_hash_conf_get Signed-off-by: Mingxia Liu --- drivers/common/idpf/idpf_common_device.h | 1 + drivers/common/idpf/idpf_common_virtchnl.c | 119 +++++++++ drivers/common/idpf/idpf_common_virtchnl.h | 6 + drivers/common/idpf/version.map | 3 + drivers/net/idpf/idpf_ethdev.c | 268 +++++++++++++++++++++ drivers/net/idpf/idpf_ethdev.h | 3 +- 6 files changed, 399 insertions(+), 1 deletion(-) diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h index 1d8e7d405a..7abc4d2a3a 100644 --- a/drivers/common/idpf/idpf_common_device.h +++ b/drivers/common/idpf/idpf_common_device.h @@ -98,6 +98,7 @@ struct idpf_vport { uint32_t *rss_lut; uint8_t *rss_key; uint64_t rss_hf; + uint64_t last_general_rss_hf; /* MSIX info*/ struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */ diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c index 40cff34c09..10cfa33704 100644 --- a/drivers/common/idpf/idpf_common_virtchnl.c +++ b/drivers/common/idpf/idpf_common_virtchnl.c @@ -218,6 +218,9 @@ idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args) case VIRTCHNL2_OP_ALLOC_VECTORS: case 
VIRTCHNL2_OP_DEALLOC_VECTORS: case VIRTCHNL2_OP_GET_STATS: + case VIRTCHNL2_OP_GET_RSS_KEY: + case VIRTCHNL2_OP_GET_RSS_HASH: + case VIRTCHNL2_OP_GET_RSS_LUT: /* for init virtchnl ops, need to poll the response */ err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer); clear_cmd(adapter); @@ -448,6 +451,48 @@ idpf_vc_rss_key_set(struct idpf_vport *vport) return err; } +int idpf_vc_rss_key_get(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_rss_key *rss_key_ret; + struct virtchnl2_rss_key rss_key; + struct idpf_cmd_info args; + int err; + + memset(&rss_key, 0, sizeof(rss_key)); + rss_key.vport_id = vport->vport_id; + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL2_OP_GET_RSS_KEY; + args.in_args = (uint8_t *)&rss_key; + args.in_args_size = sizeof(rss_key); + args.out_buffer = adapter->mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(adapter, &args); + + if (!err) { + rss_key_ret = (struct virtchnl2_rss_key *)args.out_buffer; + if (rss_key_ret->key_len != vport->rss_key_size) { + rte_free(vport->rss_key); + vport->rss_key = NULL; + vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN, + rss_key_ret->key_len); + vport->rss_key = rte_zmalloc("rss_key", vport->rss_key_size, 0); + if (!vport->rss_key) { + vport->rss_key_size = 0; + DRV_LOG(ERR, "Failed to allocate RSS key"); + return -ENOMEM; + } + } + rte_memcpy(vport->rss_key, rss_key_ret->key, vport->rss_key_size); + } else { + DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_KEY"); + } + + return err; +} + int idpf_vc_rss_lut_set(struct idpf_vport *vport) { @@ -482,6 +527,80 @@ idpf_vc_rss_lut_set(struct idpf_vport *vport) return err; } +int +idpf_vc_rss_lut_get(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_rss_lut *rss_lut_ret; + struct virtchnl2_rss_lut rss_lut; + struct idpf_cmd_info args; + int err; + + memset(&rss_lut, 0, sizeof(rss_lut)); 
+ rss_lut.vport_id = vport->vport_id; + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL2_OP_GET_RSS_LUT; + args.in_args = (uint8_t *)&rss_lut; + args.in_args_size = sizeof(rss_lut); + args.out_buffer = adapter->mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(adapter, &args); + + if (!err) { + rss_lut_ret = (struct virtchnl2_rss_lut *)args.out_buffer; + if (rss_lut_ret->lut_entries != vport->rss_lut_size) { + rte_free(vport->rss_lut); + vport->rss_lut = NULL; + vport->rss_lut = rte_zmalloc("rss_lut", + sizeof(uint32_t) * rss_lut_ret->lut_entries, 0); + if (vport->rss_lut == NULL) { + DRV_LOG(ERR, "Failed to allocate RSS lut"); + return -ENOMEM; + } + } + rte_memcpy(vport->rss_lut, rss_lut_ret->lut, rss_lut_ret->lut_entries); + vport->rss_lut_size = rss_lut_ret->lut_entries; + } else { + DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_LUT"); + } + + return err; +} + +int +idpf_vc_rss_hash_get(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_rss_hash *rss_hash_ret; + struct virtchnl2_rss_hash rss_hash; + struct idpf_cmd_info args; + int err; + + memset(&rss_hash, 0, sizeof(rss_hash)); + rss_hash.ptype_groups = vport->rss_hf; + rss_hash.vport_id = vport->vport_id; + + memset(&args, 0, sizeof(args)); + args.ops = VIRTCHNL2_OP_GET_RSS_HASH; + args.in_args = (uint8_t *)&rss_hash; + args.in_args_size = sizeof(rss_hash); + args.out_buffer = adapter->mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + + err = idpf_vc_cmd_execute(adapter, &args); + + if (!err) { + rss_hash_ret = (struct virtchnl2_rss_hash *)args.out_buffer; + vport->rss_hf = rss_hash_ret->ptype_groups; + } else { + DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HASH"); + } + + return err; +} + int idpf_vc_rss_hash_set(struct idpf_vport *vport) { diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h index 6b94fd5b8f..205d1a932d 100644 --- 
a/drivers/common/idpf/idpf_common_virtchnl.h +++ b/drivers/common/idpf/idpf_common_virtchnl.h @@ -52,4 +52,10 @@ int idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq); __rte_internal int idpf_vc_stats_query(struct idpf_vport *vport, struct virtchnl2_vport_stats **pstats); +__rte_internal +int idpf_vc_rss_key_get(struct idpf_vport *vport); +__rte_internal +int idpf_vc_rss_lut_get(struct idpf_vport *vport); +__rte_internal +int idpf_vc_rss_hash_get(struct idpf_vport *vport); #endif /* _IDPF_COMMON_VIRTCHNL_H_ */ diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map index e6a02828ba..f6c92e7e57 100644 --- a/drivers/common/idpf/version.map +++ b/drivers/common/idpf/version.map @@ -42,8 +42,11 @@ INTERNAL { idpf_vc_ptype_info_query; idpf_vc_queue_switch; idpf_vc_queues_ena_dis; + idpf_vc_rss_hash_get; idpf_vc_rss_hash_set; + idpf_vc_rss_key_get; idpf_vc_rss_key_set; + idpf_vc_rss_lut_get; idpf_vc_rss_lut_set; idpf_vc_rxq_config; idpf_vc_stats_query; diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c index 02ddb0330a..d50e0952bf 100644 --- a/drivers/net/idpf/idpf_ethdev.c +++ b/drivers/net/idpf/idpf_ethdev.c @@ -29,6 +29,56 @@ static const char * const idpf_valid_args[] = { NULL }; +static const uint64_t idpf_map_hena_rss[] = { + [IDPF_HASH_NONF_UNICAST_IPV4_UDP] = + RTE_ETH_RSS_NONFRAG_IPV4_UDP, + [IDPF_HASH_NONF_MULTICAST_IPV4_UDP] = + RTE_ETH_RSS_NONFRAG_IPV4_UDP, + [IDPF_HASH_NONF_IPV4_UDP] = + RTE_ETH_RSS_NONFRAG_IPV4_UDP, + [IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK] = + RTE_ETH_RSS_NONFRAG_IPV4_TCP, + [IDPF_HASH_NONF_IPV4_TCP] = + RTE_ETH_RSS_NONFRAG_IPV4_TCP, + [IDPF_HASH_NONF_IPV4_SCTP] = + RTE_ETH_RSS_NONFRAG_IPV4_SCTP, + [IDPF_HASH_NONF_IPV4_OTHER] = + RTE_ETH_RSS_NONFRAG_IPV4_OTHER, + [IDPF_HASH_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4, + + /* IPv6 */ + [IDPF_HASH_NONF_UNICAST_IPV6_UDP] = + RTE_ETH_RSS_NONFRAG_IPV6_UDP, + [IDPF_HASH_NONF_MULTICAST_IPV6_UDP] = + RTE_ETH_RSS_NONFRAG_IPV6_UDP, + 
[IDPF_HASH_NONF_IPV6_UDP] = + RTE_ETH_RSS_NONFRAG_IPV6_UDP, + [IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK] = + RTE_ETH_RSS_NONFRAG_IPV6_TCP, + [IDPF_HASH_NONF_IPV6_TCP] = + RTE_ETH_RSS_NONFRAG_IPV6_TCP, + [IDPF_HASH_NONF_IPV6_SCTP] = + RTE_ETH_RSS_NONFRAG_IPV6_SCTP, + [IDPF_HASH_NONF_IPV6_OTHER] = + RTE_ETH_RSS_NONFRAG_IPV6_OTHER, + [IDPF_HASH_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6, + + /* L2 Payload */ + [IDPF_HASH_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD +}; + +static const uint64_t idpf_ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP | + RTE_ETH_RSS_NONFRAG_IPV4_TCP | + RTE_ETH_RSS_NONFRAG_IPV4_SCTP | + RTE_ETH_RSS_NONFRAG_IPV4_OTHER | + RTE_ETH_RSS_FRAG_IPV4; + +static const uint64_t idpf_ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP | + RTE_ETH_RSS_NONFRAG_IPV6_TCP | + RTE_ETH_RSS_NONFRAG_IPV6_SCTP | + RTE_ETH_RSS_NONFRAG_IPV6_OTHER | + RTE_ETH_RSS_FRAG_IPV6; + static int idpf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) @@ -59,6 +109,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_mtu = vport->max_mtu; dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->hash_key_size = vport->rss_key_size; + dev_info->reta_size = vport->rss_lut_size; + dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL; dev_info->rx_offload_capa = @@ -221,6 +274,36 @@ idpf_dev_stats_reset(struct rte_eth_dev *dev) return 0; } +static int idpf_config_rss_hf(struct idpf_vport *vport, uint64_t rss_hf) +{ + uint64_t hena = 0; + uint16_t i; + + /** + * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2 + * generalizations of all other IPv4 and IPv6 RSS types. + */ + if (rss_hf & RTE_ETH_RSS_IPV4) + rss_hf |= idpf_ipv4_rss; + + if (rss_hf & RTE_ETH_RSS_IPV6) + rss_hf |= idpf_ipv6_rss; + + for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) { + if (idpf_map_hena_rss[i] & rss_hf) + hena |= BIT_ULL(i); + } + + /** + * At present, cp doesn't process the virtual channel msg of rss_hf configuration, + tips are given below. 
+ */ + if (hena != vport->rss_hf) + PMD_DRV_LOG(WARNING, "Updating RSS Hash Function is not supported at present."); + + return 0; +} + static int idpf_init_rss(struct idpf_vport *vport) { @@ -257,6 +340,187 @@ idpf_init_rss(struct idpf_vport *vport) return ret; } +static int +idpf_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct idpf_vport *vport = dev->data->dev_private; + struct idpf_adapter *adapter = vport->adapter; + uint16_t idx, shift; + int ret = 0; + uint16_t i; + + if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) { + PMD_DRV_LOG(DEBUG, "RSS is not supported"); + return -ENOTSUP; + } + + if (reta_size != vport->rss_lut_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)", + reta_size, vport->rss_lut_size); + return -EINVAL; + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + vport->rss_lut[i] = reta_conf[idx].reta[shift]; + } + + /* send virtchnl ops to configure RSS */ + ret = idpf_vc_rss_lut_set(vport); + if (ret) + PMD_INIT_LOG(ERR, "Failed to configure RSS lut"); + + return ret; +} + +static int +idpf_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct idpf_vport *vport = dev->data->dev_private; + struct idpf_adapter *adapter = vport->adapter; + uint16_t idx, shift; + int ret = 0; + uint16_t i; + + if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) { + PMD_DRV_LOG(DEBUG, "RSS is not supported"); + return -ENOTSUP; + } + + if (reta_size != vport->rss_lut_size) { + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " + "(%d) doesn't match the number of hardware can " + "support (%d)", reta_size, vport->rss_lut_size); + return -EINVAL; + } + + ret = idpf_vc_rss_lut_get(vport); + if 
(ret) { + PMD_DRV_LOG(ERR, "Failed to get RSS LUT"); + return ret; + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = vport->rss_lut[i]; + } + + return 0; +} + +static int +idpf_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct idpf_vport *vport = dev->data->dev_private; + struct idpf_adapter *adapter = vport->adapter; + int ret = 0; + + if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) { + PMD_DRV_LOG(DEBUG, "RSS is not supported"); + return -ENOTSUP; + } + + if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) { + PMD_DRV_LOG(DEBUG, "No key to be configured"); + goto skip_rss_key; + } else if (rss_conf->rss_key_len != vport->rss_key_size) { + PMD_DRV_LOG(ERR, "The size of hash key configured " + "(%d) doesn't match the size of hardware can " + "support (%d)", + rss_conf->rss_key_len, + vport->rss_key_size); + return -EINVAL; + } + + rte_memcpy(vport->rss_key, rss_conf->rss_key, + vport->rss_key_size); + ret = idpf_vc_rss_key_set(vport); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to configure RSS key"); + return ret; + } + +skip_rss_key: + ret = idpf_config_rss_hf(vport, rss_conf->rss_hf); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to configure RSS hash"); + return ret; + } + + return 0; +} + +static uint64_t +idpf_map_general_rss_hf(uint64_t config_rss_hf, uint64_t last_general_rss_hf) +{ + uint64_t valid_rss_hf = 0; + uint16_t i; + + for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) { + uint64_t bit = BIT_ULL(i); + + if (bit & config_rss_hf) + valid_rss_hf |= idpf_map_hena_rss[i]; + } + + if (valid_rss_hf & idpf_ipv4_rss) + valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV4; + + if (valid_rss_hf & idpf_ipv6_rss) + valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV6; + + return valid_rss_hf; +} + +static int +idpf_rss_hash_conf_get(struct rte_eth_dev *dev, 
+ struct rte_eth_rss_conf *rss_conf) +{ + struct idpf_vport *vport = dev->data->dev_private; + struct idpf_adapter *adapter = vport->adapter; + int ret = 0; + + if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) { + PMD_DRV_LOG(DEBUG, "RSS is not supported"); + return -ENOTSUP; + } + + ret = idpf_vc_rss_hash_get(vport); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get RSS hf"); + return ret; + } + + rss_conf->rss_hf = idpf_map_general_rss_hf(vport->rss_hf, vport->last_general_rss_hf); + + if (!rss_conf->rss_key) + return 0; + + ret = idpf_vc_rss_key_get(vport); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to get RSS key"); + return ret; + } + + if (rss_conf->rss_key_len > vport->rss_key_size) + rss_conf->rss_key_len = vport->rss_key_size; + + rte_memcpy(rss_conf->rss_key, vport->rss_key, rss_conf->rss_key_len); + + return 0; +} + static int idpf_dev_configure(struct rte_eth_dev *dev) { @@ -692,6 +956,10 @@ static const struct eth_dev_ops idpf_eth_dev_ops = { .dev_supported_ptypes_get = idpf_dev_supported_ptypes_get, .stats_get = idpf_dev_stats_get, .stats_reset = idpf_dev_stats_reset, + .reta_update = idpf_rss_reta_update, + .reta_query = idpf_rss_reta_query, + .rss_hash_update = idpf_rss_hash_update, + .rss_hash_conf_get = idpf_rss_hash_conf_get, }; static uint16_t diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h index d791d402fb..839a2bd82c 100644 --- a/drivers/net/idpf/idpf_ethdev.h +++ b/drivers/net/idpf/idpf_ethdev.h @@ -48,7 +48,8 @@ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \ - RTE_ETH_RSS_NONFRAG_IPV6_OTHER) + RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \ + RTE_ETH_RSS_L2_PAYLOAD) #define IDPF_ADAPTER_NAME_LEN (PCI_PRI_STR_SIZE + 1)