[v3,2/6] common/idpf: add RSS set/get ops

Message ID 20230118071440.902155-3-mingxia.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Headers
Series add idpf pmd enhancement features |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Liu, Mingxia Jan. 18, 2023, 7:14 a.m. UTC
  Add support for these device ops:
- rss_reta_update
- rss_reta_query
- rss_hash_update
- rss_hash_conf_get

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/common/idpf/idpf_common_device.h   |   1 +
 drivers/common/idpf/idpf_common_virtchnl.c | 119 ++++++++
 drivers/common/idpf/idpf_common_virtchnl.h |   6 +
 drivers/common/idpf/version.map            |   3 +
 drivers/net/idpf/idpf_ethdev.c             | 303 +++++++++++++++++++++
 drivers/net/idpf/idpf_ethdev.h             |   3 +-
 6 files changed, 434 insertions(+), 1 deletion(-)
  

Comments

Jingjing Wu Feb. 2, 2023, 3:28 a.m. UTC | #1
> +static int idpf_config_rss_hf(struct idpf_vport *vport, uint64_t rss_hf)
> +{
> +	uint64_t hena = 0, valid_rss_hf = 0;
According to the coding style, only the last variable on a line should be initialized.

> +	int ret = 0;
> +	uint16_t i;
> +
> +	/**
> +	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
> +	 * generalizations of all other IPv4 and IPv6 RSS types.
> +	 */
> +	if (rss_hf & RTE_ETH_RSS_IPV4)
> +		rss_hf |= idpf_ipv4_rss;
> +
> +	if (rss_hf & RTE_ETH_RSS_IPV6)
> +		rss_hf |= idpf_ipv6_rss;
> +
> +	for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
> +		uint64_t bit = BIT_ULL(i);
> +
> +		if (idpf_map_hena_rss[i] & rss_hf) {
> +			valid_rss_hf |= idpf_map_hena_rss[i];
> +			hena |= bit;
> +		}
> +	}
> +
> +	vport->rss_hf = hena;
> +
> +	ret = idpf_vc_set_rss_hash(vport);
> +	if (ret != 0) {
> +		PMD_DRV_LOG(WARNING,
> +			    "fail to set RSS offload types, ret: %d", ret);
> +		return ret;
> +	}
> +
> +	if (valid_rss_hf & idpf_ipv4_rss)
> +		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
> +
> +	if (valid_rss_hf & idpf_ipv6_rss)
> +		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
> +
> +	if (rss_hf & ~valid_rss_hf)
> +		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
> +			    rss_hf & ~valid_rss_hf);
This makes me a bit confused: valid_rss_hf would be a subset of rss_hf according to the assignment above. Is it even possible to reach this branch?
And if it is possible, why not set valid_rss_hf before calling the vc command?

> +	vport->last_general_rss_hf = valid_rss_hf;
> +
> +	return ret;
> +}
> +
>  static int
>  idpf_init_rss(struct idpf_vport *vport)
>  {
> @@ -256,6 +357,204 @@ idpf_init_rss(struct idpf_vport *vport)
>  	return ret;
>  }
> 
> +static int
> +idpf_rss_reta_update(struct rte_eth_dev *dev,
> +		     struct rte_eth_rss_reta_entry64 *reta_conf,
> +		     uint16_t reta_size)
> +{
> +	struct idpf_vport *vport = dev->data->dev_private;
> +	struct idpf_adapter *adapter = vport->adapter;
> +	uint16_t idx, shift;
> +	uint32_t *lut;
> +	int ret = 0;
> +	uint16_t i;
> +
> +	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
> +		PMD_DRV_LOG(DEBUG, "RSS is not supported");
> +		return -ENOTSUP;
> +	}
> +
> +	if (reta_size != vport->rss_lut_size) {
> +		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
> +				 "(%d) doesn't match the number of hardware can "
> +				 "support (%d)",
> +			    reta_size, vport->rss_lut_size);
> +		return -EINVAL;
> +	}
> +
> +	/* It MUST use the current LUT size to get the RSS lookup table,
> +	 * otherwise if will fail with -100 error code.
> +	 */
> +	lut = rte_zmalloc(NULL, reta_size * sizeof(uint32_t), 0);
> +	if (!lut) {
> +		PMD_DRV_LOG(ERR, "No memory can be allocated");
> +		return -ENOMEM;
> +	}
> +	/* store the old lut table temporarily */
> +	rte_memcpy(lut, vport->rss_lut, reta_size * sizeof(uint32_t));
Stored the vport->rss_lut to lut? But you overwrite the lut below?

> +
> +	for (i = 0; i < reta_size; i++) {
> +		idx = i / RTE_ETH_RETA_GROUP_SIZE;
> +		shift = i % RTE_ETH_RETA_GROUP_SIZE;
> +		if (reta_conf[idx].mask & (1ULL << shift))
> +			lut[i] = reta_conf[idx].reta[shift];
> +	}
> +
> +	rte_memcpy(vport->rss_lut, lut, reta_size * sizeof(uint32_t));
> +	/* send virtchnl ops to configure RSS */
> +	ret = idpf_vc_set_rss_lut(vport);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
> +		goto out;
> +	}
> +out:
> +	rte_free(lut);
> +
> +	return ret;
> +}
  
Liu, Mingxia Feb. 7, 2023, 3:10 a.m. UTC | #2
> > +static int idpf_config_rss_hf(struct idpf_vport *vport, uint64_t
> > +rss_hf) {
> > +	uint64_t hena = 0, valid_rss_hf = 0;
> According to the coding style, only the last variable on a line should be
> initialized.
> 
[Liu, Mingxia] Ok, thanks, I'll check whether the same issue exists elsewhere.


> > +	vport->rss_hf = hena;
> > +
> > +	ret = idpf_vc_set_rss_hash(vport);
> > +	if (ret != 0) {
> > +		PMD_DRV_LOG(WARNING,
> > +			    "fail to set RSS offload types, ret: %d", ret);
> > +		return ret;
> > +	}
> > +
> > +	if (valid_rss_hf & idpf_ipv4_rss)
> > +		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
> > +
> > +	if (valid_rss_hf & idpf_ipv6_rss)
> > +		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
> > +
> > +	if (rss_hf & ~valid_rss_hf)
> > +		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%"
> PRIx64,
> > +			    rss_hf & ~valid_rss_hf);
> It makes me a bit confused, valid_rss_hf is would be the sub of rss_hf
> according above assignment. Would it be possible to go here?
> And if it is possible, why not set valid_rss_hf before calling vc command?
>
[Liu, Mingxia] According to cmd_config_rss_parsed(), when an rss_hf bit is set that does not belong to flow_type_rss_offloads, it is removed by ANDing with flow_type_rss_offloads.
What's more, rte_eth_dev_rss_hash_update() checks again whether the configured rss_hf belongs to flow_type_rss_offloads and, if not, returns an error.
So when entering the function idpf_config_rss_hf, it is not possible that (rss_hf & ~valid_rss_hf) != 0.
It would be better to delete this piece of code.

For the second question — why not set valid_rss_hf before calling the vc command?
Because if we set rss_hf to RTE_ETH_RSS_IPV4, then the rss hf value on the idpf side that maps to RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_FRAG_IPV4 is set.
But there is no rss hf value on the idpf side that maps to RTE_ETH_RSS_IPV4 itself.
When we get rss_hf back from the vc, it cannot tell us whether RTE_ETH_RSS_IPV4 was ever configured.
So the DPDK software should record whether RTE_ETH_RSS_IPV4 has ever been set, via valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4, and return it to the user when needed.

RTE_ETH_RSS_IPV6 is handled similarly.


> > +	/* It MUST use the current LUT size to get the RSS lookup table,
> > +	 * otherwise if will fail with -100 error code.
> > +	 */
> > +	lut = rte_zmalloc(NULL, reta_size * sizeof(uint32_t), 0);
> > +	if (!lut) {
> > +		PMD_DRV_LOG(ERR, "No memory can be allocated");
> > +		return -ENOMEM;
> > +	}
> > +	/* store the old lut table temporarily */
> > +	rte_memcpy(lut, vport->rss_lut, reta_size * sizeof(uint32_t));
> Stored the vport->rss_lut to lut? But you overwrite the lut below?
> 
[Liu, Mingxia] Because lut contains the whole redirection table, but we may want to update only a few entries of the redirection table,
we first store the original lut, then update only the required table entries.

> -----Original Message-----
> From: Wu, Jingjing <jingjing.wu@intel.com>
> Sent: Thursday, February 2, 2023 11:28 AM
> To: Liu, Mingxia <mingxia.liu@intel.com>; dev@dpdk.org
> Cc: Xing, Beilei <beilei.xing@intel.com>
> Subject: RE: [PATCH v3 2/6] common/idpf: add RSS set/get ops
> 
> > +static int idpf_config_rss_hf(struct idpf_vport *vport, uint64_t
> > +rss_hf) {
> > +	uint64_t hena = 0, valid_rss_hf = 0;
> According to the coding style, only the last variable on a line should be
> initialized.
> 
> > +	int ret = 0;
> > +	uint16_t i;
> > +
> > +	/**
> > +	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
> > +	 * generalizations of all other IPv4 and IPv6 RSS types.
> > +	 */
> > +	if (rss_hf & RTE_ETH_RSS_IPV4)
> > +		rss_hf |= idpf_ipv4_rss;
> > +
> > +	if (rss_hf & RTE_ETH_RSS_IPV6)
> > +		rss_hf |= idpf_ipv6_rss;
> > +
> > +	for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
> > +		uint64_t bit = BIT_ULL(i);
> > +
> > +		if (idpf_map_hena_rss[i] & rss_hf) {
> > +			valid_rss_hf |= idpf_map_hena_rss[i];
> > +			hena |= bit;
> > +		}
> > +	}
> > +
> > +	vport->rss_hf = hena;
> > +
> > +	ret = idpf_vc_set_rss_hash(vport);
> > +	if (ret != 0) {
> > +		PMD_DRV_LOG(WARNING,
> > +			    "fail to set RSS offload types, ret: %d", ret);
> > +		return ret;
> > +	}
> > +
> > +	if (valid_rss_hf & idpf_ipv4_rss)
> > +		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
> > +
> > +	if (valid_rss_hf & idpf_ipv6_rss)
> > +		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
> > +
> > +	if (rss_hf & ~valid_rss_hf)
> > +		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%"
> PRIx64,
> > +			    rss_hf & ~valid_rss_hf);
> It makes me a bit confused, valid_rss_hf is would be the sub of rss_hf
> according above assignment. Would it be possible to go here?
> And if it is possible, why not set valid_rss_hf before calling vc command?
> 
> > +	vport->last_general_rss_hf = valid_rss_hf;
> > +
> > +	return ret;
> > +}
> > +
> >  static int
> >  idpf_init_rss(struct idpf_vport *vport)  { @@ -256,6 +357,204 @@
> > idpf_init_rss(struct idpf_vport *vport)
> >  	return ret;
> >  }
> >
> > +static int
> > +idpf_rss_reta_update(struct rte_eth_dev *dev,
> > +		     struct rte_eth_rss_reta_entry64 *reta_conf,
> > +		     uint16_t reta_size)
> > +{
> > +	struct idpf_vport *vport = dev->data->dev_private;
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	uint16_t idx, shift;
> > +	uint32_t *lut;
> > +	int ret = 0;
> > +	uint16_t i;
> > +
> > +	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
> > +		PMD_DRV_LOG(DEBUG, "RSS is not supported");
> > +		return -ENOTSUP;
> > +	}
> > +
> > +	if (reta_size != vport->rss_lut_size) {
> > +		PMD_DRV_LOG(ERR, "The size of hash lookup table
> configured "
> > +				 "(%d) doesn't match the number of
> hardware can "
> > +				 "support (%d)",
> > +			    reta_size, vport->rss_lut_size);
> > +		return -EINVAL;
> > +	}
> > +
> > +	/* It MUST use the current LUT size to get the RSS lookup table,
> > +	 * otherwise if will fail with -100 error code.
> > +	 */
> > +	lut = rte_zmalloc(NULL, reta_size * sizeof(uint32_t), 0);
> > +	if (!lut) {
> > +		PMD_DRV_LOG(ERR, "No memory can be allocated");
> > +		return -ENOMEM;
> > +	}
> > +	/* store the old lut table temporarily */
> > +	rte_memcpy(lut, vport->rss_lut, reta_size * sizeof(uint32_t));
> Stored the vport->rss_lut to lut? But you overwrite the lut below?
> 
[Liu, Mingxia] Because lut contains the whole redirection table, but we may want to update only a few entries of the redirection table,
we first store the original lut, then update only the required table entries.

> > +
> > +	for (i = 0; i < reta_size; i++) {
> > +		idx = i / RTE_ETH_RETA_GROUP_SIZE;
> > +		shift = i % RTE_ETH_RETA_GROUP_SIZE;
> > +		if (reta_conf[idx].mask & (1ULL << shift))
> > +			lut[i] = reta_conf[idx].reta[shift];
> > +	}
> > +
> > +	rte_memcpy(vport->rss_lut, lut, reta_size * sizeof(uint32_t));
> > +	/* send virtchnl ops to configure RSS */
> > +	ret = idpf_vc_set_rss_lut(vport);
> > +	if (ret) {
> > +		PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
> > +		goto out;
> > +	}
> > +out:
> > +	rte_free(lut);
> > +
> > +	return ret;
> > +}
  

Patch

diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 73d4ffb4b3..f22ffde22e 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -98,6 +98,7 @@  struct idpf_vport {
 	uint32_t *rss_lut;
 	uint8_t *rss_key;
 	uint64_t rss_hf;
+	uint64_t last_general_rss_hf;
 
 	/* MSIX info*/
 	struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 675dcebbf4..5965f9ee55 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -218,6 +218,9 @@  idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 	case VIRTCHNL2_OP_ALLOC_VECTORS:
 	case VIRTCHNL2_OP_DEALLOC_VECTORS:
 	case VIRTCHNL2_OP_GET_STATS:
+	case VIRTCHNL2_OP_GET_RSS_KEY:
+	case VIRTCHNL2_OP_GET_RSS_HASH:
+	case VIRTCHNL2_OP_GET_RSS_LUT:
 		/* for init virtchnl ops, need to poll the response */
 		err = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);
 		clear_cmd(adapter);
@@ -448,6 +451,48 @@  idpf_vc_set_rss_key(struct idpf_vport *vport)
 	return err;
 }
 
+int idpf_vc_get_rss_key(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_rss_key *rss_key_ret;
+	struct virtchnl2_rss_key rss_key;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_key, 0, sizeof(rss_key));
+	rss_key.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_GET_RSS_KEY;
+	args.in_args = (uint8_t *)&rss_key;
+	args.in_args_size = sizeof(rss_key);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+
+	if (!err) {
+		rss_key_ret = (struct virtchnl2_rss_key *)args.out_buffer;
+		if (rss_key_ret->key_len != vport->rss_key_size) {
+			rte_free(vport->rss_key);
+			vport->rss_key = NULL;
+			vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
+						      rss_key_ret->key_len);
+			vport->rss_key = rte_zmalloc("rss_key", vport->rss_key_size, 0);
+			if (!vport->rss_key) {
+				vport->rss_key_size = 0;
+				DRV_LOG(ERR, "Failed to allocate RSS key");
+				return -ENOMEM;
+			}
+		}
+		rte_memcpy(vport->rss_key, rss_key_ret->key, vport->rss_key_size);
+	} else {
+		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_KEY");
+	}
+
+	return err;
+}
+
 int
 idpf_vc_set_rss_lut(struct idpf_vport *vport)
 {
@@ -482,6 +527,48 @@  idpf_vc_set_rss_lut(struct idpf_vport *vport)
 	return err;
 }
 
+int
+idpf_vc_get_rss_lut(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_rss_lut *rss_lut_ret;
+	struct virtchnl2_rss_lut rss_lut;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_lut, 0, sizeof(rss_lut));
+	rss_lut.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_GET_RSS_LUT;
+	args.in_args = (uint8_t *)&rss_lut;
+	args.in_args_size = sizeof(rss_lut);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+
+	if (!err) {
+		rss_lut_ret = (struct virtchnl2_rss_lut *)args.out_buffer;
+		if (rss_lut_ret->lut_entries != vport->rss_lut_size) {
+			rte_free(vport->rss_lut);
+			vport->rss_lut = NULL;
+			vport->rss_lut = rte_zmalloc("rss_lut",
+				     sizeof(uint32_t) * rss_lut_ret->lut_entries, 0);
+			if (vport->rss_lut == NULL) {
+				DRV_LOG(ERR, "Failed to allocate RSS lut");
+				return -ENOMEM;
+			}
+		}
+		rte_memcpy(vport->rss_lut, rss_lut_ret->lut, rss_lut_ret->lut_entries);
+		vport->rss_lut_size = rss_lut_ret->lut_entries;
+	} else {
+		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_LUT");
+	}
+
+	return err;
+}
+
 int
 idpf_vc_set_rss_hash(struct idpf_vport *vport)
 {
@@ -508,6 +595,38 @@  idpf_vc_set_rss_hash(struct idpf_vport *vport)
 	return err;
 }
 
+int
+idpf_vc_get_rss_hash(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_rss_hash *rss_hash_ret;
+	struct virtchnl2_rss_hash rss_hash;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_hash, 0, sizeof(rss_hash));
+	rss_hash.ptype_groups = vport->rss_hf;
+	rss_hash.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_GET_RSS_HASH;
+	args.in_args = (uint8_t *)&rss_hash;
+	args.in_args_size = sizeof(rss_hash);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+
+	if (!err) {
+		rss_hash_ret = (struct virtchnl2_rss_hash *)args.out_buffer;
+		vport->rss_hf = rss_hash_ret->ptype_groups;
+	} else {
+		DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HASH");
+	}
+
+	return err;
+}
+
 int
 idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
 {
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 6d63e6ad35..86a8dfcece 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -52,4 +52,10 @@  int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
 __rte_internal
 int idpf_query_stats(struct idpf_vport *vport,
 		     struct virtchnl2_vport_stats **pstats);
+__rte_internal
+int idpf_vc_get_rss_key(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_get_rss_lut(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_get_rss_hash(struct idpf_vport *vport);
 #endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 0b4a22bae4..36a3a90d39 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -63,6 +63,9 @@  INTERNAL {
 	idpf_vc_set_rss_lut;
 	idpf_vport_deinit;
 	idpf_vport_init;
+	idpf_vc_get_rss_key;
+	idpf_vc_get_rss_lut;
+	idpf_vc_get_rss_hash;
 
 	local: *;
 };
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index e8bb097c78..037cabb04e 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -29,6 +29,56 @@  static const char * const idpf_valid_args[] = {
 	NULL
 };
 
+static const uint64_t idpf_map_hena_rss[] = {
+	[IDPF_HASH_NONF_UNICAST_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_MULTICAST_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK] =
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	[IDPF_HASH_NONF_IPV4_TCP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	[IDPF_HASH_NONF_IPV4_SCTP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
+	[IDPF_HASH_NONF_IPV4_OTHER] =
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+	[IDPF_HASH_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
+
+	/* IPv6 */
+	[IDPF_HASH_NONF_UNICAST_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_MULTICAST_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK] =
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+	[IDPF_HASH_NONF_IPV6_TCP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+	[IDPF_HASH_NONF_IPV6_SCTP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
+	[IDPF_HASH_NONF_IPV6_OTHER] =
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+	[IDPF_HASH_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
+
+	/* L2 Payload */
+	[IDPF_HASH_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
+};
+
+static const uint64_t idpf_ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			  RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			  RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			  RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+			  RTE_ETH_RSS_FRAG_IPV4;
+
+static const uint64_t idpf_ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			  RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+			  RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+			  RTE_ETH_RSS_FRAG_IPV6;
+
 static int
 idpf_dev_link_update(struct rte_eth_dev *dev,
 		     __rte_unused int wait_to_complete)
@@ -59,6 +109,9 @@  idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mtu = vport->max_mtu;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
+	dev_info->hash_key_size = vport->rss_key_size;
+	dev_info->reta_size = vport->rss_lut_size;
+
 	dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
 
 	dev_info->rx_offload_capa =
@@ -220,6 +273,54 @@  idpf_dev_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int idpf_config_rss_hf(struct idpf_vport *vport, uint64_t rss_hf)
+{
+	uint64_t hena = 0, valid_rss_hf = 0;
+	int ret = 0;
+	uint16_t i;
+
+	/**
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
+	 * generalizations of all other IPv4 and IPv6 RSS types.
+	 */
+	if (rss_hf & RTE_ETH_RSS_IPV4)
+		rss_hf |= idpf_ipv4_rss;
+
+	if (rss_hf & RTE_ETH_RSS_IPV6)
+		rss_hf |= idpf_ipv6_rss;
+
+	for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
+		uint64_t bit = BIT_ULL(i);
+
+		if (idpf_map_hena_rss[i] & rss_hf) {
+			valid_rss_hf |= idpf_map_hena_rss[i];
+			hena |= bit;
+		}
+	}
+
+	vport->rss_hf = hena;
+
+	ret = idpf_vc_set_rss_hash(vport);
+	if (ret != 0) {
+		PMD_DRV_LOG(WARNING,
+			    "fail to set RSS offload types, ret: %d", ret);
+		return ret;
+	}
+
+	if (valid_rss_hf & idpf_ipv4_rss)
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
+
+	if (valid_rss_hf & idpf_ipv6_rss)
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
+
+	if (rss_hf & ~valid_rss_hf)
+		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
+			    rss_hf & ~valid_rss_hf);
+	vport->last_general_rss_hf = valid_rss_hf;
+
+	return ret;
+}
+
 static int
 idpf_init_rss(struct idpf_vport *vport)
 {
@@ -256,6 +357,204 @@  idpf_init_rss(struct idpf_vport *vport)
 	return ret;
 }
 
+static int
+idpf_rss_reta_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_reta_entry64 *reta_conf,
+		     uint16_t reta_size)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	uint16_t idx, shift;
+	uint32_t *lut;
+	int ret = 0;
+	uint16_t i;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (reta_size != vport->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+				 "(%d) doesn't match the number of hardware can "
+				 "support (%d)",
+			    reta_size, vport->rss_lut_size);
+		return -EINVAL;
+	}
+
+	/* It MUST use the current LUT size to get the RSS lookup table,
+	 * otherwise if will fail with -100 error code.
+	 */
+	lut = rte_zmalloc(NULL, reta_size * sizeof(uint32_t), 0);
+	if (!lut) {
+		PMD_DRV_LOG(ERR, "No memory can be allocated");
+		return -ENOMEM;
+	}
+	/* store the old lut table temporarily */
+	rte_memcpy(lut, vport->rss_lut, reta_size * sizeof(uint32_t));
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			lut[i] = reta_conf[idx].reta[shift];
+	}
+
+	rte_memcpy(vport->rss_lut, lut, reta_size * sizeof(uint32_t));
+	/* send virtchnl ops to configure RSS */
+	ret = idpf_vc_set_rss_lut(vport);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
+		goto out;
+	}
+out:
+	rte_free(lut);
+
+	return ret;
+}
+
+static int
+idpf_rss_reta_query(struct rte_eth_dev *dev,
+		    struct rte_eth_rss_reta_entry64 *reta_conf,
+		    uint16_t reta_size)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	uint16_t idx, shift;
+	int ret = 0;
+	uint16_t i;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (reta_size != vport->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			"(%d) doesn't match the number of hardware can "
+			"support (%d)", reta_size, vport->rss_lut_size);
+		return -EINVAL;
+	}
+
+	ret = idpf_vc_get_rss_lut(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS LUT");
+		return ret;
+	}
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			reta_conf[idx].reta[shift] = vport->rss_lut[i];
+	}
+
+	return 0;
+}
+
+static int
+idpf_rss_hash_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_conf *rss_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	int ret = 0;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
+		PMD_DRV_LOG(DEBUG, "No key to be configured");
+		goto skip_rss_key;
+	} else if (rss_conf->rss_key_len != vport->rss_key_size) {
+		PMD_DRV_LOG(ERR, "The size of hash key configured "
+				 "(%d) doesn't match the size of hardware can "
+				 "support (%d)",
+			    rss_conf->rss_key_len,
+			    vport->rss_key_size);
+		return -EINVAL;
+	}
+
+	rte_memcpy(vport->rss_key, rss_conf->rss_key,
+		   vport->rss_key_size);
+	ret = idpf_vc_set_rss_key(vport);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS key");
+		return ret;
+	}
+
+skip_rss_key:
+	ret = idpf_config_rss_hf(vport, rss_conf->rss_hf);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
+		return ret;
+	}
+
+	return 0;
+}
+
+static uint64_t
+idpf_map_general_rss_hf(uint64_t config_rss_hf, uint64_t last_general_rss_hf)
+{
+	uint64_t valid_rss_hf = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
+		uint64_t bit = BIT_ULL(i);
+
+		if (bit & config_rss_hf)
+			valid_rss_hf |= idpf_map_hena_rss[i];
+	}
+
+	if (valid_rss_hf & idpf_ipv4_rss)
+		valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV4;
+
+	if (valid_rss_hf & idpf_ipv6_rss)
+		valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV6;
+
+	return valid_rss_hf;
+}
+
+static int
+idpf_rss_hash_conf_get(struct rte_eth_dev *dev,
+		       struct rte_eth_rss_conf *rss_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	int ret = 0;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	ret = idpf_vc_get_rss_hash(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS hf");
+		return ret;
+	}
+
+	rss_conf->rss_hf = idpf_map_general_rss_hf(vport->rss_hf, vport->last_general_rss_hf);
+
+	if (!rss_conf->rss_key)
+		return 0;
+
+	ret = idpf_vc_get_rss_key(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS key");
+		return ret;
+	}
+
+	if (rss_conf->rss_key_len > vport->rss_key_size)
+		rss_conf->rss_key_len = vport->rss_key_size;
+
+	rte_memcpy(rss_conf->rss_key, vport->rss_key, rss_conf->rss_key_len);
+
+	return 0;
+}
+
 static int
 idpf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -693,6 +992,10 @@  static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.dev_supported_ptypes_get	= idpf_dev_supported_ptypes_get,
 	.stats_get			= idpf_dev_stats_get,
 	.stats_reset			= idpf_dev_stats_reset,
+	.reta_update			= idpf_rss_reta_update,
+	.reta_query			= idpf_rss_reta_query,
+	.rss_hash_update		= idpf_rss_hash_update,
+	.rss_hash_conf_get		= idpf_rss_hash_conf_get,
 };
 
 static uint16_t
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index d791d402fb..839a2bd82c 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -48,7 +48,8 @@ 
 		RTE_ETH_RSS_NONFRAG_IPV6_TCP    |	\
 		RTE_ETH_RSS_NONFRAG_IPV6_UDP    |	\
 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP   |	\
-		RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
+		RTE_ETH_RSS_NONFRAG_IPV6_OTHER  |	\
+		RTE_ETH_RSS_L2_PAYLOAD)
 
 #define IDPF_ADAPTER_NAME_LEN	(PCI_PRI_STR_SIZE + 1)