[dpdk-dev,PATCHv4,4/9] null: virtual dynamic rss configuration

Message ID 1436981189-3320-5-git-send-email-tomaszx.kulasek@intel.com (mailing list archive)
State Superseded, archived

Commit Message

Tomasz Kulasek July 15, 2015, 5:26 p.m. UTC
  This implementation allows setting and reading the RSS configuration of the
null device, and is used to validate correct propagation of the values to the
slaves in the unit tests for dynamic RSS configuration for bonding.

Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
---
 drivers/net/null/rte_eth_null.c |  116 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 116 insertions(+)
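
As a sketch of how the bonding unit tests can exercise this through the
generic ethdev API (the port ids and the ETH_RSS_IP choice below are
hypothetical, not taken from the patch):

	/* Set RSS hash functions on the bonded port, then read the
	 * configuration back from a null slave to verify propagation. */
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the current 40-byte key */
		.rss_hf  = ETH_RSS_IP,
	};

	rte_eth_dev_rss_hash_update(bond_port, &rss_conf);

	memset(&rss_conf, 0, sizeof(rss_conf));
	rte_eth_dev_rss_hash_conf_get(null_slave_port, &rss_conf);
	/* expect rss_conf.rss_hf == ETH_RSS_IP if propagation worked */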
  

Comments

Tetsuya Mukawa Sept. 29, 2015, 2:24 a.m. UTC | #1
On 2015/07/16 2:26, Tomasz Kulasek wrote:
> This implementation allows setting and reading the RSS configuration of the
> null device, and is used to validate correct propagation of the values to the
> slaves in the unit tests for dynamic RSS configuration for bonding.
>
> Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
> ---
>  drivers/net/null/rte_eth_null.c |  116 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 116 insertions(+)
>
> diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
> index 39ffcde..f393422 100644
> --- a/drivers/net/null/rte_eth_null.c
> +++ b/drivers/net/null/rte_eth_null.c
> +static int
> +eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
> +{
> +	struct pmd_internals *internal = dev->data->dev_private;
> +
> +	rte_spinlock_lock(&internal->rss_lock);
> +
> +	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
> +		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
> +				rss_conf->rss_hf & internal->flow_type_rss_offloads;
> +
> +	if (rss_conf->rss_key)
> +		memcpy(internal->rss_key, rss_conf->rss_key, 40);
> +
> +	rte_spinlock_unlock(&internal->rss_lock);
> +
> +	return 0;
> +}
> +
> +static int
> +eth_rss_hash_conf_get(struct rte_eth_dev *dev,
> +		struct rte_eth_rss_conf *rss_conf)
> +{
> +	struct pmd_internals *internal = dev->data->dev_private;
> +
> +	rte_spinlock_lock(&internal->rss_lock);
> +
> +	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
> +	if (rss_conf->rss_key)
> +		memcpy(rss_conf->rss_key, internal->rss_key, 40);
> +
> +	rte_spinlock_unlock(&internal->rss_lock);
> +
> +	return 0;
> +}
> +
>  static const struct eth_dev_ops ops = {
>  	.dev_start = eth_dev_start,
>  	.dev_stop = eth_dev_stop,
> @@ -436,6 +547,11 @@ eth_dev_null_create(const char *name,
>  	internals->packet_copy = packet_copy;
>  	internals->numa_node = numa_node;
>  
> +	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
> +	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
> +
> +	memcpy(internals->rss_key, default_rss_key, 40);
> +
>  	eth_drv->pci_drv.name = drivername;
>  
>  	pci_dev->numa_node = numa_node;

Hi Tomasz,

I am just curious. Is it possible to use rte_memcpy instead of memcpy?
If we can, rte_memcpy may be faster.

Tetsuya
  
Tomasz Kulasek Sept. 29, 2015, 10:04 a.m. UTC | #2
> Hi Tomasz,
> 
> I am just curious. Is it possible to use rte_memcpy instead of memcpy?
> If we can, rte_memcpy may be faster.
> 
> Tetsuya

Hi Tetsuya,

I've just tested it and it seems to work, so there are no objections to using rte_memcpy.

Tomasz.
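
For reference, the drop-in change discussed above is a one-liner at each copy
site (a sketch only; <rte_memcpy.h> is already included in this file):

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);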
  
Michal Jastrzebski Oct. 12, 2015, 9:05 a.m. UTC | #3
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Tetsuya Mukawa
> Sent: Tuesday, September 29, 2015 4:25 AM
> To: Kulasek, TomaszX; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCHv4 4/9] null: virtual dynamic rss configuration
> 
> On 2015/07/16 2:26, Tomasz Kulasek wrote:
> > This implementation allows setting and reading the RSS configuration of the
> > null device, and is used to validate correct propagation of the values to the
> > slaves in the unit tests for dynamic RSS configuration for bonding.
> >
> > Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
> > ---
> >  drivers/net/null/rte_eth_null.c |  116 +++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 116 insertions(+)
> >
> > diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
> > index 39ffcde..f393422 100644
> > --- a/drivers/net/null/rte_eth_null.c
> > +++ b/drivers/net/null/rte_eth_null.c
> > +static int
> > +eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
> > +{
> > +	struct pmd_internals *internal = dev->data->dev_private;
> > +
> > +	rte_spinlock_lock(&internal->rss_lock);
> > +
> > +	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
> > +		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
> > +				rss_conf->rss_hf & internal->flow_type_rss_offloads;
> > +
> > +	if (rss_conf->rss_key)
> > +		memcpy(internal->rss_key, rss_conf->rss_key, 40);
> > +
> > +	rte_spinlock_unlock(&internal->rss_lock);
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +eth_rss_hash_conf_get(struct rte_eth_dev *dev,
> > +		struct rte_eth_rss_conf *rss_conf)
> > +{
> > +	struct pmd_internals *internal = dev->data->dev_private;
> > +
> > +	rte_spinlock_lock(&internal->rss_lock);
> > +
> > +	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
> > +	if (rss_conf->rss_key)
> > +		memcpy(rss_conf->rss_key, internal->rss_key, 40);
> > +
> > +	rte_spinlock_unlock(&internal->rss_lock);
> > +
> > +	return 0;
> > +}
> > +
> >  static const struct eth_dev_ops ops = {
> >  	.dev_start = eth_dev_start,
> >  	.dev_stop = eth_dev_stop,
> > @@ -436,6 +547,11 @@ eth_dev_null_create(const char *name,
> >  	internals->packet_copy = packet_copy;
> >  	internals->numa_node = numa_node;
> >
> > +	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
> > +	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
> > +
> > +	memcpy(internals->rss_key, default_rss_key, 40);
> > +
> >  	eth_drv->pci_drv.name = drivername;
> >
> >  	pci_dev->numa_node = numa_node;
> 
> Hi Tomasz,
> 
> I am just curious. Is it possible to use rte_memcpy instead of memcpy?
> If we can, rte_memcpy may be faster.
> 
> Tetsuya

Hi Tetsuya,
Could you please review v5 that Tomasz sent, and if you agree with this implementation, could you also ACK this patch-set?
  

Patch

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 39ffcde..f393422 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -37,6 +37,8 @@ 
 #include <rte_memcpy.h>
 #include <rte_dev.h>
 #include <rte_kvargs.h>
+#include <rte_eth_null.h>
+#include <rte_spinlock.h>
 
 #define ETH_NULL_PACKET_SIZE_ARG	"size"
 #define ETH_NULL_PACKET_COPY_ARG	"copy"
@@ -73,6 +75,17 @@  struct pmd_internals {
 
 	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
 	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
+
+	/** Bit mask of RSS offloads, the bit offset also means flow type */
+	uint64_t flow_type_rss_offloads;
+
+	rte_spinlock_t rss_lock;
+
+	uint16_t reta_size;
+	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
+			RTE_RETA_GROUP_SIZE];
+
+	uint8_t rss_key[40];                /**< 40-byte hash key. */
 };
 
 
@@ -283,6 +296,8 @@  eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
 	dev_info->min_rx_bufsize = 0;
 	dev_info->pci_dev = NULL;
+	dev_info->reta_size = internals->reta_size;
+	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
 }
 
 static void
@@ -363,6 +378,91 @@  static int
 eth_link_update(struct rte_eth_dev *dev __rte_unused,
 		int wait_to_complete __rte_unused) { return 0; }
 
+static int
+eth_rss_reta_update(struct rte_eth_dev *dev,
+		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+	int i, j;
+	struct pmd_internals *internal = dev->data->dev_private;
+
+	if (reta_size != internal->reta_size)
+		return -EINVAL;
+
+	rte_spinlock_lock(&internal->rss_lock);
+
+	/* Copy RETA table */
+	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+		internal->reta_conf[i].mask = reta_conf[i].mask;
+		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			if ((reta_conf[i].mask >> j) & 0x01)
+				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
+	}
+
+	rte_spinlock_unlock(&internal->rss_lock);
+
+	return 0;
+}
+
+static int
+eth_rss_reta_query(struct rte_eth_dev *dev,
+		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+	int i, j;
+	struct pmd_internals *internal = dev->data->dev_private;
+
+	if (reta_size != internal->reta_size)
+		return -EINVAL;
+
+	rte_spinlock_lock(&internal->rss_lock);
+
+	/* Copy RETA table */
+	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
+		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			if ((reta_conf[i].mask >> j) & 0x01)
+				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
+	}
+
+	rte_spinlock_unlock(&internal->rss_lock);
+
+	return 0;
+}
+
+static int
+eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internal = dev->data->dev_private;
+
+	rte_spinlock_lock(&internal->rss_lock);
+
+	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
+		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
+				rss_conf->rss_hf & internal->flow_type_rss_offloads;
+
+	if (rss_conf->rss_key)
+		memcpy(internal->rss_key, rss_conf->rss_key, 40);
+
+	rte_spinlock_unlock(&internal->rss_lock);
+
+	return 0;
+}
+
+static int
+eth_rss_hash_conf_get(struct rte_eth_dev *dev,
+		struct rte_eth_rss_conf *rss_conf)
+{
+	struct pmd_internals *internal = dev->data->dev_private;
+
+	rte_spinlock_lock(&internal->rss_lock);
+
+	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+	if (rss_conf->rss_key)
+		memcpy(rss_conf->rss_key, internal->rss_key, 40);
+
+	rte_spinlock_unlock(&internal->rss_lock);
+
+	return 0;
+}
+
 static const struct eth_dev_ops ops = {
 	.dev_start = eth_dev_start,
 	.dev_stop = eth_dev_stop,
@@ -375,6 +475,10 @@  static const struct eth_dev_ops ops = {
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
+	.reta_update = eth_rss_reta_update,
+	.reta_query = eth_rss_reta_query,
+	.rss_hash_update = eth_rss_hash_update,
+	.rss_hash_conf_get = eth_rss_hash_conf_get
 };
 
 static int
@@ -391,6 +495,13 @@  eth_dev_null_create(const char *name,
 	struct pmd_internals *internals = NULL;
 	struct rte_eth_dev *eth_dev = NULL;
 
+	static const uint8_t default_rss_key[40] = {
+		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
+		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
+		0xBE, 0xAC, 0x01, 0xFA
+	};
+
 	if (name == NULL)
 		return -EINVAL;
 
@@ -436,6 +547,11 @@  eth_dev_null_create(const char *name,
 	internals->packet_copy = packet_copy;
 	internals->numa_node = numa_node;
 
+	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
+	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
+
+	memcpy(internals->rss_key, default_rss_key, 40);
+
 	eth_drv->pci_drv.name = drivername;
 
 	pci_dev->numa_node = numa_node;
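
For completeness, a sketch of how an application could drive the new
reta_update/reta_query ops through the generic ethdev API (port_id and
nb_rx_queues below are hypothetical values, not part of the patch):

	uint8_t port_id = 0;		/* hypothetical null device port */
	uint16_t nb_rx_queues = 4;	/* hypothetical RX queue count */
	uint16_t i;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
		/* select every entry and spread them over the RX queues */
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
				1ULL << (i % RTE_RETA_GROUP_SIZE);
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
				i % nb_rx_queues;
	}

	/* write the table, then read it back for verification */
	rte_eth_dev_rss_reta_update(port_id, reta_conf, ETH_RSS_RETA_SIZE_128);
	rte_eth_dev_rss_reta_query(port_id, reta_conf, ETH_RSS_RETA_SIZE_128);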