[dpdk-dev,v2,2/3] ring: remove duplicate fields in internal data struct

Message ID 1455794803-20383-3-git-send-email-ferruh.yigit@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Bruce Richardson

Commit Message

Ferruh Yigit Feb. 18, 2016, 11:26 a.m. UTC
  1- Remove duplicate nb_rx/tx_queues fields from internals

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 drivers/net/ring/rte_eth_ring.c | 57 ++++++++++++++++++-----------------------
 1 file changed, 25 insertions(+), 32 deletions(-)
  

Comments

Nicolás Pernas Maradei Feb. 22, 2016, 9:55 a.m. UTC | #1
Adding Acked-by line.

On 18/02/16 11:26, Ferruh Yigit wrote:
> 1- Remove duplicate nb_rx/tx_queues fields from internals
>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Acked-by: Nicolas Pernas Maradei <nicolas.pernas.maradei@emutex.com>
> ---
>  drivers/net/ring/rte_eth_ring.c | 57 ++++++++++++++++++-----------------------
>  1 file changed, 25 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
> index d92b088..fd87999 100644
> --- a/drivers/net/ring/rte_eth_ring.c
> +++ b/drivers/net/ring/rte_eth_ring.c
> @@ -59,9 +59,6 @@ struct ring_queue {
>  };
>  
>  struct pmd_internals {
> -	unsigned nb_rx_queues;
> -	unsigned nb_tx_queues;
> -
>  	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
>  	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
>  
> @@ -138,7 +135,7 @@ eth_dev_set_link_up(struct rte_eth_dev *dev)
>  }
>  
>  static int
> -eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
> +eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
>  				    uint16_t nb_rx_desc __rte_unused,
>  				    unsigned int socket_id __rte_unused,
>  				    const struct rte_eth_rxconf *rx_conf __rte_unused,
> @@ -165,40 +162,39 @@ static void
>  eth_dev_info(struct rte_eth_dev *dev,
>  		struct rte_eth_dev_info *dev_info)
>  {
> -	struct pmd_internals *internals = dev->data->dev_private;
>  	dev_info->driver_name = drivername;
>  	dev_info->max_mac_addrs = 1;
>  	dev_info->max_rx_pktlen = (uint32_t)-1;
> -	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
> -	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
> +	dev_info->max_rx_queues = dev->data->nb_rx_queues;
> +	dev_info->max_tx_queues = dev->data->nb_tx_queues;
>  	dev_info->min_rx_bufsize = 0;
>  	dev_info->pci_dev = NULL;
>  }
>  
>  static void
> -eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
> +eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>  {
>  	unsigned i;
>  	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
>  	const struct pmd_internals *internal = dev->data->dev_private;
>  
>  	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
> -			i < internal->nb_rx_queues; i++) {
> -		igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
> -		rx_total += igb_stats->q_ipackets[i];
> +			i < dev->data->nb_rx_queues; i++) {
> +		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
> +		rx_total += stats->q_ipackets[i];
>  	}
>  
>  	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
> -			i < internal->nb_tx_queues; i++) {
> -		igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
> -		igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
> -		tx_total += igb_stats->q_opackets[i];
> -		tx_err_total += igb_stats->q_errors[i];
> +			i < dev->data->nb_tx_queues; i++) {
> +		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
> +		stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
> +		tx_total += stats->q_opackets[i];
> +		tx_err_total += stats->q_errors[i];
>  	}
>  
> -	igb_stats->ipackets = rx_total;
> -	igb_stats->opackets = tx_total;
> -	igb_stats->oerrors = tx_err_total;
> +	stats->ipackets = rx_total;
> +	stats->opackets = tx_total;
> +	stats->oerrors = tx_err_total;
>  }
>  
>  static void
> @@ -206,9 +202,9 @@ eth_stats_reset(struct rte_eth_dev *dev)
>  {
>  	unsigned i;
>  	struct pmd_internals *internal = dev->data->dev_private;
> -	for (i = 0; i < internal->nb_rx_queues; i++)
> +	for (i = 0; i < dev->data->nb_rx_queues; i++)
>  		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
> -	for (i = 0; i < internal->nb_tx_queues; i++) {
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
>  		internal->tx_ring_queues[i].tx_pkts.cnt = 0;
>  		internal->tx_ring_queues[i].err_pkts.cnt = 0;
>  	}
> @@ -262,7 +258,6 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
>  	struct rte_eth_dev_data *data = NULL;
>  	struct pmd_internals *internals = NULL;
>  	struct rte_eth_dev *eth_dev = NULL;
> -
>  	unsigned i;
>  
>  	/* do some parameter checking */
> @@ -291,15 +286,15 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
>  		goto error;
>  	}
>  
> -	data->rx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_rx_queues,
> -			0, numa_node);
> +	data->rx_queues = rte_zmalloc_socket(name,
> +			sizeof(void *) * nb_rx_queues, 0, numa_node);
>  	if (data->rx_queues == NULL) {
>  		rte_errno = ENOMEM;
>  		goto error;
>  	}
>  
> -	data->tx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_tx_queues,
> -			0, numa_node);
> +	data->tx_queues = rte_zmalloc_socket(name,
> +			sizeof(void *) * nb_tx_queues, 0, numa_node);
>  	if (data->tx_queues == NULL) {
>  		rte_errno = ENOMEM;
>  		goto error;
> @@ -327,8 +322,6 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
>  	/* NOTE: we'll replace the data element, of originally allocated eth_dev
>  	 * so the rings are local per-process */
>  
> -	internals->nb_rx_queues = nb_rx_queues;
> -	internals->nb_tx_queues = nb_tx_queues;
>  	for (i = 0; i < nb_rx_queues; i++) {
>  		internals->rx_ring_queues[i].rng = rx_queues[i];
>  		data->rx_queues[i] = &internals->rx_ring_queues[i];
> @@ -349,10 +342,10 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
>  	eth_dev->data = data;
>  	eth_dev->driver = NULL;
>  	eth_dev->dev_ops = &ops;
> -	eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
> -	eth_dev->data->kdrv = RTE_KDRV_NONE;
> -	eth_dev->data->drv_name = drivername;
> -	eth_dev->data->numa_node = numa_node;
> +	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
> +	data->kdrv = RTE_KDRV_NONE;
> +	data->drv_name = drivername;
> +	data->numa_node = numa_node;
>  
>  	TAILQ_INIT(&(eth_dev->link_intr_cbs));
>
  
Bruce Richardson Feb. 23, 2016, 3:26 p.m. UTC | #2
On Thu, Feb 18, 2016 at 11:26:42AM +0000, Ferruh Yigit wrote:
> 1- Remove duplicate nb_rx/tx_queues fields from internals
> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> ---
>  drivers/net/ring/rte_eth_ring.c | 57 ++++++++++++++++++-----------------------
>  1 file changed, 25 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
> index d92b088..fd87999 100644
> --- a/drivers/net/ring/rte_eth_ring.c
> +++ b/drivers/net/ring/rte_eth_ring.c
> @@ -59,9 +59,6 @@ struct ring_queue {
>  };
>  
>  struct pmd_internals {
> -	unsigned nb_rx_queues;
> -	unsigned nb_tx_queues;
> -
>  	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
>  	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
>  
> @@ -138,7 +135,7 @@ eth_dev_set_link_up(struct rte_eth_dev *dev)
>  }
>  
>  static int
> -eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
> +eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
>  				    uint16_t nb_rx_desc __rte_unused,
>  				    unsigned int socket_id __rte_unused,
>  				    const struct rte_eth_rxconf *rx_conf __rte_unused,
> @@ -165,40 +162,39 @@ static void
>  eth_dev_info(struct rte_eth_dev *dev,
>  		struct rte_eth_dev_info *dev_info)
>  {
> -	struct pmd_internals *internals = dev->data->dev_private;
>  	dev_info->driver_name = drivername;
>  	dev_info->max_mac_addrs = 1;
>  	dev_info->max_rx_pktlen = (uint32_t)-1;
> -	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
> -	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
> +	dev_info->max_rx_queues = dev->data->nb_rx_queues;
> +	dev_info->max_tx_queues = dev->data->nb_tx_queues;

I'm still not convinced this is correct. What happens if a ring PMD is created
with 16 queues (i.e. backed by 16 rings), and then the user uses
rte_eth_dev_configure to only actually use 4 queues? The fact that the internal
array still has 16 queues will be lost, and the device will only ever report
4 as the max number it can support.

Regards,
/Bruce
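
A minimal sketch of the scenario described above, assuming the DPDK APIs of
this period (rte_eth_from_rings(), rte_eth_dev_configure(),
rte_eth_dev_info_get()); the ring sizes and names are illustrative and error
handling is mostly omitted:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_ring.h>
#include <rte_ethdev.h>
#include <rte_eth_ring.h>

#define NUM_RINGS 16

int
main(int argc, char **argv)
{
	static struct rte_eth_conf port_conf;	/* all defaults */
	struct rte_ring *r[NUM_RINGS];
	struct rte_eth_dev_info info;
	char ring_name[RTE_RING_NAMESIZE];
	int port;
	unsigned i;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* back the virtual device with 16 rings */
	for (i = 0; i < NUM_RINGS; i++) {
		snprintf(ring_name, sizeof(ring_name), "sketch_ring_%u", i);
		r[i] = rte_ring_create(ring_name, 1024, rte_socket_id(),
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (r[i] == NULL)
			return -1;
	}

	/* port created with 16 rx and 16 tx queues */
	port = rte_eth_from_rings("sketch_port", r, NUM_RINGS,
			r, NUM_RINGS, rte_socket_id());
	if (port < 0)
		return -1;

	/* application chooses to use only 4 of them for now */
	if (rte_eth_dev_configure(port, 4, 4, &port_conf) < 0)
		return -1;

	/* with this patch, max_rx_queues mirrors dev->data->nb_rx_queues,
	 * so this reports 4 rather than the 16 rings the port is backed by */
	rte_eth_dev_info_get(port, &info);
	printf("max_rx_queues = %u\n", info.max_rx_queues);

	return 0;
}

Before the change, the same query reports the creation-time maximum kept in
pmd_internals, so the port could still be re-configured back up to 16 queues.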
  
Ferruh Yigit Feb. 23, 2016, 3:58 p.m. UTC | #3
On 2/23/2016 3:26 PM, Bruce Richardson wrote:
> On Thu, Feb 18, 2016 at 11:26:42AM +0000, Ferruh Yigit wrote:
>> 1- Remove duplicate nb_rx/tx_queues fields from internals
>>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>> ---
>>  drivers/net/ring/rte_eth_ring.c | 57 ++++++++++++++++++-----------------------
>>  1 file changed, 25 insertions(+), 32 deletions(-)
>>
>> diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
>> index d92b088..fd87999 100644
>> --- a/drivers/net/ring/rte_eth_ring.c
>> +++ b/drivers/net/ring/rte_eth_ring.c
>> @@ -59,9 +59,6 @@ struct ring_queue {
>>  };
>>  
>>  struct pmd_internals {
>> -	unsigned nb_rx_queues;
>> -	unsigned nb_tx_queues;
>> -
>>  	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
>>  	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
>>  
>> @@ -138,7 +135,7 @@ eth_dev_set_link_up(struct rte_eth_dev *dev)
>>  }
>>  
>>  static int
>> -eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
>> +eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
>>  				    uint16_t nb_rx_desc __rte_unused,
>>  				    unsigned int socket_id __rte_unused,
>>  				    const struct rte_eth_rxconf *rx_conf __rte_unused,
>> @@ -165,40 +162,39 @@ static void
>>  eth_dev_info(struct rte_eth_dev *dev,
>>  		struct rte_eth_dev_info *dev_info)
>>  {
>> -	struct pmd_internals *internals = dev->data->dev_private;
>>  	dev_info->driver_name = drivername;
>>  	dev_info->max_mac_addrs = 1;
>>  	dev_info->max_rx_pktlen = (uint32_t)-1;
>> -	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
>> -	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
>> +	dev_info->max_rx_queues = dev->data->nb_rx_queues;
>> +	dev_info->max_tx_queues = dev->data->nb_tx_queues;
> 
> I'm still not convinced this is correct. What happens if a ring PMD is created
> with 16 queues (i.e. backed by 16 rings), and then the user uses
> rte_eth_dev_configure to only actually use 4 queues?

Right, since the user explicitly set 4 queues.

> The fact that the internal
> array still has 16 queues will be lost, 

Not lost exactly, the app can re-configure with rte_eth_dev_configure() to
use 16 queues again and it will work fine.

> and the device will only ever report
> 4 as the max number it can support.

I think this is the same for all PMDs, and data->nb_xx_queues reports the
number of configured queues, not the max number; and indeed for the ring PMD
the max queue number is hardcoded in the config file.

I guess what you refer to is the "number of queues used in the first
configuration"; is there a use case for saving this value? And if there is,
does it make more sense for the application to save it instead of the PMD?
For your sample case the application creates the rings with the
rte_eth_from_rings() API, so the app already knows the initially configured
queue number.

Thanks,
ferruh
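
A sketch of the application-side bookkeeping Ferruh suggests here: the
application created the rings, so it can record the original count itself and
use it as the bound when re-configuring. The struct and helper names are
hypothetical, and the sketch assumes the later rte_eth_dev_configure() call is
accepted -- which is the point Bruce disputes below, since ethdev validates
the request against the PMD's reported max_rx_queues.

#include <errno.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* hypothetical app-side record of what was passed to rte_eth_from_rings() */
struct app_ring_port {
	int port_id;
	uint16_t created_rx_queues;
	uint16_t created_tx_queues;
};

/* re-configure the port to any queue count up to the number of rings the
 * application originally created for it */
static int
app_reconfigure(struct app_ring_port *p, uint16_t nb_queues,
		const struct rte_eth_conf *conf)
{
	if (nb_queues > p->created_rx_queues ||
			nb_queues > p->created_tx_queues)
		return -EINVAL;
	return rte_eth_dev_configure(p->port_id, nb_queues, nb_queues, conf);
}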
  
Bruce Richardson Feb. 23, 2016, 4:06 p.m. UTC | #4
On Tue, Feb 23, 2016 at 03:58:50PM +0000, Ferruh Yigit wrote:
> On 2/23/2016 3:26 PM, Bruce Richardson wrote:
> > On Thu, Feb 18, 2016 at 11:26:42AM +0000, Ferruh Yigit wrote:
> >> 1- Remove duplicate nb_rx/tx_queues fields from internals
> >>
> >> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> >> ---
> >>  drivers/net/ring/rte_eth_ring.c | 57 ++++++++++++++++++-----------------------
> >>  1 file changed, 25 insertions(+), 32 deletions(-)
> >>
> >> diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
> >> index d92b088..fd87999 100644
> >> --- a/drivers/net/ring/rte_eth_ring.c
> >> +++ b/drivers/net/ring/rte_eth_ring.c
> >> @@ -59,9 +59,6 @@ struct ring_queue {
> >>  };
> >>  
> >>  struct pmd_internals {
> >> -	unsigned nb_rx_queues;
> >> -	unsigned nb_tx_queues;
> >> -
> >>  	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
> >>  	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
> >>  
> >> @@ -138,7 +135,7 @@ eth_dev_set_link_up(struct rte_eth_dev *dev)
> >>  }
> >>  
> >>  static int
> >> -eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
> >> +eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
> >>  				    uint16_t nb_rx_desc __rte_unused,
> >>  				    unsigned int socket_id __rte_unused,
> >>  				    const struct rte_eth_rxconf *rx_conf __rte_unused,
> >> @@ -165,40 +162,39 @@ static void
> >>  eth_dev_info(struct rte_eth_dev *dev,
> >>  		struct rte_eth_dev_info *dev_info)
> >>  {
> >> -	struct pmd_internals *internals = dev->data->dev_private;
> >>  	dev_info->driver_name = drivername;
> >>  	dev_info->max_mac_addrs = 1;
> >>  	dev_info->max_rx_pktlen = (uint32_t)-1;
> >> -	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
> >> -	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
> >> +	dev_info->max_rx_queues = dev->data->nb_rx_queues;
> >> +	dev_info->max_tx_queues = dev->data->nb_tx_queues;
> > 
> > I'm still not convinced this is correct. What happens if a ring PMD is created
> > with 16 queues (i.e. backed by 16 rings), and then the user uses
> > rte_eth_dev_configure to only actually use 4 queues?
> 
> Right, since the user explicitly set 4 queues.
> 
> > The fact that the internal
> > array still has 16 queues will be lost, 
> 
> Not lost exactly, the app can re-configure with rte_eth_dev_configure() to
> use 16 queues again and it will work fine.
> 
No, it can't do that, because the vNIC is reporting that the max number of queues
supported is now 4, since you now set max_rx_queues = nb_rx_queues.

> > and the device will only ever report
> > 4 as the max number it can support.
> 
> I think this is the same for all PMDs, and data->nb_xx_queues reports the
> number of configured queues, not the max number; and indeed for the ring PMD
> the max queue number is hardcoded in the config file.
Yes, it is consistent with other PMDs as it is. The variables you think are
duplicates and want to remove are the ones that track the max number of queues
to be reported out, so that the app can know how many it can use in a call
to reconfigure.

/Bruce
> 
> I guess what you refer to is the "number of queues used in the first
> configuration"; is there a use case for saving this value? And if there is,
> does it make more sense for the application to save it instead of the PMD?
> For your sample case the application creates the rings with the
> rte_eth_from_rings() API, so the app already knows the initially configured
> queue number.
> 
> Thanks,
> ferruh
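
For reference, a minimal sketch of the split Bruce describes, modelled on the
pre-patch code visible in the diff below: the PMD keeps the creation-time
maximum in its private data and reports that in dev_info, while
dev->data->nb_rx_queues only tracks the currently configured count. Field
names are illustrative and the rest of the driver is omitted.

#include <rte_ethdev.h>

struct pmd_internals {
	unsigned max_rx_queues;	/* fixed when rte_eth_from_rings() creates the port */
	unsigned max_tx_queues;
	/* ... per-queue ring state omitted ... */
};

static void
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	/* report the creation-time maximum, not the configured count, so the
	 * application can later scale back up with rte_eth_dev_configure() */
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
}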
  

Patch

diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index d92b088..fd87999 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -59,9 +59,6 @@  struct ring_queue {
 };
 
 struct pmd_internals {
-	unsigned nb_rx_queues;
-	unsigned nb_tx_queues;
-
 	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
 	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];
 
@@ -138,7 +135,7 @@  eth_dev_set_link_up(struct rte_eth_dev *dev)
 }
 
 static int
-eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
+eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 				    uint16_t nb_rx_desc __rte_unused,
 				    unsigned int socket_id __rte_unused,
 				    const struct rte_eth_rxconf *rx_conf __rte_unused,
@@ -165,40 +162,39 @@  static void
 eth_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	struct pmd_internals *internals = dev->data->dev_private;
 	dev_info->driver_name = drivername;
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_rx_pktlen = (uint32_t)-1;
-	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
-	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
+	dev_info->max_rx_queues = dev->data->nb_rx_queues;
+	dev_info->max_tx_queues = dev->data->nb_tx_queues;
 	dev_info->min_rx_bufsize = 0;
 	dev_info->pci_dev = NULL;
 }
 
 static void
-eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	unsigned i;
 	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
 	const struct pmd_internals *internal = dev->data->dev_private;
 
 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
-			i < internal->nb_rx_queues; i++) {
-		igb_stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
-		rx_total += igb_stats->q_ipackets[i];
+			i < dev->data->nb_rx_queues; i++) {
+		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
+		rx_total += stats->q_ipackets[i];
 	}
 
 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
-			i < internal->nb_tx_queues; i++) {
-		igb_stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
-		igb_stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
-		tx_total += igb_stats->q_opackets[i];
-		tx_err_total += igb_stats->q_errors[i];
+			i < dev->data->nb_tx_queues; i++) {
+		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
+		stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
+		tx_total += stats->q_opackets[i];
+		tx_err_total += stats->q_errors[i];
 	}
 
-	igb_stats->ipackets = rx_total;
-	igb_stats->opackets = tx_total;
-	igb_stats->oerrors = tx_err_total;
+	stats->ipackets = rx_total;
+	stats->opackets = tx_total;
+	stats->oerrors = tx_err_total;
 }
 
 static void
@@ -206,9 +202,9 @@  eth_stats_reset(struct rte_eth_dev *dev)
 {
 	unsigned i;
 	struct pmd_internals *internal = dev->data->dev_private;
-	for (i = 0; i < internal->nb_rx_queues; i++)
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
-	for (i = 0; i < internal->nb_tx_queues; i++) {
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		internal->tx_ring_queues[i].tx_pkts.cnt = 0;
 		internal->tx_ring_queues[i].err_pkts.cnt = 0;
 	}
@@ -262,7 +258,6 @@  rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
 	struct rte_eth_dev_data *data = NULL;
 	struct pmd_internals *internals = NULL;
 	struct rte_eth_dev *eth_dev = NULL;
-
 	unsigned i;
 
 	/* do some parameter checking */
@@ -291,15 +286,15 @@  rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
 		goto error;
 	}
 
-	data->rx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_rx_queues,
-			0, numa_node);
+	data->rx_queues = rte_zmalloc_socket(name,
+			sizeof(void *) * nb_rx_queues, 0, numa_node);
 	if (data->rx_queues == NULL) {
 		rte_errno = ENOMEM;
 		goto error;
 	}
 
-	data->tx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_tx_queues,
-			0, numa_node);
+	data->tx_queues = rte_zmalloc_socket(name,
+			sizeof(void *) * nb_tx_queues, 0, numa_node);
 	if (data->tx_queues == NULL) {
 		rte_errno = ENOMEM;
 		goto error;
@@ -327,8 +322,6 @@  rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
 	/* NOTE: we'll replace the data element, of originally allocated eth_dev
 	 * so the rings are local per-process */
 
-	internals->nb_rx_queues = nb_rx_queues;
-	internals->nb_tx_queues = nb_tx_queues;
 	for (i = 0; i < nb_rx_queues; i++) {
 		internals->rx_ring_queues[i].rng = rx_queues[i];
 		data->rx_queues[i] = &internals->rx_ring_queues[i];
@@ -349,10 +342,10 @@  rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
 	eth_dev->data = data;
 	eth_dev->driver = NULL;
 	eth_dev->dev_ops = &ops;
-	eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
-	eth_dev->data->kdrv = RTE_KDRV_NONE;
-	eth_dev->data->drv_name = drivername;
-	eth_dev->data->numa_node = numa_node;
+	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+	data->kdrv = RTE_KDRV_NONE;
+	data->drv_name = drivername;
+	data->numa_node = numa_node;
 
 	TAILQ_INIT(&(eth_dev->link_intr_cbs));