[dpdk-dev,v3] net/i40e: mbuf alloc failed counter not incremented

Message ID 20170425122845.154835-1-allain.legacy@windriver.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Allain Legacy April 25, 2017, 12:28 p.m. UTC
  From: Matt Peters <matt.peters@windriver.com>

When an mbuf alloc fails during the mempool get operation for the
i40e bulk alloc receive function, the rx_mbuf_alloc_failed counter
is not incremented to record the error.

This fix ensures consistency with the other i40e receive procedures and
other net drivers.

Signed-off-by: Matt Peters <matt.peters@windriver.com>
Signed-off-by: Allain Legacy <allain.legacy@windriver.com>
---
 drivers/net/i40e/i40e_rxtx.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)
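
The counter updated here is exposed to applications through the generic
ethdev statistics API as the rx_nombuf field of struct rte_eth_stats, so the
new increments become visible via rte_eth_stats_get(). In the bulk alloc path
the counter is advanced by rxq->rx_free_thresh at a time, since
i40e_rx_alloc_bufs() requests that many mbufs from the mempool in a single
call. A minimal sketch of reading the counter from an application, assuming a
valid, started port (uint16_t port ids as in current DPDK; older releases
used uint8_t):

    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    /* Illustrative helper (not part of the patch): report how many RX mbuf
     * allocations have failed on a port. rx_nombuf mirrors
     * dev->data->rx_mbuf_alloc_failed, which this patch now updates in all
     * i40e receive paths.
     */
    static void
    report_rx_mbuf_alloc_failed(uint16_t port_id)
    {
            struct rte_eth_stats stats;

            memset(&stats, 0, sizeof(stats));
            rte_eth_stats_get(port_id, &stats);
            printf("port %u: rx_nombuf=%" PRIu64 "\n",
                   port_id, stats.rx_nombuf);
    }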
  

Comments

Zhang, Helin April 25, 2017, 5:31 p.m. UTC | #1
> -----Original Message-----
> From: Allain Legacy [mailto:allain.legacy@windriver.com]
> Sent: Tuesday, April 25, 2017 8:29 PM
> To: Zhang, Helin; Wu, Jingjing
> Cc: dev@dpdk.org; Peters, Matt (Wind River)
> Subject: [PATCH v3] net/i40e: mbuf alloc failed counter not incremented
> 
> From: Matt Peters <matt.peters@windriver.com>
> 
> When an mbuf alloc fails during the mempool get operation for the i40e bulk
> alloc receive function, the rx_mbuf_alloc_failed counter is not incremented to
> record the error.
> 
> This fix ensures consistency with the other i40e receive procedures and other
> net drivers.
> 
> Signed-off-by: Matt Peters <matt.peters@windriver.com>
> Signed-off-by: Allain Legacy <allain.legacy@windriver.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>

  
Ferruh Yigit April 26, 2017, 8:21 a.m. UTC | #2
On 4/25/2017 6:31 PM, Zhang, Helin wrote:
> 
> 
>> -----Original Message-----
>> From: Allain Legacy [mailto:allain.legacy@windriver.com]
>> Sent: Tuesday, April 25, 2017 8:29 PM
>> To: Zhang, Helin; Wu, Jingjing
>> Cc: dev@dpdk.org; Peters, Matt (Wind River)
>> Subject: [PATCH v3] net/i40e: mbuf alloc failed counter not incremented
>>
>> From: Matt Peters <matt.peters@windriver.com>
>>
>> When an mbuf alloc fails during the mempool get operation for the i40e bulk
>> alloc receive function, the rx_mbuf_alloc_failed counter is not incremented to
>> record the error.
>>
>> This fix ensures consistency with the other i40e receive procedures and other
>> net drivers.
>>
>> Signed-off-by: Matt Peters <matt.peters@windriver.com>
>> Signed-off-by: Allain Legacy <allain.legacy@windriver.com>
> Acked-by: Helin Zhang <helin.zhang@intel.com>

Applied to dpdk-next-net/master, thanks.
  

Patch

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index e5471b143..74055bb04 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -610,6 +610,7 @@  static inline uint16_t
 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+	struct rte_eth_dev *dev;
 	uint16_t nb_rx = 0;
 
 	if (!nb_pkts)
@@ -627,9 +628,10 @@  rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (i40e_rx_alloc_bufs(rxq) != 0) {
 			uint16_t i, j;
 
-			PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
-				   "port_id=%u, queue_id=%u",
-				   rxq->port_id, rxq->queue_id);
+			dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+			dev->data->rx_mbuf_alloc_failed +=
+				rxq->rx_free_thresh;
+
 			rxq->rx_nb_avail = 0;
 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
 			for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
@@ -691,6 +693,7 @@  i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	union i40e_rx_desc rxd;
 	struct i40e_rx_entry *sw_ring;
 	struct i40e_rx_entry *rxe;
+	struct rte_eth_dev *dev;
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *nmb;
 	uint16_t nb_rx;
@@ -721,10 +724,13 @@  i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			break;
 
 		nmb = rte_mbuf_raw_alloc(rxq->mp);
-		if (unlikely(!nmb))
+		if (unlikely(!nmb)) {
+			dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+			dev->data->rx_mbuf_alloc_failed++;
 			break;
-		rxd = *rxdp;
+		}
 
+		rxd = *rxdp;
 		nb_hold++;
 		rxe = &sw_ring[rx_id];
 		rx_id++;
@@ -816,6 +822,7 @@  i40e_recv_scattered_pkts(void *rx_queue,
 	struct rte_mbuf *nmb, *rxm;
 	uint16_t rx_id = rxq->rx_tail;
 	uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+	struct rte_eth_dev *dev;
 	uint32_t rx_status;
 	uint64_t qword1;
 	uint64_t dma_addr;
@@ -833,8 +840,12 @@  i40e_recv_scattered_pkts(void *rx_queue,
 			break;
 
 		nmb = rte_mbuf_raw_alloc(rxq->mp);
-		if (unlikely(!nmb))
+		if (unlikely(!nmb)) {
+			dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+			dev->data->rx_mbuf_alloc_failed++;
 			break;
+		}
+
 		rxd = *rxdp;
 		nb_hold++;
 		rxe = &sw_ring[rx_id];