[v2] net/ice: fix scalar Rx and Tx path segment

Message ID 20221109125609.724612-1-mingjinx.ye@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series [v2] net/ice: fix scalar Rx and Tx path segment

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/github-robot: build success github build: passed
ci/iol-intel-Functional success Functional Testing PASS
ci/intel-Testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS

Commit Message

Mingjin Ye Nov. 9, 2022, 12:56 p.m. UTC
  The CRC is stripped by the hardware in the scattered Rx path. If the
last buffer's packet length is '0', the scalar Tx path would send an
empty buffer, which causes the Tx queue to overflow.

This patch adds a check on the last buffer's length to fix this issue,
so that the mbuf associated with the last buffer is freed if that
buffer is empty.

Fixes: 6eac0b7fde95 ("net/ice: support advance Rx/Tx")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>

v2:
	* Fix log level in ice_rxtx.c source file.
---
 drivers/net/ice/ice_rxtx.c | 53 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 51 insertions(+), 2 deletions(-)
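
The failure mode described in the commit message can be pictured as a received
mbuf chain whose final segment carries no data. Below is a minimal standalone
sketch of the trimming idea; the helper name trim_empty_tail() is invented for
illustration, while the patch performs the equivalent unlink/free inline in
ice_recv_scattered_pkts():

#include <rte_mbuf.h>

/* Illustration only: drop a zero-length trailing segment so the scalar
 * Tx path never sees an empty buffer.
 */
static void
trim_empty_tail(struct rte_mbuf *pkt)
{
	struct rte_mbuf *prev = pkt;
	struct rte_mbuf *seg = pkt->next;

	/* Walk to the last segment, remembering its predecessor. */
	while (seg != NULL && seg->next != NULL) {
		prev = seg;
		seg = seg->next;
	}

	/* After hardware CRC stripping, the scattered Rx path can leave a
	 * final segment with data_len == 0; unlink and free it.
	 */
	if (seg != NULL && seg->data_len == 0) {
		prev->next = NULL;
		pkt->nb_segs--;
		rte_pktmbuf_free_seg(seg);
	}
}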
  

Comments

Ke Xu Nov. 10, 2022, 2:01 a.m. UTC | #1
> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Wednesday, November 9, 2022 8:56 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; stable@dpdk.org; Zhou, YidingX
> <yidingx.zhou@intel.com>; Ye, MingjinX <mingjinx.ye@intel.com>; Zhang, Qi
> Z <qi.z.zhang@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Wu,
> Jingjing <jingjing.wu@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>; Ferruh
> Yigit <ferruh.yigit@intel.com>
> Subject: [PATCH v2] net/ice: fix scalar Rx and Tx path segment
> 
> The CRC is stripped by the hardware in the scattered Rx path. If the last
> buffer's packet length is '0', the scalar Tx path would send an empty
> buffer, which causes the Tx queue to overflow.
> 
> This patch adds a check on the last buffer's length to fix this issue, so
> that the mbuf associated with the last buffer is freed if that buffer is empty.
> 
> Fixes: 6eac0b7fde95 ("net/ice: support advance Rx/Tx")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>

Tested and passed.

Tested-by: Ke XU <ke1.xu@intel.com>


> 
> v2:
> 	* Fix log level in ice_rxtx.c source file.
> ---
>  drivers/net/ice/ice_rxtx.c | 53 ++++++++++++++++++++++++++++++++++++--
>  1 file changed, 51 insertions(+), 2 deletions(-)
>
  
Qi Zhang Nov. 10, 2022, 10:37 a.m. UTC | #2
> -----Original Message-----
> From: Ye, MingjinX <mingjinx.ye@intel.com>
> Sent: Wednesday, November 9, 2022 8:56 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; stable@dpdk.org; Zhou, YidingX
> <yidingx.zhou@intel.com>; Ye, MingjinX <mingjinx.ye@intel.com>; Zhang, Qi
> Z <qi.z.zhang@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Wu,
> Jingjing <jingjing.wu@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>; Ferruh
> Yigit <ferruh.yigit@intel.com>
> Subject: [PATCH v2] net/ice: fix scalar Rx and Tx path segment
> 
> The CRC is stripped by the hardware in the scattered Rx path. If the last
> buffer's packet length is '0', the scalar Tx path would send an empty
> buffer, which causes the Tx queue to overflow.

Please separate this patch into two, one for Rx and one for Tx; they are independent.

For the Tx implementation, I think we can move the check into tx_prepare, which is the place to check for Tx violations.
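
For reference, here is a minimal sketch of what such a check could look like at
tx_prepare time. The helper name and its placement alongside the existing
validation in a handler like ice_prep_pkts() are assumptions for illustration,
not code from this patch or thread:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Sketch only: detect a zero-length segment anywhere in the chain. */
static inline bool
pkt_has_empty_seg(const struct rte_mbuf *tx_pkt)
{
	const struct rte_mbuf *seg;

	for (seg = tx_pkt; seg != NULL; seg = seg->next)
		if (seg->data_len == 0)
			return true;
	return false;
}

/* A tx_prepare handler rejects the first offending packet, reports it
 * through rte_errno, and returns the number of packets that passed, so
 * the application can decide how to handle the rest.
 */
static uint16_t
example_prep_pkts(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		if (pkt_has_empty_seg(tx_pkts[i])) {
			rte_errno = EINVAL;
			return i;
		}
		/* the driver's existing nb_segs/offload checks stay here */
	}
	return i;
}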
 
> 
> This patch adds a check on the last buffer's length to fix this issue, so
> that the mbuf associated with the last buffer is freed if that buffer is empty.
> 
> Fixes: 6eac0b7fde95 ("net/ice: support advance Rx/Tx")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> 
> v2:
> 	* Fix log level in ice_rxtx.c source file.
> ---
>  drivers/net/ice/ice_rxtx.c | 53 ++++++++++++++++++++++++++++++++++++--
>  1 file changed, 51 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> index 0a2b0376ac..b181f66aad 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -2111,6 +2111,10 @@ ice_recv_scattered_pkts(void *rx_queue,
>  			} else
>  				rxm->data_len = (uint16_t)(rx_packet_len -
>  							   RTE_ETHER_CRC_LEN);
> +		} else if (rx_packet_len == 0) {
> +			rte_pktmbuf_free_seg(rxm);
> +			first_seg->nb_segs--;
> +			last_seg->next = NULL;
>  		}
> 
>  		first_seg->port = rxq->port_id;
> @@ -2903,6 +2907,35 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
>  	return count;
>  }
> 
> +/*Check the number of valid mbufs and free the invalid mbufs*/
> +static inline uint16_t
> +ice_check_mbuf(struct rte_mbuf *tx_pkt)
> +{
> +	struct rte_mbuf *txd = tx_pkt;
> +	struct rte_mbuf *txd_removal = NULL;
> +	struct rte_mbuf *txd_pre = NULL;
> +	uint16_t count = 0;
> +	uint16_t removal = 0;
> +
> +	while (txd != NULL) {
> +		if (removal == 1 || txd->data_len == 0) {
> +			txd_removal = txd;
> +			txd = txd->next;
> +			if (removal == 0) {
> +				removal = 1;
> +				txd_pre->next = NULL;
> +			}
> +			rte_pktmbuf_free_seg(txd_removal);
> +		} else {
> +			++count;
> +			txd_pre = txd;
> +			txd = txd->next;
> +		}
> +	}
> +
> +	return count;
> +}
> +
>  uint16_t
>  ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  {
> @@ -2960,11 +2993,27 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  		 * the mbuf data size exceeds max data size that hw allows
>  		 * per tx desc.
>  		 */
> -		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
> +		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
>  			nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
>  					     nb_ctx);
> -		else
> +		} else {
> +			nb_used = ice_check_mbuf(tx_pkt);
> +			if (nb_used == 0) {
> +				PMD_TX_LOG(ERR,
> +				"Check packets is empty "
> +				"(port=%d queue=%d)\n",
> +				txq->port_id, txq->queue_id);
> +				continue;
> +			} else if (nb_used < tx_pkt->nb_segs) {
> +				PMD_TX_LOG(DEBUG,
> +				"Check packets valid num ="
> +				"%4u total num = %4u (port=%d queue=%d)\n",
> +				nb_used, tx_pkt->nb_segs, txq->port_id, txq->queue_id);
> +				tx_pkt->nb_segs = nb_used;
> +			}
>  			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
> +		}
> +
>  		tx_last = (uint16_t)(tx_id + nb_used - 1);
> 
>  		/* Circular ring */
> --
> 2.34.1
  
Mingjin Ye Nov. 11, 2022, 3:12 a.m. UTC | #3
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: November 10, 2022 18:37
> To: Ye, MingjinX <mingjinx.ye@intel.com>; dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; stable@dpdk.org; Zhou, YidingX
> <yidingx.zhou@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Wu,
> Jingjing <jingjing.wu@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>; Ferruh
> Yigit <ferruh.yigit@intel.com>
> Subject: RE: [PATCH v2] net/ice: fix scalar Rx and Tx path segment
> 
> 
> 
> > -----Original Message-----
> > From: Ye, MingjinX <mingjinx.ye@intel.com>
> > Sent: Wednesday, November 9, 2022 8:56 PM
> > To: dev@dpdk.org
> > Cc: Yang, Qiming <qiming.yang@intel.com>; stable@dpdk.org; Zhou,
> > YidingX <yidingx.zhou@intel.com>; Ye, MingjinX
> > <mingjinx.ye@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Lu,
> > Wenzhuo <wenzhuo.lu@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> > Li, Xiaoyun <xiaoyun.li@intel.com>; Ferruh Yigit
> > <ferruh.yigit@intel.com>
> > Subject: [PATCH v2] net/ice: fix scalar Rx and Tx path segment
> >
> > The CRC is stripped by the hardware in the scattered Rx path. If the
> > last buffer's packet length is '0', the scalar Tx path would send an
> > empty buffer, which causes the Tx queue to overflow.
> 
> Please separate this patch into two, one for Rx and one for Tx; they are
> independent.
> 
> For the Tx implementation, I think we can move the check into tx_prepare,
> which is the place to check for Tx violations.
Thanks for your suggestion. I will provide two new patches following the new
scheme and submit them to the community.
> 
> >
> > This patch adds a check on the last buffer's length to fix this issue,
> > so that the mbuf associated with the last buffer is freed if that
> > buffer is empty.
> >
> > Fixes: 6eac0b7fde95 ("net/ice: support advance Rx/Tx")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> >
> > v2:
> > 	* Fix log level in ice_rxtx.c source file.
> > ---
> >  drivers/net/ice/ice_rxtx.c | 53 ++++++++++++++++++++++++++++++++++++--
> >  1 file changed, 51 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> > index 0a2b0376ac..b181f66aad 100644
> > --- a/drivers/net/ice/ice_rxtx.c
> > +++ b/drivers/net/ice/ice_rxtx.c
> > @@ -2111,6 +2111,10 @@ ice_recv_scattered_pkts(void *rx_queue,
> >  			} else
> >  				rxm->data_len = (uint16_t)(rx_packet_len -
> >  							   RTE_ETHER_CRC_LEN);
> > +		} else if (rx_packet_len == 0) {
> > +			rte_pktmbuf_free_seg(rxm);
> > +			first_seg->nb_segs--;
> > +			last_seg->next = NULL;
> >  		}
> >
> >  		first_seg->port = rxq->port_id;
> > @@ -2903,6 +2907,35 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
> >  	return count;
> >  }
> >
> > +/*Check the number of valid mbufs and free the invalid mbufs*/
> > +static inline uint16_t
> > +ice_check_mbuf(struct rte_mbuf *tx_pkt)
> > +{
> > +	struct rte_mbuf *txd = tx_pkt;
> > +	struct rte_mbuf *txd_removal = NULL;
> > +	struct rte_mbuf *txd_pre = NULL;
> > +	uint16_t count = 0;
> > +	uint16_t removal = 0;
> > +
> > +	while (txd != NULL) {
> > +		if (removal == 1 || txd->data_len == 0) {
> > +			txd_removal = txd;
> > +			txd = txd->next;
> > +			if (removal == 0) {
> > +				removal = 1;
> > +				txd_pre->next = NULL;
> > +			}
> > +			rte_pktmbuf_free_seg(txd_removal);
> > +		} else {
> > +			++count;
> > +			txd_pre = txd;
> > +			txd = txd->next;
> > +		}
> > +	}
> > +
> > +	return count;
> > +}
> > +
> >  uint16_t
> >  ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> >  {
> > @@ -2960,11 +2993,27 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> >  		 * the mbuf data size exceeds max data size that hw allows
> >  		 * per tx desc.
> >  		 */
> > -		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
> > +		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
> >  			nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
> >  					     nb_ctx);
> > -		else
> > +		} else {
> > +			nb_used = ice_check_mbuf(tx_pkt);
> > +			if (nb_used == 0) {
> > +				PMD_TX_LOG(ERR,
> > +				"Check packets is empty "
> > +				"(port=%d queue=%d)\n",
> > +				txq->port_id, txq->queue_id);
> > +				continue;
> > +			} else if (nb_used < tx_pkt->nb_segs) {
> > +				PMD_TX_LOG(DEBUG,
> > +				"Check packets valid num ="
> > +				"%4u total num = %4u (port=%d queue=%d)\n",
> > +				nb_used, tx_pkt->nb_segs, txq->port_id, txq->queue_id);
> > +				tx_pkt->nb_segs = nb_used;
> > +			}
> >  			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
> > +		}
> > +
> >  		tx_last = (uint16_t)(tx_id + nb_used - 1);
> >
> >  		/* Circular ring */
> > --
> > 2.34.1
  

Patch

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 0a2b0376ac..b181f66aad 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2111,6 +2111,10 @@  ice_recv_scattered_pkts(void *rx_queue,
 			} else
 				rxm->data_len = (uint16_t)(rx_packet_len -
 							   RTE_ETHER_CRC_LEN);
+		} else if (rx_packet_len == 0) {
+			rte_pktmbuf_free_seg(rxm);
+			first_seg->nb_segs--;
+			last_seg->next = NULL;
 		}
 
 		first_seg->port = rxq->port_id;
@@ -2903,6 +2907,35 @@  ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
 	return count;
 }
 
+/*Check the number of valid mbufs and free the invalid mbufs*/
+static inline uint16_t
+ice_check_mbuf(struct rte_mbuf *tx_pkt)
+{
+	struct rte_mbuf *txd = tx_pkt;
+	struct rte_mbuf *txd_removal = NULL;
+	struct rte_mbuf *txd_pre = NULL;
+	uint16_t count = 0;
+	uint16_t removal = 0;
+
+	while (txd != NULL) {
+		if (removal == 1 || txd->data_len == 0) {
+			txd_removal = txd;
+			txd = txd->next;
+			if (removal == 0) {
+				removal = 1;
+				txd_pre->next = NULL;
+			}
+			rte_pktmbuf_free_seg(txd_removal);
+		} else {
+			++count;
+			txd_pre = txd;
+			txd = txd->next;
+		}
+	}
+
+	return count;
+}
+
 uint16_t
 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -2960,11 +2993,27 @@  ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 * the mbuf data size exceeds max data size that hw allows
 		 * per tx desc.
 		 */
-		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
 			nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
 					     nb_ctx);
-		else
+		} else {
+			nb_used = ice_check_mbuf(tx_pkt);
+			if (nb_used == 0) {
+				PMD_TX_LOG(ERR,
+				"Check packets is empty "
+				"(port=%d queue=%d)\n",
+				txq->port_id, txq->queue_id);
+				continue;
+			} else if (nb_used < tx_pkt->nb_segs) {
+				PMD_TX_LOG(DEBUG,
+				"Check packets valid num ="
+				"%4u total num = %4u (port=%d queue=%d)\n",
+				nb_used, tx_pkt->nb_segs, txq->port_id, txq->queue_id);
+				tx_pkt->nb_segs = nb_used;
+			}
 			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+		}
+
 		tx_last = (uint16_t)(tx_id + nb_used - 1);
 
 		/* Circular ring */