diff mbox series

app/testpmd: fix random number of Tx segments

Message ID 20210902082013.7704-1-alvinx.zhang@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers show
Series app/testpmd: fix random number of Tx segments | expand

Checks

Context Check Description
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/intel-Testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/github-robot: build success github build: passed
ci/checkpatch success coding style OK

Commit Message

Alvin Zhang Sept. 2, 2021, 8:20 a.m. UTC
When a random number of segments in Tx packets is enabled,
the total data space length of all segments must be greater
than or equal to the size of an Eth/IP/UDP/timestamp packet,
that is 14 + 20 + 8 + 16 bytes in total. Otherwise the Tx engine
may cause the application to crash.

Bugzilla ID: 797
Fixes: 79bec05b32b7 ("app/testpmd: add ability to split outgoing packets")
Cc: stable@dpdk.org

Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
---
 app/test-pmd/config.c  | 16 +++++++++++-----
 app/test-pmd/testpmd.c |  5 +++++
 app/test-pmd/testpmd.h |  5 +++++
 app/test-pmd/txonly.c  |  7 +++++--
 4 files changed, 26 insertions(+), 7 deletions(-)

Comments

Li, Xiaoyun Sept. 6, 2021, 8:58 a.m. UTC | #1
Hi

> -----Original Message-----
> From: Zhang, AlvinX <alvinx.zhang@intel.com>
> Sent: Thursday, September 2, 2021 16:20
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; Zhang, AlvinX <alvinx.zhang@intel.com>; stable@dpdk.org
> Subject: [PATCH] app/testpmd: fix random number of Tx segments
> 
> When random number of segments in Tx packets is enabled, the total data space
> length of all segments must be greater or equal than the size of an
> Eth/IP/UDP/timestamp packet, that's total 14 + 20 + 8 + 16 bytes. Otherwise the
> Tx engine may cause the application to crash.
> 
> Bugzilla ID: 797
> Fixes: 79bec05b32b7 ("app/testpmd: add ability to split outgoing packets")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
> ---
>  app/test-pmd/config.c  | 16 +++++++++++-----  app/test-pmd/testpmd.c |  5
> +++++  app/test-pmd/testpmd.h |  5 +++++  app/test-pmd/txonly.c  |  7 +++++--
>  4 files changed, 26 insertions(+), 7 deletions(-)
> 
> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index
> 31d8ba1..5105b3b 100644
> --- a/app/test-pmd/config.c
> +++ b/app/test-pmd/config.c
> @@ -3837,10 +3837,11 @@ struct igb_ring_desc_16_bytes {
>  	 * Check that each segment length is greater or equal than
>  	 * the mbuf data size.
>  	 * Check also that the total packet length is greater or equal than the
> -	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
> -	 * 20 + 8).
> +	 * size of an Eth/IP/UDP + timestamp packet
> +	 * (sizeof(struct rte_ether_hdr) + 20 + 8 + 16).

I don't really agree on this. Most of the time, txonly generates packets with Eth/IP/UDP. It's not fair to limit the header length to include a timestamp in all cases.
And to be honest, I don't see why you need to add "tx_pkt_nb_min_segs". It's only used in txonly when "TX_PKT_SPLIT_RND" is set. So this issue arises because, when "TX_PKT_SPLIT_RND" is set, the random nb_segs may not be enough for the header.

But if you read txonly carefully, when "TX_PKT_SPLIT_RND" is set, the first segment length should be equal to or greater than 42 (14+20+8), because in that case update_pkt_header() is called, and that function doesn't deal with headers split across multiple segments.
I think there's a bug here.

So I think you should only add a check in pkt_burst_prepare() in txonly().
	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
+                           if (tx_pkt_seg_lengths[0] < 42) {
+			err_log;
+			return false;
+		}
		update_pkt_header(pkt, pkt_len);

As for timestamp, maybe refer to "pkt_copy_split" in csumonly.c is better? Copy the extra to the last segment if it's not enough. Not sure how to deal with this issue better.

>  	 */
>  	tx_pkt_len = 0;
> +	tx_pkt_nb_min_segs = 0;
>  	for (i = 0; i < nb_segs; i++) {
>  		if (seg_lengths[i] > mbuf_data_size[0]) {
>  			fprintf(stderr,
> @@ -3849,11 +3850,16 @@ struct igb_ring_desc_16_bytes {
>  			return;
>  		}
>  		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
> +
> +		if (!tx_pkt_nb_min_segs &&
> +		    tx_pkt_len >= (sizeof(struct rte_ether_hdr) + 20 + 8 + 16))
> +			tx_pkt_nb_min_segs = i + 1;
>  	}
> -	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
> +
> +	if (!tx_pkt_nb_min_segs) {
>  		fprintf(stderr, "total packet length=%u < %d - give up\n",
> -				(unsigned) tx_pkt_len,
> -				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
> +			(unsigned int) tx_pkt_len,
> +			(int)(sizeof(struct rte_ether_hdr) + 20 + 8 + 16));
>  		return;
>  	}
> 
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> 6cbe9ba..c496e59 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -232,6 +232,11 @@ struct fwd_engine * fwd_engines[] = {  };  uint8_t
> tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
> 
> +/**< Minimum number of segments in TXONLY packets to accommodate all
> +packet
> + * headers.
> + */
> +uint8_t  tx_pkt_nb_min_segs = 1;
> +
>  enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;  /**< Split policy for
> packets to TX. */
> 
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> 16a3598..f5bc427 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -464,6 +464,11 @@ enum dcb_mode_enable  extern uint16_t
> tx_pkt_length; /**< Length of TXONLY packet */  extern uint16_t
> tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */  extern
> uint8_t  tx_pkt_nb_segs; /**< Number of segments in TX packets */
> +
> +/**< Minimum number of segments in TXONLY packets to accommodate all
> +packet
> + * headers.
> + */
> +extern uint8_t  tx_pkt_nb_min_segs;
>  extern uint32_t tx_pkt_times_intra;
>  extern uint32_t tx_pkt_times_inter;
> 
> diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index
> aed820f..27e4458 100644
> --- a/app/test-pmd/txonly.c
> +++ b/app/test-pmd/txonly.c
> @@ -195,8 +195,11 @@
>  	uint32_t nb_segs, pkt_len;
>  	uint8_t i;
> 
> -	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
> -		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
> +	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) &&
> +	    tx_pkt_nb_segs > tx_pkt_nb_min_segs)
> +		nb_segs = rte_rand() %
> +			  (tx_pkt_nb_segs - tx_pkt_nb_min_segs + 1) +
> +			  tx_pkt_nb_min_segs;
>  	else
>  		nb_segs = tx_pkt_nb_segs;
> 
> --
> 1.8.3.1
Alvin Zhang Sept. 6, 2021, 10:03 a.m. UTC | #2
> -----Original Message-----
> From: Li, Xiaoyun <xiaoyun.li@intel.com>
> Sent: Monday, September 6, 2021 4:59 PM
> To: Zhang, AlvinX <alvinx.zhang@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> 
> Hi
> 
> > -----Original Message-----
> > From: Zhang, AlvinX <alvinx.zhang@intel.com>
> > Sent: Thursday, September 2, 2021 16:20
> > To: Li, Xiaoyun <xiaoyun.li@intel.com>; Ananyev, Konstantin
> > <konstantin.ananyev@intel.com>
> > Cc: dev@dpdk.org; Zhang, AlvinX <alvinx.zhang@intel.com>;
> > stable@dpdk.org
> > Subject: [PATCH] app/testpmd: fix random number of Tx segments
> >
> > When random number of segments in Tx packets is enabled, the total
> > data space length of all segments must be greater or equal than the
> > size of an Eth/IP/UDP/timestamp packet, that's total 14 + 20 + 8 + 16
> > bytes. Otherwise the Tx engine may cause the application to crash.
> >
> > Bugzilla ID: 797
> > Fixes: 79bec05b32b7 ("app/testpmd: add ability to split outgoing
> > packets")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
> > ---
> >  app/test-pmd/config.c  | 16 +++++++++++-----  app/test-pmd/testpmd.c
> > |  5
> > +++++  app/test-pmd/testpmd.h |  5 +++++  app/test-pmd/txonly.c  |  7
> > +++++ +++++--
> >  4 files changed, 26 insertions(+), 7 deletions(-)
> >
> > diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index
> > 31d8ba1..5105b3b 100644
> > --- a/app/test-pmd/config.c
> > +++ b/app/test-pmd/config.c
> > @@ -3837,10 +3837,11 @@ struct igb_ring_desc_16_bytes {
> >  	 * Check that each segment length is greater or equal than
> >  	 * the mbuf data size.
> >  	 * Check also that the total packet length is greater or equal than the
> > -	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
> > -	 * 20 + 8).
> > +	 * size of an Eth/IP/UDP + timestamp packet
> > +	 * (sizeof(struct rte_ether_hdr) + 20 + 8 + 16).
> 
> I don't really agree on this. Most of the time, txonly generate packets with
> Eth/IP/UDP. It's not fair to limit the hdr length to include timestamp in all cases.
> And to be honest, I don't see why you need to add "tx_pkt_nb_min_segs". It's
> only used in txonly when "TX_PKT_SPLIT_RND". So this issue is because when
> "TX_PKT_SPLIT_RND", the random nb_segs is not enough for the hdr.
> 
> But if you read txonly carefully, if "TX_PKT_SPLIT_RND", the first segment length
> should be equal or greater than 42 (14+20+8). Because when
> "TX_PKT_SPLIT_RND", update_pkt_header() should be called. And that function
> doesn't deal with header in multi-segments.
> I think there's bug here.
> 
> So I think you should only add a check in pkt_burst_prepare() in txonly().
> 	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
> +                           if (tx_pkt_seg_lengths[0] < 42) {
> +			err_log;
> +			return false;
> +		}
> 		update_pkt_header(pkt, pkt_len);

Yes, I didn't notice the update of the UDP header, but the bug first occurs in this function:
copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
			sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr));
not in update_pkt_header.

Expecting users to set a minimum of 42 bytes for the first segment seems OK,
but I think putting the check where the data space length of the first segment is configured is more graceful.

> 
> As for timestamp, maybe refer to "pkt_copy_split" in csumonly.c is better? Copy
> the extra to the last segment if it's not enough. Not sure how to deal with this
> issue better.
> 
> >  	 */
> >  	tx_pkt_len = 0;
> > +	tx_pkt_nb_min_segs = 0;
> >  	for (i = 0; i < nb_segs; i++) {
> >  		if (seg_lengths[i] > mbuf_data_size[0]) {
> >  			fprintf(stderr,
> > @@ -3849,11 +3850,16 @@ struct igb_ring_desc_16_bytes {
> >  			return;
> >  		}
> >  		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
> > +
> > +		if (!tx_pkt_nb_min_segs &&
> > +		    tx_pkt_len >= (sizeof(struct rte_ether_hdr) + 20 + 8 + 16))
> > +			tx_pkt_nb_min_segs = i + 1;
> >  	}
> > -	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
> > +
> > +	if (!tx_pkt_nb_min_segs) {
> >  		fprintf(stderr, "total packet length=%u < %d - give up\n",
> > -				(unsigned) tx_pkt_len,
> > -				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
> > +			(unsigned int) tx_pkt_len,
> > +			(int)(sizeof(struct rte_ether_hdr) + 20 + 8 + 16));
> >  		return;
> >  	}
> >
> > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > 6cbe9ba..c496e59 100644
> > --- a/app/test-pmd/testpmd.c
> > +++ b/app/test-pmd/testpmd.c
> > @@ -232,6 +232,11 @@ struct fwd_engine * fwd_engines[] = {  };
> > uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets
> > */
> >
> > +/**< Minimum number of segments in TXONLY packets to accommodate all
> > +packet
> > + * headers.
> > + */
> > +uint8_t  tx_pkt_nb_min_segs = 1;
> > +
> >  enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;  /**< Split policy
> > for packets to TX. */
> >
> > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> > 16a3598..f5bc427 100644
> > --- a/app/test-pmd/testpmd.h
> > +++ b/app/test-pmd/testpmd.h
> > @@ -464,6 +464,11 @@ enum dcb_mode_enable  extern uint16_t
> > tx_pkt_length; /**< Length of TXONLY packet */  extern uint16_t
> > tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */  extern
> > uint8_t  tx_pkt_nb_segs; /**< Number of segments in TX packets */
> > +
> > +/**< Minimum number of segments in TXONLY packets to accommodate all
> > +packet
> > + * headers.
> > + */
> > +extern uint8_t  tx_pkt_nb_min_segs;
> >  extern uint32_t tx_pkt_times_intra;
> >  extern uint32_t tx_pkt_times_inter;
> >
> > diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index
> > aed820f..27e4458 100644
> > --- a/app/test-pmd/txonly.c
> > +++ b/app/test-pmd/txonly.c
> > @@ -195,8 +195,11 @@
> >  	uint32_t nb_segs, pkt_len;
> >  	uint8_t i;
> >
> > -	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
> > -		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
> > +	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) &&
> > +	    tx_pkt_nb_segs > tx_pkt_nb_min_segs)
> > +		nb_segs = rte_rand() %
> > +			  (tx_pkt_nb_segs - tx_pkt_nb_min_segs + 1) +
> > +			  tx_pkt_nb_min_segs;
> >  	else
> >  		nb_segs = tx_pkt_nb_segs;
> >
> > --
> > 1.8.3.1
Li, Xiaoyun Sept. 6, 2021, 10:54 a.m. UTC | #3
> -----Original Message-----
> From: Zhang, AlvinX <alvinx.zhang@intel.com>
> Sent: Monday, September 6, 2021 18:04
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> 
> > -----Original Message-----
> > From: Li, Xiaoyun <xiaoyun.li@intel.com>
> > Sent: Monday, September 6, 2021 4:59 PM
> > To: Zhang, AlvinX <alvinx.zhang@intel.com>; Ananyev, Konstantin
> > <konstantin.ananyev@intel.com>
> > Cc: dev@dpdk.org; stable@dpdk.org
> > Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> >
> > Hi
> >
> > > -----Original Message-----
> > > From: Zhang, AlvinX <alvinx.zhang@intel.com>
> > > Sent: Thursday, September 2, 2021 16:20
> > > To: Li, Xiaoyun <xiaoyun.li@intel.com>; Ananyev, Konstantin
> > > <konstantin.ananyev@intel.com>
> > > Cc: dev@dpdk.org; Zhang, AlvinX <alvinx.zhang@intel.com>;
> > > stable@dpdk.org
> > > Subject: [PATCH] app/testpmd: fix random number of Tx segments
> > >
> > > When random number of segments in Tx packets is enabled, the total
> > > data space length of all segments must be greater or equal than the
> > > size of an Eth/IP/UDP/timestamp packet, that's total 14 + 20 + 8 +
> > > 16 bytes. Otherwise the Tx engine may cause the application to crash.
> > >
> > > Bugzilla ID: 797
> > > Fixes: 79bec05b32b7 ("app/testpmd: add ability to split outgoing
> > > packets")
> > > Cc: stable@dpdk.org
> > >
> > > Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
> > > ---
> > >  app/test-pmd/config.c  | 16 +++++++++++-----
> > > app/test-pmd/testpmd.c
> > > |  5
> > > +++++  app/test-pmd/testpmd.h |  5 +++++  app/test-pmd/txonly.c  |
> > > +++++ 7
> > > +++++ +++++--
> > >  4 files changed, 26 insertions(+), 7 deletions(-)
> > >
> > > diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index
> > > 31d8ba1..5105b3b 100644
> > > --- a/app/test-pmd/config.c
> > > +++ b/app/test-pmd/config.c
> > > @@ -3837,10 +3837,11 @@ struct igb_ring_desc_16_bytes {
> > >  	 * Check that each segment length is greater or equal than
> > >  	 * the mbuf data size.
> > >  	 * Check also that the total packet length is greater or equal than the
> > > -	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
> > > -	 * 20 + 8).
> > > +	 * size of an Eth/IP/UDP + timestamp packet
> > > +	 * (sizeof(struct rte_ether_hdr) + 20 + 8 + 16).
> >
> > I don't really agree on this. Most of the time, txonly generate
> > packets with Eth/IP/UDP. It's not fair to limit the hdr length to include
> timestamp in all cases.
> > And to be honest, I don't see why you need to add
> > "tx_pkt_nb_min_segs". It's only used in txonly when
> > "TX_PKT_SPLIT_RND". So this issue is because when "TX_PKT_SPLIT_RND", the
> random nb_segs is not enough for the hdr.
> >
> > But if you read txonly carefully, if "TX_PKT_SPLIT_RND", the first
> > segment length should be equal or greater than 42 (14+20+8). Because
> > when "TX_PKT_SPLIT_RND", update_pkt_header() should be called. And
> > that function doesn't deal with header in multi-segments.
> > I think there's bug here.
> >
> > So I think you should only add a check in pkt_burst_prepare() in txonly().
> > 	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
> > +                           if (tx_pkt_seg_lengths[0] < 42) {
> > +			err_log;
> > +			return false;
> > +		}
> > 		update_pkt_header(pkt, pkt_len);
> 
> Yes, I didn't notice the updating for the UDP header, but the bug first occurs in
> this function copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
> 			sizeof(struct rte_ether_hdr) +
> 			sizeof(struct rte_ipv4_hdr));
> not in update_pkt_header.
> 
> Here we expecting users should set minimum 42 byte for first segment seems ok,
> But I think we putting the check in configuring the data space length of first
> segment is more graceful.

No. You didn't get my point. It's not graceful at all.
The segmentation fault will only happen when "TX_PKT_SPLIT_RND" is set, because the header may take 2 segments while the random nb_segs is only 1.
But if "TX_PKT_SPLIT_RND" is not set, the current set_tx_pkt_segments() already makes sure pkt_len is enough for 42 bytes.

So the only case you need to deal with is "TX_PKT_SPLIT_RND". And since "TX_PKT_SPLIT_RND" actually needs the first segment to be enough to contain 42 bytes.
And in cmd_set_txpkts_parsed, you may not get the configuration to know if it will be configured as "TX_PKT_SPLIT_RND". That's why you should check before update_pkt_header().

In this way, when it's NOT "TX_PKT_SPLIT_RND", it will maintain the old behavior that hdrs may cross several segs which makes more sense.

> 
> >
> > As for timestamp, maybe refer to "pkt_copy_split" in csumonly.c is
> > better? Copy the extra to the last segment if it's not enough. Not
> > sure how to deal with this issue better.
> >
> > >  	 */
> > >  	tx_pkt_len = 0;
> > > +	tx_pkt_nb_min_segs = 0;
> > >  	for (i = 0; i < nb_segs; i++) {
> > >  		if (seg_lengths[i] > mbuf_data_size[0]) {
> > >  			fprintf(stderr,
> > > @@ -3849,11 +3850,16 @@ struct igb_ring_desc_16_bytes {
> > >  			return;
> > >  		}
> > >  		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
> > > +
> > > +		if (!tx_pkt_nb_min_segs &&
> > > +		    tx_pkt_len >= (sizeof(struct rte_ether_hdr) + 20 + 8 + 16))
> > > +			tx_pkt_nb_min_segs = i + 1;
> > >  	}
> > > -	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
> > > +
> > > +	if (!tx_pkt_nb_min_segs) {
> > >  		fprintf(stderr, "total packet length=%u < %d - give up\n",
> > > -				(unsigned) tx_pkt_len,
> > > -				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
> > > +			(unsigned int) tx_pkt_len,
> > > +			(int)(sizeof(struct rte_ether_hdr) + 20 + 8 + 16));
> > >  		return;
> > >  	}
> > >
> > > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > > 6cbe9ba..c496e59 100644
> > > --- a/app/test-pmd/testpmd.c
> > > +++ b/app/test-pmd/testpmd.c
> > > @@ -232,6 +232,11 @@ struct fwd_engine * fwd_engines[] = {  };
> > > uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY
> > > packets */
> > >
> > > +/**< Minimum number of segments in TXONLY packets to accommodate
> > > +all packet
> > > + * headers.
> > > + */
> > > +uint8_t  tx_pkt_nb_min_segs = 1;
> > > +
> > >  enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;  /**< Split
> > > policy for packets to TX. */
> > >
> > > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> > > 16a3598..f5bc427 100644
> > > --- a/app/test-pmd/testpmd.h
> > > +++ b/app/test-pmd/testpmd.h
> > > @@ -464,6 +464,11 @@ enum dcb_mode_enable  extern uint16_t
> > > tx_pkt_length; /**< Length of TXONLY packet */  extern uint16_t
> > > tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */
> > > extern uint8_t  tx_pkt_nb_segs; /**< Number of segments in TX
> > > packets */
> > > +
> > > +/**< Minimum number of segments in TXONLY packets to accommodate
> > > +all packet
> > > + * headers.
> > > + */
> > > +extern uint8_t  tx_pkt_nb_min_segs;
> > >  extern uint32_t tx_pkt_times_intra;  extern uint32_t
> > > tx_pkt_times_inter;
> > >
> > > diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index
> > > aed820f..27e4458 100644
> > > --- a/app/test-pmd/txonly.c
> > > +++ b/app/test-pmd/txonly.c
> > > @@ -195,8 +195,11 @@
> > >  	uint32_t nb_segs, pkt_len;
> > >  	uint8_t i;
> > >
> > > -	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
> > > -		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
> > > +	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) &&
> > > +	    tx_pkt_nb_segs > tx_pkt_nb_min_segs)
> > > +		nb_segs = rte_rand() %
> > > +			  (tx_pkt_nb_segs - tx_pkt_nb_min_segs + 1) +
> > > +			  tx_pkt_nb_min_segs;
> > >  	else
> > >  		nb_segs = tx_pkt_nb_segs;
> > >
> > > --
> > > 1.8.3.1
Alvin Zhang Sept. 7, 2021, 2:25 a.m. UTC | #4
> -----Original Message-----
> From: Li, Xiaoyun <xiaoyun.li@intel.com>
> Sent: Monday, September 6, 2021 6:55 PM
> To: Zhang, AlvinX <alvinx.zhang@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> 
> 
> 
> > -----Original Message-----
> > From: Zhang, AlvinX <alvinx.zhang@intel.com>
> > Sent: Monday, September 6, 2021 18:04
> > To: Li, Xiaoyun <xiaoyun.li@intel.com>; Ananyev, Konstantin
> > <konstantin.ananyev@intel.com>
> > Cc: dev@dpdk.org; stable@dpdk.org
> > Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> >
> > > -----Original Message-----
> > > From: Li, Xiaoyun <xiaoyun.li@intel.com>
> > > Sent: Monday, September 6, 2021 4:59 PM
> > > To: Zhang, AlvinX <alvinx.zhang@intel.com>; Ananyev, Konstantin
> > > <konstantin.ananyev@intel.com>
> > > Cc: dev@dpdk.org; stable@dpdk.org
> > > Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> > >
> > > Hi
> > >
> > > > -----Original Message-----
> > > > From: Zhang, AlvinX <alvinx.zhang@intel.com>
> > > > Sent: Thursday, September 2, 2021 16:20
> > > > To: Li, Xiaoyun <xiaoyun.li@intel.com>; Ananyev, Konstantin
> > > > <konstantin.ananyev@intel.com>
> > > > Cc: dev@dpdk.org; Zhang, AlvinX <alvinx.zhang@intel.com>;
> > > > stable@dpdk.org
> > > > Subject: [PATCH] app/testpmd: fix random number of Tx segments
> > > >
> > > > When random number of segments in Tx packets is enabled, the total
> > > > data space length of all segments must be greater or equal than
> > > > the size of an Eth/IP/UDP/timestamp packet, that's total 14 + 20 +
> > > > 8 +
> > > > 16 bytes. Otherwise the Tx engine may cause the application to crash.
> > > >
> > > > Bugzilla ID: 797
> > > > Fixes: 79bec05b32b7 ("app/testpmd: add ability to split outgoing
> > > > packets")
> > > > Cc: stable@dpdk.org
> > > >
> > > > Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
> > > > ---
> > > >  app/test-pmd/config.c  | 16 +++++++++++-----
> > > > app/test-pmd/testpmd.c
> > > > |  5
> > > > +++++  app/test-pmd/testpmd.h |  5 +++++  app/test-pmd/txonly.c  |
> > > > +++++ 7
> > > > +++++ +++++--
> > > >  4 files changed, 26 insertions(+), 7 deletions(-)
> > > >
> > > > diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index
> > > > 31d8ba1..5105b3b 100644
> > > > --- a/app/test-pmd/config.c
> > > > +++ b/app/test-pmd/config.c
> > > > @@ -3837,10 +3837,11 @@ struct igb_ring_desc_16_bytes {
> > > >  	 * Check that each segment length is greater or equal than
> > > >  	 * the mbuf data size.
> > > >  	 * Check also that the total packet length is greater or equal than the
> > > > -	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
> > > > -	 * 20 + 8).
> > > > +	 * size of an Eth/IP/UDP + timestamp packet
> > > > +	 * (sizeof(struct rte_ether_hdr) + 20 + 8 + 16).
> > >
> > > I don't really agree on this. Most of the time, txonly generate
> > > packets with Eth/IP/UDP. It's not fair to limit the hdr length to
> > > include
> > timestamp in all cases.
> > > And to be honest, I don't see why you need to add
> > > "tx_pkt_nb_min_segs". It's only used in txonly when
> > > "TX_PKT_SPLIT_RND". So this issue is because when
> > > "TX_PKT_SPLIT_RND", the
> > random nb_segs is not enough for the hdr.
> > >
> > > But if you read txonly carefully, if "TX_PKT_SPLIT_RND", the first
> > > segment length should be equal or greater than 42 (14+20+8). Because
> > > when "TX_PKT_SPLIT_RND", update_pkt_header() should be called. And
> > > that function doesn't deal with header in multi-segments.
> > > I think there's bug here.
> > >
> > > So I think you should only add a check in pkt_burst_prepare() in txonly().
> > > 	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) ||
> > > txonly_multi_flow)
> > > +                           if (tx_pkt_seg_lengths[0] < 42) {
> > > +			err_log;
> > > +			return false;
> > > +		}
> > > 		update_pkt_header(pkt, pkt_len);


As above, if the user has the configuration below, not a single packet will be sent out, and there will be lots and lots of repeated, annoying logs.
testpmd> set fwd txonly
Set txonly packet forwarding mode
testpmd> set txpkts 40,64
testpmd> set txsplit rand
testpmd> start
txonly packet forwarding - ports=1 - cores=1 - streams=4 - NUMA support enabled, MP allocation mode: native
Logical Core 2 (socket 0) forwards packets on 4 streams:
  RX P=0/Q=0 (socket 0) -> TX P=0/Q=0 (socket 0) peer=02:00:00:00:00:00
  RX P=0/Q=1 (socket 0) -> TX P=0/Q=1 (socket 0) peer=02:00:00:00:00:00
  RX P=0/Q=2 (socket 0) -> TX P=0/Q=2 (socket 0) peer=02:00:00:00:00:00
  RX P=0/Q=3 (socket 0) -> TX P=0/Q=3 (socket 0) peer=02:00:00:00:00:00

  txonly packet forwarding packets/burst=32
  packet len=104 - nb packet segments=2
  nb forwarding cores=1 - nb forwarding ports=1
  port 0: RX queue number: 4 Tx queue number: 4
    Rx offloads=0x0 Tx offloads=0x10000
    RX queue: 0
      RX desc=1024 - RX free threshold=32
      RX threshold registers: pthresh=0 hthresh=0  wthresh=0
      RX Offloads=0x0
    TX queue: 0
      TX desc=1024 - TX free threshold=32
      TX threshold registers: pthresh=32 hthresh=0  wthresh=0
      TX offloads=0x10000 - TX RS bit threshold=32
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tetx_pkt_seg_lengths[0] must bigger than 41 bytes
stpmd> tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
tx_pkt_seg_lengths[0] must bigger than 41 bytes
...
...
testpmd> stop
Telling cores to stop...
Waiting for lcores to finish...

  ---------------------- Forward statistics for port 0  ----------------------
  RX-packets: 0              RX-dropped: 0             RX-total: 0
  TX-packets: 0              TX-dropped: 0             TX-total: 0
  ----------------------------------------------------------------------------

  +++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
  RX-packets: 0              RX-dropped: 0             RX-total: 0
  TX-packets: 0              TX-dropped: 0             TX-total: 0
  ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

By the way, if multi-flow is enabled, the IP header will also be updated, so we should put the check before the code below.

	if (txonly_multi_flow) {
		uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
		struct rte_ipv4_hdr *ip_hdr;
		uint32_t addr;

		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
		/*
		 * Generate multiple flows by varying IP src addr. This
		 * enables packets are well distributed by RSS in
		 * receiver side if any and txonly mode can be a decent
		 * packet generator for developer's quick performance
		 * regression test.
		 */
		addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
		RTE_PER_LCORE(_ip_var) = ip_var;
	}

> >
> > Yes, I didn't notice the updating for the UDP header, but the bug
> > first occurs in this function copy_buf_to_pkt(&pkt_udp_hdr,
> sizeof(pkt_udp_hdr), pkt,
> > 			sizeof(struct rte_ether_hdr) +
> > 			sizeof(struct rte_ipv4_hdr));
> > not in update_pkt_header.
> >
> > Here we expecting users should set minimum 42 byte for first segment
> > seems ok, But I think we putting the check in configuring the data
> > space length of first segment is more graceful.
> 
> No. You didn't get my point. It's not graceful at all.
> The segment fault will only happen when "TX_PKT_SPLIT_RND". Because the
> hdr may take 2 segs while random nb_segs is only 1.
> But if it's not "TX_PKT_SPLIT_RND", the current set_tx_pkt_segments() already
> make sure pkt_len is enough for 42.
> 
> So the only case you need to deal with is "TX_PKT_SPLIT_RND". And since
> "TX_PKT_SPLIT_RND" actually needs the first segment to be enough to contain
> 42 bytes.
> And in cmd_set_txpkts_parsed, you may not get the configuration to know if it
> will be configured as "TX_PKT_SPLIT_RND". That's why you should check before
> update_pkt_header().
> 
> In this way, when it's NOT "TX_PKT_SPLIT_RND", it will maintain the old
> behavior that hdrs may cross several segs which makes more sense.
> 
> >
> > >
> > > As for timestamp, maybe refer to "pkt_copy_split" in csumonly.c is
> > > better? Copy the extra to the last segment if it's not enough. Not
> > > sure how to deal with this issue better.
> > >
> > > >  	 */
> > > >  	tx_pkt_len = 0;
> > > > +	tx_pkt_nb_min_segs = 0;
> > > >  	for (i = 0; i < nb_segs; i++) {
> > > >  		if (seg_lengths[i] > mbuf_data_size[0]) {
> > > >  			fprintf(stderr,
> > > > @@ -3849,11 +3850,16 @@ struct igb_ring_desc_16_bytes {
> > > >  			return;
> > > >  		}
> > > >  		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
> > > > +
> > > > +		if (!tx_pkt_nb_min_segs &&
> > > > +		    tx_pkt_len >= (sizeof(struct rte_ether_hdr) + 20 + 8 + 16))
> > > > +			tx_pkt_nb_min_segs = i + 1;
> > > >  	}
> > > > -	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
> > > > +
> > > > +	if (!tx_pkt_nb_min_segs) {
> > > >  		fprintf(stderr, "total packet length=%u < %d - give up\n",
> > > > -				(unsigned) tx_pkt_len,
> > > > -				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
> > > > +			(unsigned int) tx_pkt_len,
> > > > +			(int)(sizeof(struct rte_ether_hdr) + 20 + 8 + 16));
> > > >  		return;
> > > >  	}
> > > >
> > > > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > > > 6cbe9ba..c496e59 100644
> > > > --- a/app/test-pmd/testpmd.c
> > > > +++ b/app/test-pmd/testpmd.c
> > > > @@ -232,6 +232,11 @@ struct fwd_engine * fwd_engines[] = {  };
> > > > uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY
> > > > packets */
> > > >
> > > > +/**< Minimum number of segments in TXONLY packets to accommodate
> > > > +all packet
> > > > + * headers.
> > > > + */
> > > > +uint8_t  tx_pkt_nb_min_segs = 1;
> > > > +
> > > >  enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;  /**< Split
> > > > policy for packets to TX. */
> > > >
> > > > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> > > > 16a3598..f5bc427 100644
> > > > --- a/app/test-pmd/testpmd.h
> > > > +++ b/app/test-pmd/testpmd.h
> > > > @@ -464,6 +464,11 @@ enum dcb_mode_enable  extern uint16_t
> > > > tx_pkt_length; /**< Length of TXONLY packet */  extern uint16_t
> > > > tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */
> > > > extern uint8_t  tx_pkt_nb_segs; /**< Number of segments in TX
> > > > packets */
> > > > +
> > > > +/**< Minimum number of segments in TXONLY packets to accommodate
> > > > +all packet
> > > > + * headers.
> > > > + */
> > > > +extern uint8_t  tx_pkt_nb_min_segs;
> > > >  extern uint32_t tx_pkt_times_intra;  extern uint32_t
> > > > tx_pkt_times_inter;
> > > >
> > > > diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index
> > > > aed820f..27e4458 100644
> > > > --- a/app/test-pmd/txonly.c
> > > > +++ b/app/test-pmd/txonly.c
> > > > @@ -195,8 +195,11 @@
> > > >  	uint32_t nb_segs, pkt_len;
> > > >  	uint8_t i;
> > > >
> > > > -	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
> > > > -		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
> > > > +	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) &&
> > > > +	    tx_pkt_nb_segs > tx_pkt_nb_min_segs)
> > > > +		nb_segs = rte_rand() %
> > > > +			  (tx_pkt_nb_segs - tx_pkt_nb_min_segs + 1) +
> > > > +			  tx_pkt_nb_min_segs;
> > > >  	else
> > > >  		nb_segs = tx_pkt_nb_segs;
> > > >
> > > > --
> > > > 1.8.3.1
Li, Xiaoyun Sept. 7, 2021, 8:05 a.m. UTC | #5
> -----Original Message-----
> From: Zhang, AlvinX <alvinx.zhang@intel.com>
> Sent: Tuesday, September 7, 2021 10:25
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> 
> > -----Original Message-----
> > From: Li, Xiaoyun <xiaoyun.li@intel.com>
> > Sent: Monday, September 6, 2021 6:55 PM
> > To: Zhang, AlvinX <alvinx.zhang@intel.com>; Ananyev, Konstantin
> > <konstantin.ananyev@intel.com>
> > Cc: dev@dpdk.org; stable@dpdk.org
> > Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> >
> >
> >
> > > -----Original Message-----
> > > From: Zhang, AlvinX <alvinx.zhang@intel.com>
> > > Sent: Monday, September 6, 2021 18:04
> > > To: Li, Xiaoyun <xiaoyun.li@intel.com>; Ananyev, Konstantin
> > > <konstantin.ananyev@intel.com>
> > > Cc: dev@dpdk.org; stable@dpdk.org
> > > Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> > >
> > > > -----Original Message-----
> > > > From: Li, Xiaoyun <xiaoyun.li@intel.com>
> > > > Sent: Monday, September 6, 2021 4:59 PM
> > > > To: Zhang, AlvinX <alvinx.zhang@intel.com>; Ananyev, Konstantin
> > > > <konstantin.ananyev@intel.com>
> > > > Cc: dev@dpdk.org; stable@dpdk.org
> > > > Subject: RE: [PATCH] app/testpmd: fix random number of Tx segments
> > > >
> > > > Hi
> > > >
> > > > > -----Original Message-----
> > > > > From: Zhang, AlvinX <alvinx.zhang@intel.com>
> > > > > Sent: Thursday, September 2, 2021 16:20
> > > > > To: Li, Xiaoyun <xiaoyun.li@intel.com>; Ananyev, Konstantin
> > > > > <konstantin.ananyev@intel.com>
> > > > > Cc: dev@dpdk.org; Zhang, AlvinX <alvinx.zhang@intel.com>;
> > > > > stable@dpdk.org
> > > > > Subject: [PATCH] app/testpmd: fix random number of Tx segments
> > > > >
> > > > > When random number of segments in Tx packets is enabled, the
> > > > > total data space length of all segments must be greater or equal
> > > > > than the size of an Eth/IP/UDP/timestamp packet, that's total 14
> > > > > + 20 +
> > > > > 8 +
> > > > > 16 bytes. Otherwise the Tx engine may cause the application to crash.
> > > > >
> > > > > Bugzilla ID: 797
> > > > > Fixes: 79bec05b32b7 ("app/testpmd: add ability to split outgoing
> > > > > packets")
> > > > > Cc: stable@dpdk.org
> > > > >
> > > > > Signed-off-by: Alvin Zhang <alvinx.zhang@intel.com>
> > > > > ---
> > > > >  app/test-pmd/config.c  | 16 +++++++++++-----
> > > > > app/test-pmd/testpmd.c
> > > > > |  5
> > > > > +++++  app/test-pmd/testpmd.h |  5 +++++  app/test-pmd/txonly.c
> > > > > +++++ |
> > > > > +++++ 7
> > > > > +++++ +++++--
> > > > >  4 files changed, 26 insertions(+), 7 deletions(-)
> > > > >
> > > > > diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index
> > > > > 31d8ba1..5105b3b 100644
> > > > > --- a/app/test-pmd/config.c
> > > > > +++ b/app/test-pmd/config.c
> > > > > @@ -3837,10 +3837,11 @@ struct igb_ring_desc_16_bytes {
> > > > >  	 * Check that each segment length is greater or equal than
> > > > >  	 * the mbuf data size.
> > > > >  	 * Check also that the total packet length is greater or equal than the
> > > > > -	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
> > > > > -	 * 20 + 8).
> > > > > +	 * size of an Eth/IP/UDP + timestamp packet
> > > > > +	 * (sizeof(struct rte_ether_hdr) + 20 + 8 + 16).
> > > >
> > > > I don't really agree on this. Most of the time, txonly generate
> > > > packets with Eth/IP/UDP. It's not fair to limit the hdr length to
> > > > include
> > > timestamp in all cases.
> > > > And to be honest, I don't see why you need to add
> > > > "tx_pkt_nb_min_segs". It's only used in txonly when
> > > > "TX_PKT_SPLIT_RND". So this issue is because when
> > > > "TX_PKT_SPLIT_RND", the
> > > random nb_segs is not enough for the hdr.
> > > >
> > > > But if you read txonly carefully, if "TX_PKT_SPLIT_RND", the first
> > > > segment length should be equal or greater than 42 (14+20+8).
> > > > Because when "TX_PKT_SPLIT_RND", update_pkt_header() should be
> > > > called. And that function doesn't deal with header in multi-segments.
> > > > I think there's bug here.
> > > >
> > > > So I think you should only add a check in pkt_burst_prepare() in txonly().
> > > > 	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) ||
> > > > txonly_multi_flow)
> > > > +                           if (tx_pkt_seg_lengths[0] < 42) {
> > > > +			err_log;
> > > > +			return false;
> > > > +		}
> > > > 		update_pkt_header(pkt, pkt_len);
> 
> 
> As above, if the user has the configuration below, no packets will be sent out,
> and there will be lots and lots of repeated annoying logs.
> testpmd> set fwd txonly
> Set txonly packet forwarding mode
> testpmd> set txpkts 40,64
> testpmd> set txsplit rand
> testpmd> start
> txonly packet forwarding - ports=1 - cores=1 - streams=4 - NUMA support
> enabled, MP allocation mode: native Logical Core 2 (socket 0) forwards packets
> on 4 streams:
>   RX P=0/Q=0 (socket 0) -> TX P=0/Q=0 (socket 0) peer=02:00:00:00:00:00
>   RX P=0/Q=1 (socket 0) -> TX P=0/Q=1 (socket 0) peer=02:00:00:00:00:00
>   RX P=0/Q=2 (socket 0) -> TX P=0/Q=2 (socket 0) peer=02:00:00:00:00:00
>   RX P=0/Q=3 (socket 0) -> TX P=0/Q=3 (socket 0) peer=02:00:00:00:00:00
> 
>   txonly packet forwarding packets/burst=32
>   packet len=104 - nb packet segments=2
>   nb forwarding cores=1 - nb forwarding ports=1
>   port 0: RX queue number: 4 Tx queue number: 4
>     Rx offloads=0x0 Tx offloads=0x10000
>     RX queue: 0
>       RX desc=1024 - RX free threshold=32
>       RX threshold registers: pthresh=0 hthresh=0  wthresh=0
>       RX Offloads=0x0
>     TX queue: 0
>       TX desc=1024 - TX free threshold=32
>       TX threshold registers: pthresh=32 hthresh=0  wthresh=0
>       TX offloads=0x10000 - TX RS bit threshold=32 tx_pkt_seg_lengths[0] must
> bigger than 41 bytes tetx_pkt_seg_lengths[0] must bigger than 41 bytes
> stpmd> tx_pkt_seg_lengths[0] must bigger than 41 bytes
> tx_pkt_seg_lengths[0] must bigger than 41 bytes tx_pkt_seg_lengths[0] must
> bigger than 41 bytes tx_pkt_seg_lengths[0] must bigger than 41 bytes
> tx_pkt_seg_lengths[0] must bigger than 41 bytes tx_pkt_seg_lengths[0] must
> bigger than 41 bytes tx_pkt_seg_lengths[0] must bigger than 41 bytes
> tx_pkt_seg_lengths[0] must bigger than 41 bytes tx_pkt_seg_lengths[0] must
> bigger than 41 bytes tx_pkt_seg_lengths[0] must bigger than 41 bytes
> tx_pkt_seg_lengths[0] must bigger than 41 bytes tx_pkt_seg_lengths[0] must
> bigger than 41 bytes tx_pkt_seg_lengths[0] must bigger than 41 bytes
> tx_pkt_seg_lengths[0] must bigger than 41 bytes tx_pkt_seg_lengths[0] must
> bigger than 41 bytes tx_pkt_seg_lengths[0] must bigger than 41 bytes
> tx_pkt_seg_lengths[0] must bigger than 41 bytes tx_pkt_seg_lengths[0] must
> bigger than 41 bytes tx_pkt_seg_lengths[0] must bigger than 41 bytes
> tx_pkt_seg_lengths[0] must bigger than 41 bytes tx_pkt_seg_lengths[0] must
> bigger than 41 bytes tx_pkt_seg_lengths[0] must bigger than 41 bytes
> tx_pkt_seg_lengths[0] must bigger than 41 bytes ...
> ...
> testpmd> stop
> Telling cores to stop...
> Waiting for lcores to finish...
> 
>   ---------------------- Forward statistics for port 0  ----------------------
>   RX-packets: 0              RX-dropped: 0             RX-total: 0
>   TX-packets: 0              TX-dropped: 0             TX-total: 0
>   ----------------------------------------------------------------------------
> 
>   +++++++++++++++ Accumulated forward statistics for all
> ports+++++++++++++++
>   RX-packets: 0              RX-dropped: 0             RX-total: 0
>   TX-packets: 0              TX-dropped: 0             TX-total: 0
> 
> +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> +++++++++++
> 
> By the way, if multi-flow is enabled, the IP header will also be updated, so we
> should put the check before the code below.
> 
> 	if (txonly_multi_flow) {
> 		uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
> 		struct rte_ipv4_hdr *ip_hdr;
> 		uint32_t addr;
> 
> 		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
> 				struct rte_ipv4_hdr *,
> 				sizeof(struct rte_ether_hdr));
> 		/*
> 		 * Generate multiple flows by varying IP src addr. This
> 		 * enables packets are well distributed by RSS in
> 		 * receiver side if any and txonly mode can be a decent
> 		 * packet generator for developer's quick performance
> 		 * regression test.
> 		 */
> 		addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id();
> 		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
> 		RTE_PER_LCORE(_ip_var) = ip_var;
> 	}
> 

Yes. No matter where you put the check in txonly, there'll be annoying print.
Tricky.
I can only think of 2 solutions:
1. If the header (IP or UDP) needs to be updated and the first segment length is < 42, copy pkt_ip_hdr and pkt_udp_hdr to temporary values, update the temporary values and use copy_buf_to_pkt() to copy the temporary headers across the segments.
In the other cases, keep the original code to avoid a performance drop. Also, this option needs to keep your min_segs design.

The shortcoming is that it's quite a complicated change.

2. Force the first segment length to be at least 42 bytes in set_tx_pkt_segments()

This way is very simple, but the shortcoming is the limited usage, and you would probably need to add a notice in the docs to tell users about this change.

> > >
> > > Yes, I didn't notice the updating for the UDP header, but the bug
> > > first occurs in this function copy_buf_to_pkt(&pkt_udp_hdr,
> > sizeof(pkt_udp_hdr), pkt,
> > > 			sizeof(struct rte_ether_hdr) +
> > > 			sizeof(struct rte_ipv4_hdr));
> > > not in update_pkt_header.
> > >
> > > Here, expecting users to set a minimum of 42 bytes for the first segment
> > > seems OK, but I think putting the check where the data space length of the
> > > first segment is configured is more graceful.
> >
> > No. You didn't get my point. It's not graceful at all.
> > The segment fault will only happen when "TX_PKT_SPLIT_RND". Because
> > the hdr may take 2 segs while random nb_segs is only 1.
> > But if it's not "TX_PKT_SPLIT_RND", the current set_tx_pkt_segments()
> > already make sure pkt_len is enough for 42.
> >
> > So the only case you need to deal with is "TX_PKT_SPLIT_RND". And
> > since "TX_PKT_SPLIT_RND" actually needs the first segment to be enough
> > to contain
> > 42 bytes.
> > And in cmd_set_txpkts_parsed, you may not get the configuration to
> > know if it will be configured as "TX_PKT_SPLIT_RND". That's why you
> > should check before update_pkt_header().
> >
> > In this way, when it's NOT "TX_PKT_SPLIT_RND", it will maintain the
> > old behavior that hdrs may cross several segs which makes more sense.
> >
> > >
> > > >
> > > > As for timestamp, maybe refer to "pkt_copy_split" in csumonly.c is
> > > > better? Copy the extra to the last segment if it's not enough. Not
> > > > sure how to deal with this issue better.
> > > >
> > > > >  	 */
> > > > >  	tx_pkt_len = 0;
> > > > > +	tx_pkt_nb_min_segs = 0;
> > > > >  	for (i = 0; i < nb_segs; i++) {
> > > > >  		if (seg_lengths[i] > mbuf_data_size[0]) {
> > > > >  			fprintf(stderr,
> > > > > @@ -3849,11 +3850,16 @@ struct igb_ring_desc_16_bytes {
> > > > >  			return;
> > > > >  		}
> > > > >  		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
> > > > > +
> > > > > +		if (!tx_pkt_nb_min_segs &&
> > > > > +		    tx_pkt_len >= (sizeof(struct rte_ether_hdr) + 20 + 8 +
> 16))
> > > > > +			tx_pkt_nb_min_segs = i + 1;
> > > > >  	}
> > > > > -	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
> > > > > +
> > > > > +	if (!tx_pkt_nb_min_segs) {
> > > > >  		fprintf(stderr, "total packet length=%u < %d - give up\n",
> > > > > -				(unsigned) tx_pkt_len,
> > > > > -				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
> > > > > +			(unsigned int) tx_pkt_len,
> > > > > +			(int)(sizeof(struct rte_ether_hdr) + 20 + 8 + 16));
> > > > >  		return;
> > > > >  	}
> > > > >
> > > > > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> > > > > index
> > > > > 6cbe9ba..c496e59 100644
> > > > > --- a/app/test-pmd/testpmd.c
> > > > > +++ b/app/test-pmd/testpmd.c
> > > > > @@ -232,6 +232,11 @@ struct fwd_engine * fwd_engines[] = {  };
> > > > > uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY
> > > > > packets */
> > > > >
> > > > > +/**< Minimum number of segments in TXONLY packets to
> > > > > +accommodate all packet
> > > > > + * headers.
> > > > > + */
> > > > > +uint8_t  tx_pkt_nb_min_segs = 1;
> > > > > +
> > > > >  enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;  /**< Split
> > > > > policy for packets to TX. */
> > > > >
> > > > > diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> > > > > index
> > > > > 16a3598..f5bc427 100644
> > > > > --- a/app/test-pmd/testpmd.h
> > > > > +++ b/app/test-pmd/testpmd.h
> > > > > @@ -464,6 +464,11 @@ enum dcb_mode_enable  extern uint16_t
> > > > > tx_pkt_length; /**< Length of TXONLY packet */  extern uint16_t
> > > > > tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */
> > > > > extern uint8_t  tx_pkt_nb_segs; /**< Number of segments in TX
> > > > > packets */
> > > > > +
> > > > > +/**< Minimum number of segments in TXONLY packets to
> > > > > +accommodate all packet
> > > > > + * headers.
> > > > > + */
> > > > > +extern uint8_t  tx_pkt_nb_min_segs;
> > > > >  extern uint32_t tx_pkt_times_intra;  extern uint32_t
> > > > > tx_pkt_times_inter;
> > > > >
> > > > > diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index
> > > > > aed820f..27e4458 100644
> > > > > --- a/app/test-pmd/txonly.c
> > > > > +++ b/app/test-pmd/txonly.c
> > > > > @@ -195,8 +195,11 @@
> > > > >  	uint32_t nb_segs, pkt_len;
> > > > >  	uint8_t i;
> > > > >
> > > > > -	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
> > > > > -		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
> > > > > +	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) &&
> > > > > +	    tx_pkt_nb_segs > tx_pkt_nb_min_segs)
> > > > > +		nb_segs = rte_rand() %
> > > > > +			  (tx_pkt_nb_segs - tx_pkt_nb_min_segs + 1) +
> > > > > +			  tx_pkt_nb_min_segs;
> > > > >  	else
> > > > >  		nb_segs = tx_pkt_nb_segs;
> > > > >
> > > > > --
> > > > > 1.8.3.1
diff mbox series

Patch

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 31d8ba1..5105b3b 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -3837,10 +3837,11 @@  struct igb_ring_desc_16_bytes {
 	 * Check that each segment length is greater or equal than
 	 * the mbuf data size.
 	 * Check also that the total packet length is greater or equal than the
-	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
-	 * 20 + 8).
+	 * size of an Eth/IP/UDP + timestamp packet
+	 * (sizeof(struct rte_ether_hdr) + 20 + 8 + 16).
 	 */
 	tx_pkt_len = 0;
+	tx_pkt_nb_min_segs = 0;
 	for (i = 0; i < nb_segs; i++) {
 		if (seg_lengths[i] > mbuf_data_size[0]) {
 			fprintf(stderr,
@@ -3849,11 +3850,16 @@  struct igb_ring_desc_16_bytes {
 			return;
 		}
 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
+
+		if (!tx_pkt_nb_min_segs &&
+		    tx_pkt_len >= (sizeof(struct rte_ether_hdr) + 20 + 8 + 16))
+			tx_pkt_nb_min_segs = i + 1;
 	}
-	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
+
+	if (!tx_pkt_nb_min_segs) {
 		fprintf(stderr, "total packet length=%u < %d - give up\n",
-				(unsigned) tx_pkt_len,
-				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
+			(unsigned int) tx_pkt_len,
+			(int)(sizeof(struct rte_ether_hdr) + 20 + 8 + 16));
 		return;
 	}
 
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 6cbe9ba..c496e59 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -232,6 +232,11 @@  struct fwd_engine * fwd_engines[] = {
 };
 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
 
+/**< Minimum number of segments in TXONLY packets to accommodate all packet
+ * headers.
+ */
+uint8_t  tx_pkt_nb_min_segs = 1;
+
 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
 /**< Split policy for packets to TX. */
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 16a3598..f5bc427 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -464,6 +464,11 @@  enum dcb_mode_enable
 extern uint16_t tx_pkt_length; /**< Length of TXONLY packet */
 extern uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */
 extern uint8_t  tx_pkt_nb_segs; /**< Number of segments in TX packets */
+
+/**< Minimum number of segments in TXONLY packets to accommodate all packet
+ * headers.
+ */
+extern uint8_t  tx_pkt_nb_min_segs;
 extern uint32_t tx_pkt_times_intra;
 extern uint32_t tx_pkt_times_inter;
 
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index aed820f..27e4458 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -195,8 +195,11 @@ 
 	uint32_t nb_segs, pkt_len;
 	uint8_t i;
 
-	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
-		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
+	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) &&
+	    tx_pkt_nb_segs > tx_pkt_nb_min_segs)
+		nb_segs = rte_rand() %
+			  (tx_pkt_nb_segs - tx_pkt_nb_min_segs + 1) +
+			  tx_pkt_nb_min_segs;
 	else
 		nb_segs = tx_pkt_nb_segs;