app/testpmd: fix segment number check

Message ID 1607699265-5238-1-git-send-email-viacheslavo@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers
Series app/testpmd: fix segment number check |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional fail Functional Testing issues
ci/iol-testing success Testing PASS
ci/iol-intel-Functional fail Functional Testing issues
ci/iol-intel-Performance success Performance Testing PASS
ci/travis-robot warning Travis build: failed

Commit Message

Slava Ovsiienko Dec. 11, 2020, 3:07 p.m. UTC
  The --txpkts command line parameter was silently ignored because the
application was unable to check the Tx queue ring sizes for non
configured ports [1].

The "set txpkts <len0[,len1]*>" was also rejected if there
was a stopped or unconfigured port.

This provides the following:

  - number of segment check is performed against
    configured Tx queues only

  - the capability to send single packet is supposed to
    be very basic and always supported, the setting segment
    number to 1 is always allowed, no check performed

  - at the moment of Tx queue setup the descriptor number is
    checked against configured segment number

Fixes: 8dae835d88b7 ("app/testpmd: remove restriction on Tx segments set")
Cc: stable@dpdk.org
Bugzilla ID: 584

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 app/test-pmd/cmdline.c |  5 +++++
 app/test-pmd/config.c  | 21 ++++++++++++++++-----
 app/test-pmd/testpmd.c |  7 ++++++-
 3 files changed, 27 insertions(+), 6 deletions(-)
  

Comments

Andrew Boyer Dec. 11, 2020, 4 p.m. UTC | #1
> On Dec 11, 2020, at 10:07 AM, Viacheslav Ovsiienko <viacheslavo@nvidia.com> wrote:
> 
> The --txpkts command line parameter was silently ignored due to
> application was unable to check the Tx queue ring sizes for non
> configured ports [1].

... ignored because the application...

> The "set txpkts <len0[,len1]*>" was also rejected if there
> was some stopped or /unconfigured port.

... was a stopped or unconfigured ...

> 
> This provides the following:
> 
>  - number of segment check is performed against
>    configured Tx queues only
> 
>  - the capability to send single packet is supposed to
>    be very basic and always supported, the setting segment
>    number to 1 is always allowed, no check performed
> 
>  - at the moment of Tx queue setup the descriptor number is
>    checked against configured segment number
> 
> Fixes: 8dae835d88b7 ("app/testpmd: remove restriction on Tx segments set")
> Cc: stable@dpdk.org
> Bugzilla ID: 584
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
> app/test-pmd/cmdline.c |  5 +++++
> app/test-pmd/config.c  | 21 ++++++++++++++++-----
> app/test-pmd/testpmd.c |  7 ++++++-
> 3 files changed, 27 insertions(+), 6 deletions(-)
> 
> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
> index 0d2d6aa..86388a2 100644
> --- a/app/test-pmd/cmdline.c
> +++ b/app/test-pmd/cmdline.c
> @@ -2798,6 +2798,11 @@ struct cmd_setup_rxtx_queue {
> 		if (!numa_support || socket_id == NUMA_NO_CONFIG)
> 			socket_id = port->socket_id;
> 
> +		if (port->nb_tx_desc[res->qid] < tx_pkt_nb_segs) {
> +			printf("Failed to setup TX queue: "

setup -> set up
I find it helpful when the numbers are logged in the error message.  Like “nb_desc 8 < nb_segs 16”.

> +			       "not enough descriptors\n");
> +			return;
> +		}

Why is there a relationship between the number of descriptors and the number of segments? For our device, there isn’t. We can send 16 Tx segments per descriptor and (I suppose) you could try to create an 8 descriptor ring.

Maybe this is to protect a simpler device that consumes one descriptor per segment? If so, the check would ideally be conditioned on a related device capability flag. I’m not sure if there is such a flag today.

> 		ret = rte_eth_tx_queue_setup(res->portid,
> 					     res->qid,
> 					     port->nb_tx_desc[res->qid],
> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
> index b51de59..a6fccfa 100644
> --- a/app/test-pmd/config.c
> +++ b/app/test-pmd/config.c
> @@ -3911,12 +3911,18 @@ struct igb_ring_desc_16_bytes {
> 		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
> 			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
> 
> -			if (ret)
> +			/* Do the check only for the active/configured ports. */
> +			if (ret == -EINVAL)
> +				continue;
> +			if (ret) {
> +				printf("failed to get ring size for TX "
> +				       "queue(%u) Port(%u) - txpkts ignored\n",
> +				       port_id, queue_id);
> 				return true;
> -
> +			}
> 			if (ring_size < nb_segs) {
> -				printf("nb segments per TX packets=%u >= "
> -				       "TX queue(%u) ring_size=%u - ignored\n",
> +				printf("nb segments per TX packets=%u >= TX "
> +				       "queue(%u) ring_size=%u - txpkts ignored\n",
> 				       nb_segs, queue_id, ring_size);
> 				return true;
> 			}
> @@ -3932,7 +3938,12 @@ struct igb_ring_desc_16_bytes {
> 	uint16_t tx_pkt_len;
> 	unsigned int i;
> 
> -	if (nb_segs_is_invalid(nb_segs))
> +	/*
> +	 * For single sengment settings failed check is ignored.
> +	 * It is a very basic capability to send the single segment
> +	 * packets, suppose it is always supported.

sengment -> segment
... to send single segment...
suppose -> assume

> +	 */
> +	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs))
> 		return;
> 
> 	/*
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index 33fc0fd..9ea0145 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -2575,6 +2575,11 @@ struct extmem_param {
> 			port->need_reconfig_queues = 0;
> 			/* setup tx queues */
> 			for (qi = 0; qi < nb_txq; qi++) {
> +				if (port->nb_tx_desc[qi] < tx_pkt_nb_segs) {
> +					printf("Failed to setup TX queue: "
> +					       "not enough descriptors\n");

Same comments as above

> +					goto fail;
> +				}
> 				if ((numa_support) &&
> 					(txring_numa[pi] != NUMA_NO_CONFIG))
> 					diag = rte_eth_tx_queue_setup(pi, qi,
> @@ -2589,7 +2594,7 @@ struct extmem_param {
> 
> 				if (diag == 0)
> 					continue;
> -
> +fail:
> 				/* Fail to setup tx queue, return */
> 				if (rte_atomic16_cmpset(&(port->port_status),
> 							RTE_PORT_HANDLING,
> -- 
> 1.8.3.1
>
  
Slava Ovsiienko Dec. 11, 2020, 4:14 p.m. UTC | #2
Hi, Andrew

Thank you for the review, please, see below.

> -----Original Message-----
> From: Andrew Boyer <aboyer@pensando.io>
> Sent: Friday, December 11, 2020 18:00
> To: Slava Ovsiienko <viacheslavo@nvidia.com>
> Cc: dev@dpdk.org; NBU-Contact-Thomas Monjalon <thomas@monjalon.net>;
> ferruh.yigit@intel.com; stable@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] app/testpmd: fix segment number check
> 
> 
> 
> > On Dec 11, 2020, at 10:07 AM, Viacheslav Ovsiienko
> <viacheslavo@nvidia.com> wrote:
> >
> > The --txpkts command line parameter was silently ignored due to
> > application was unable to check the Tx queue ring sizes for non
> > configured ports [1].
> 
> ... ignored because the application...
OK, will fix.

> 
> > The "set txpkts <len0[,len1]*>" was also rejected if there was some
> > stopped or /unconfigured port.
> 
> ... was a stopped or unconfigured ...
OK, will fix.

> 
> >
> > This provides the following:
> >
> >  - number of segment check is performed against
> >    configured Tx queues only
> >
> >  - the capability to send single packet is supposed to
> >    be very basic and always supported, the setting segment
> >    number to 1 is always allowed, no check performed
> >
> >  - at the moment of Tx queue setup the descriptor number is
> >    checked against configured segment number
> >
> > Fixes: 8dae835d88b7 ("app/testpmd: remove restriction on Tx segments
> > set")
> > Cc: stable@dpdk.org
> > Bugzilla ID: 584
> >
> > Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> > ---
> > app/test-pmd/cmdline.c |  5 +++++
> > app/test-pmd/config.c  | 21 ++++++++++++++++-----
> > app/test-pmd/testpmd.c |  7 ++++++-
> > 3 files changed, 27 insertions(+), 6 deletions(-)
> >
> > diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index
> > 0d2d6aa..86388a2 100644
> > --- a/app/test-pmd/cmdline.c
> > +++ b/app/test-pmd/cmdline.c
> > @@ -2798,6 +2798,11 @@ struct cmd_setup_rxtx_queue {
> > 		if (!numa_support || socket_id == NUMA_NO_CONFIG)
> > 			socket_id = port->socket_id;
> >
> > +		if (port->nb_tx_desc[res->qid] < tx_pkt_nb_segs) {
> > +			printf("Failed to setup TX queue: "
> 
> setup -> set up
Disagree, it is quite common in testpmd code to use "setup" wording,
 I just copy-pasted the message from the neighbor lines.

> I find it helpful when the numbers are logged in the error message.  Like
> “nb_desc 8 < nb_segs 16”.
> 
> > +			       "not enough descriptors\n");
> > +			return;
> > +		}
> 
Do you think it is worth being that informative? OK, will add.

> Why is there a relationship between the number of descriptors and the
> number of segments? For our device, there isn’t. We can send 16 Tx segments
> per descriptor and (I suppose) you could try to create an 8 descriptor ring.
> 
> Maybe this is to protect a simpler device that consumes one descriptor per
> segment? If so, the check would ideally be conditioned on a related device
> capability flag. I’m not sure if there is such a flag today.
There is no correlation between n_desc and n_seg for Tx in mlx5 PMD either.
And there is no information provided on how many descriptors are
required for multi-segment packets.

If we have a look at original commit being fixed
("app/testpmd: remove restriction on Tx segments set") we'll see:

-       if (nb_segs >= (unsigned) nb_txd) {
-               printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
-                      nb_segs, (unsigned int) nb_txd);

So, the check was added as a replacement for another, stricter check.
Now we are just improving it a little bit.


> 
> > 		ret = rte_eth_tx_queue_setup(res->portid,
> > 					     res->qid,
> > 					     port->nb_tx_desc[res->qid],
> > diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index
> > b51de59..a6fccfa 100644
> > --- a/app/test-pmd/config.c
> > +++ b/app/test-pmd/config.c
> > @@ -3911,12 +3911,18 @@ struct igb_ring_desc_16_bytes {
> > 		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
> > 			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
> >
> > -			if (ret)
> > +			/* Do the check only for the active/configured ports.
> */
> > +			if (ret == -EINVAL)
> > +				continue;
> > +			if (ret) {
> > +				printf("failed to get ring size for TX "
> > +				       "queue(%u) Port(%u) - txpkts ignored\n",
> > +				       port_id, queue_id);
> > 				return true;
> > -
> > +			}
> > 			if (ring_size < nb_segs) {
> > -				printf("nb segments per TX packets=%u >= "
> > -				       "TX queue(%u) ring_size=%u - ignored\n",
> > +				printf("nb segments per TX packets=%u >= TX
> "
> > +				       "queue(%u) ring_size=%u - txpkts
> ignored\n",
> > 				       nb_segs, queue_id, ring_size);
> > 				return true;
> > 			}
> > @@ -3932,7 +3938,12 @@ struct igb_ring_desc_16_bytes {
> > 	uint16_t tx_pkt_len;
> > 	unsigned int i;
> >
> > -	if (nb_segs_is_invalid(nb_segs))
> > +	/*
> > +	 * For single sengment settings failed check is ignored.
> > +	 * It is a very basic capability to send the single segment
> > +	 * packets, suppose it is always supported.
> 
> sengment -> segment
> ... to send single segment...
> suppose -> assume
OK, np, will fix.

> 
> > +	 */
> > +	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs))
> > 		return;
> >
> > 	/*
> > diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> > 33fc0fd..9ea0145 100644
> > --- a/app/test-pmd/testpmd.c
> > +++ b/app/test-pmd/testpmd.c
> > @@ -2575,6 +2575,11 @@ struct extmem_param {
> > 			port->need_reconfig_queues = 0;
> > 			/* setup tx queues */
> > 			for (qi = 0; qi < nb_txq; qi++) {
> > +				if (port->nb_tx_desc[qi] < tx_pkt_nb_segs) {
> > +					printf("Failed to setup TX queue: "
> > +					       "not enough descriptors\n");
> 
> Same comments as above
OK.

> 
> > +					goto fail;
> > +				}
> > 				if ((numa_support) &&
> > 					(txring_numa[pi] !=
> NUMA_NO_CONFIG))
> > 					diag = rte_eth_tx_queue_setup(pi, qi,
> @@ -2589,7 +2594,7 @@
> > struct extmem_param {
> >
> > 				if (diag == 0)
> > 					continue;
> > -
> > +fail:
> > 				/* Fail to setup tx queue, return */
> > 				if (rte_atomic16_cmpset(&(port-
> >port_status),
> >
> 	RTE_PORT_HANDLING,
> > --
> > 1.8.3.1
> >

Thanks a lot, I will wait for a while for more comments and provide v2.

With best regards, Slava
  
Ferruh Yigit Dec. 16, 2020, 12:12 p.m. UTC | #3
On 12/11/2020 4:14 PM, Slava Ovsiienko wrote:
> Hi, Andrew
> 
> Thank you for the review, please, see below.
> 
>> -----Original Message-----
>> From: Andrew Boyer <aboyer@pensando.io>
>> Sent: Friday, December 11, 2020 18:00
>> To: Slava Ovsiienko <viacheslavo@nvidia.com>
>> Cc: dev@dpdk.org; NBU-Contact-Thomas Monjalon <thomas@monjalon.net>;
>> ferruh.yigit@intel.com; stable@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH] app/testpmd: fix segment number check
>>
>>
>>
>>> On Dec 11, 2020, at 10:07 AM, Viacheslav Ovsiienko
>> <viacheslavo@nvidia.com> wrote:
>>>
>>> The --txpkts command line parameter was silently ignored due to
>>> application was unable to check the Tx queue ring sizes for non
>>> configured ports [1].
>>
>> ... ignored because the application...
> OK, will fix.
> 
>>
>>> The "set txpkts <len0[,len1]*>" was also rejected if there was some
>>> stopped or /unconfigured port.
>>
>> ... was a stopped or unconfigured ...
> OK, will fix.
> 
>>
>>>
>>> This provides the following:
>>>
>>>   - number of segment check is performed against
>>>     configured Tx queues only
>>>
>>>   - the capability to send single packet is supposed to
>>>     be very basic and always supported, the setting segment
>>>     number to 1 is always allowed, no check performed
>>>
>>>   - at the moment of Tx queue setup the descriptor number is
>>>     checked against configured segment number
>>>
>>> Fixes: 8dae835d88b7 ("app/testpmd: remove restriction on Tx segments
>>> set")
>>> Cc: stable@dpdk.org
>>> Bugzilla ID: 584
>>>
>>> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
>>> ---
>>> app/test-pmd/cmdline.c |  5 +++++
>>> app/test-pmd/config.c  | 21 ++++++++++++++++-----
>>> app/test-pmd/testpmd.c |  7 ++++++-
>>> 3 files changed, 27 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index
>>> 0d2d6aa..86388a2 100644
>>> --- a/app/test-pmd/cmdline.c
>>> +++ b/app/test-pmd/cmdline.c
>>> @@ -2798,6 +2798,11 @@ struct cmd_setup_rxtx_queue {
>>> 		if (!numa_support || socket_id == NUMA_NO_CONFIG)
>>> 			socket_id = port->socket_id;
>>>
>>> +		if (port->nb_tx_desc[res->qid] < tx_pkt_nb_segs) {
>>> +			printf("Failed to setup TX queue: "
>>
>> setup -> set up
> Disagree, it is quite common in testpmd code to use "setup" wording,
>   I just copy-pasted the message from the neighbor lines.
> 
>> I find it helpful when the numbers are logged in the error message.  Like
>> “nb_desc 8 < nb_segs 16”.
>>
>>> +			       "not enough descriptors\n");
>>> +			return;
>>> +		}
>>
> Do you think it is worth to be informative so much? OK, will add.
> 
>> Why is there a relationship between the number of descriptors and the
>> number of segments? For our device, there isn’t. We can send 16 Tx segments
>> per descriptor and (I suppose) you could try to create an 8 descriptor ring.
>>
>> Maybe this is to protect a simpler device that consumes one descriptor per
>> segment? If so, the check would ideally be conditioned on a related device
>> capability flag. I’m not sure if there is such a flag today.
> There is no correlation between  n_desc and n_seg for Tx in mlx5 PMD either.
> And there is no information provided how many descriptors should be
> provided for the multi-segment packets.
> 
> If we have a look at original commit being fixed
> ("app/testpmd: remove restriction on Tx segments set") we'll see:
> 
> -       if (nb_segs >= (unsigned) nb_txd) {
> -               printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
> -                      nb_segs, (unsigned int) nb_txd);
> 
> So, the check was added in replacement for other, more strict, check.
> Now we are just improving one a little bit.
> 

Many devices use a descriptor per segment, and if there are not enough free
descriptors to fit all segments they won't be able to send the packet; I guess this
check is to cover them.

Out of curiosity, does your device have 16 buffer address fields in the descriptor,
and can they be utilized to send multiple independent packets in a single descriptor?


> 
>>
>>> 		ret = rte_eth_tx_queue_setup(res->portid,
>>> 					     res->qid,
>>> 					     port->nb_tx_desc[res->qid],
>>> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index
>>> b51de59..a6fccfa 100644
>>> --- a/app/test-pmd/config.c
>>> +++ b/app/test-pmd/config.c
>>> @@ -3911,12 +3911,18 @@ struct igb_ring_desc_16_bytes {
>>> 		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
>>> 			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
>>>
>>> -			if (ret)
>>> +			/* Do the check only for the active/configured ports.
>> */
>>> +			if (ret == -EINVAL)
>>> +				continue;
>>> +			if (ret) {
>>> +				printf("failed to get ring size for TX "
>>> +				       "queue(%u) Port(%u) - txpkts ignored\n",
>>> +				       port_id, queue_id);
>>> 				return true;
>>> -
>>> +			}
>>> 			if (ring_size < nb_segs) {
>>> -				printf("nb segments per TX packets=%u >= "
>>> -				       "TX queue(%u) ring_size=%u - ignored\n",
>>> +				printf("nb segments per TX packets=%u >= TX
>> "
>>> +				       "queue(%u) ring_size=%u - txpkts
>> ignored\n",
>>> 				       nb_segs, queue_id, ring_size);
>>> 				return true;
>>> 			}
>>> @@ -3932,7 +3938,12 @@ struct igb_ring_desc_16_bytes {
>>> 	uint16_t tx_pkt_len;
>>> 	unsigned int i;
>>>
>>> -	if (nb_segs_is_invalid(nb_segs))
>>> +	/*
>>> +	 * For single sengment settings failed check is ignored.
>>> +	 * It is a very basic capability to send the single segment
>>> +	 * packets, suppose it is always supported.
>>
>> sengment -> segment
>> ... to send single segment...
>> suppose -> assume
> OK, np, will fix.
> 
>>
>>> +	 */
>>> +	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs))
>>> 		return;
>>>
>>> 	/*
>>> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
>>> 33fc0fd..9ea0145 100644
>>> --- a/app/test-pmd/testpmd.c
>>> +++ b/app/test-pmd/testpmd.c
>>> @@ -2575,6 +2575,11 @@ struct extmem_param {
>>> 			port->need_reconfig_queues = 0;
>>> 			/* setup tx queues */
>>> 			for (qi = 0; qi < nb_txq; qi++) {
>>> +				if (port->nb_tx_desc[qi] < tx_pkt_nb_segs) {
>>> +					printf("Failed to setup TX queue: "
>>> +					       "not enough descriptors\n");
>>
>> Same comments as above
> OK.
> 
>>
>>> +					goto fail;
>>> +				}
>>> 				if ((numa_support) &&
>>> 					(txring_numa[pi] !=
>> NUMA_NO_CONFIG))
>>> 					diag = rte_eth_tx_queue_setup(pi, qi,
>> @@ -2589,7 +2594,7 @@
>>> struct extmem_param {
>>>
>>> 				if (diag == 0)
>>> 					continue;
>>> -
>>> +fail:
>>> 				/* Fail to setup tx queue, return */
>>> 				if (rte_atomic16_cmpset(&(port-
>>> port_status),
>>>
>> 	RTE_PORT_HANDLING,
>>> --
>>> 1.8.3.1
>>>
> 
> Thanks a lot, I will wait for a while for more comments and provide v2.
> 
> With best regards, Slava
>
  
Slava Ovsiienko Dec. 16, 2020, 12:33 p.m. UTC | #4
Hi, Ferruh

> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Wednesday, December 16, 2020 14:12
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Andrew Boyer
> <aboyer@pensando.io>
> Cc: dev@dpdk.org; NBU-Contact-Thomas Monjalon <thomas@monjalon.net>;
> stable@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] app/testpmd: fix segment number check
> 
> On 12/11/2020 4:14 PM, Slava Ovsiienko wrote:
> > Hi, Andrew
> >
> > Thank you for the review, please, see below.
> >
> >> -----Original Message-----
> >> From: Andrew Boyer <aboyer@pensando.io>
> >> Sent: Friday, December 11, 2020 18:00
> >> To: Slava Ovsiienko <viacheslavo@nvidia.com>
> >> Cc: dev@dpdk.org; NBU-Contact-Thomas Monjalon
> <thomas@monjalon.net>;
> >> ferruh.yigit@intel.com; stable@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH] app/testpmd: fix segment number check
> >>
> >>
> >>
> >>> On Dec 11, 2020, at 10:07 AM, Viacheslav Ovsiienko
> >> <viacheslavo@nvidia.com> wrote:
> >>>
> >>> The --txpkts command line parameter was silently ignored due to
> >>> application was unable to check the Tx queue ring sizes for non
> >>> configured ports [1].
> >>
> >> ... ignored because the application...
> > OK, will fix.
> >
> >>
> >>> The "set txpkts <len0[,len1]*>" was also rejected if there was some
> >>> stopped or /unconfigured port.
> >>
> >> ... was a stopped or unconfigured ...
> > OK, will fix.
> >
> >>
> >>>
> >>> This provides the following:
> >>>
> >>>   - number of segment check is performed against
> >>>     configured Tx queues only
> >>>
> >>>   - the capability to send single packet is supposed to
> >>>     be very basic and always supported, the setting segment
> >>>     number to 1 is always allowed, no check performed
> >>>
> >>>   - at the moment of Tx queue setup the descriptor number is
> >>>     checked against configured segment number
> >>>
> >>> Fixes: 8dae835d88b7 ("app/testpmd: remove restriction on Tx segments
> >>> set")
> >>> Cc: stable@dpdk.org
> >>> Bugzilla ID: 584
> >>>
> >>> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> >>> ---
> >>> app/test-pmd/cmdline.c |  5 +++++
> >>> app/test-pmd/config.c  | 21 ++++++++++++++++-----
> >>> app/test-pmd/testpmd.c |  7 ++++++-
> >>> 3 files changed, 27 insertions(+), 6 deletions(-)
> >>>
> >>> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index
> >>> 0d2d6aa..86388a2 100644
> >>> --- a/app/test-pmd/cmdline.c
> >>> +++ b/app/test-pmd/cmdline.c
> >>> @@ -2798,6 +2798,11 @@ struct cmd_setup_rxtx_queue {
> >>> 		if (!numa_support || socket_id == NUMA_NO_CONFIG)
> >>> 			socket_id = port->socket_id;
> >>>
> >>> +		if (port->nb_tx_desc[res->qid] < tx_pkt_nb_segs) {
> >>> +			printf("Failed to setup TX queue: "
> >>
> >> setup -> set up
> > Disagree, it is quite common in testpmd code to use "setup" wording,
> >   I just copy-pasted the message from the neighbor lines.
> >
> >> I find it helpful when the numbers are logged in the error message.
> >> Like “nb_desc 8 < nb_segs 16”.
> >>
> >>> +			       "not enough descriptors\n");
> >>> +			return;
> >>> +		}
> >>
> > Do you think it is worth to be informative so much? OK, will add.
> >
> >> Why is there a relationship between the number of descriptors and the
> >> number of segments? For our device, there isn’t. We can send 16 Tx
> >> segments per descriptor and (I suppose) you could try to create an 8
> descriptor ring.
> >>
> >> Maybe this is to protect a simpler device that consumes one
> >> descriptor per segment? If so, the check would ideally be conditioned
> >> on a related device capability flag. I’m not sure if there is such a flag today.
> > There is no correlation between  n_desc and n_seg for Tx in mlx5 PMD either.
> > And there is no information provided how many descriptors should be
> > provided for the multi-segment packets.
> >
> > If we have a look at original commit being fixed
> > ("app/testpmd: remove restriction on Tx segments set") we'll see:
> >
> > -       if (nb_segs >= (unsigned) nb_txd) {
> > -               printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
> > -                      nb_segs, (unsigned int) nb_txd);
> >
> > So, the check was added in replacement for other, more strict, check.
> > Now we are just improving one a little bit.
> >
> 
> Many devices use a descriptor per segment, and if there is no enough free
> descriptor to fit all segments they won't able to send the packet, I guess this
> check is to cover them.
> 
> Out of curiosity, is your device has 16 buffer address fields in the descriptor,
> can they be utilized to send multiple independent packets in single descriptor?
> 
Regarding mlx5 - there is no strong correspondence between WQE (HW desc) and
mbufs. The ConnectX-5+ supports various method of placing data to the descriptors -
by direct data inline or by pointers. In average, with engaged MPW (multipacket-write)
feature we can put up to 4 mbuf pointers into one WQE. WQEs can be combined to
handle 16-or-even-more-mbufs-chain packets. Hence, the descriptor check being discussed
is still relevant for mlx5, even though it is only approximate.

With best regards, Slava

[snip]
  
Ferruh Yigit Dec. 16, 2020, 12:36 p.m. UTC | #5
On 12/11/2020 3:07 PM, Viacheslav Ovsiienko wrote:
> The --txpkts command line parameter was silently ignored due to
> application was unable to check the Tx queue ring sizes for non
> configured ports [1].
> 
> The "set txpkts <len0[,len1]*>" was also rejected if there
> was some stopped or /unconfigured port.
> 

May not be for the commit log but to understand the problem here,
what are the steps to reproduce the problem in "set txpkts" command?

> This provides the following:
> 
>    - number of segment check is performed against
>      configured Tx queues only
> 
>    - the capability to send single packet is supposed to
>      be very basic and always supported, the setting segment
>      number to 1 is always allowed, no check performed
> 
>    - at the moment of Tx queue setup the descriptor number is
>      checked against configured segment number

Not sure about this one, more comments below.

> 
> Fixes: 8dae835d88b7 ("app/testpmd: remove restriction on Tx segments set")
> Cc: stable@dpdk.org
> Bugzilla ID: 584
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
>   app/test-pmd/cmdline.c |  5 +++++
>   app/test-pmd/config.c  | 21 ++++++++++++++++-----
>   app/test-pmd/testpmd.c |  7 ++++++-
>   3 files changed, 27 insertions(+), 6 deletions(-)
> 
> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
> index 0d2d6aa..86388a2 100644
> --- a/app/test-pmd/cmdline.c
> +++ b/app/test-pmd/cmdline.c
> @@ -2798,6 +2798,11 @@ struct cmd_setup_rxtx_queue {
>   		if (!numa_support || socket_id == NUMA_NO_CONFIG)
>   			socket_id = port->socket_id;
>   
> +		if (port->nb_tx_desc[res->qid] < tx_pkt_nb_segs) {
> +			printf("Failed to setup TX queue: "
> +			       "not enough descriptors\n");
> +			return;
> +		}

The "port->nb_tx_desc[res->qid]" can be '0', and that is the default value in
testpmd; in that case the device-suggested values are used. The above check fails
when the '--txd' argument is not provided.
Same problem with the same check below in 'start_port()', I think both can be
removed.

>   		ret = rte_eth_tx_queue_setup(res->portid,
>   					     res->qid,
>   					     port->nb_tx_desc[res->qid],
> diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
> index b51de59..a6fccfa 100644
> --- a/app/test-pmd/config.c
> +++ b/app/test-pmd/config.c
> @@ -3911,12 +3911,18 @@ struct igb_ring_desc_16_bytes {
>   		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
>   			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
>   
> -			if (ret)
> +			/* Do the check only for the active/configured ports. */
> +			if (ret == -EINVAL)
> +				continue;
> +			if (ret) {
> +				printf("failed to get ring size for TX "
> +				       "queue(%u) Port(%u) - txpkts ignored\n",
> +				       port_id, queue_id);
>   				return true;

Is there a need to filter out '-EINVAL' errors only? The other error this
function can return is '-ENODEV' (meaning 'port_id' is not valid). To
simplify the logic, what do you think about ignoring any kind of error and not
failing in that case?

> -
> +			}
>   			if (ring_size < nb_segs) {
> -				printf("nb segments per TX packets=%u >= "
> -				       "TX queue(%u) ring_size=%u - ignored\n",
> +				printf("nb segments per TX packets=%u >= TX "
> +				       "queue(%u) ring_size=%u - txpkts ignored\n",
>   				       nb_segs, queue_id, ring_size);
>   				return true;
>   			}
> @@ -3932,7 +3938,12 @@ struct igb_ring_desc_16_bytes {
>   	uint16_t tx_pkt_len;
>   	unsigned int i;
>   
> -	if (nb_segs_is_invalid(nb_segs))
> +	/*
> +	 * For single sengment settings failed check is ignored.
> +	 * It is a very basic capability to send the single segment
> +	 * packets, suppose it is always supported.
> +	 */
> +	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs))
>   		return;

If the user provided '--txd' on the command line, we can compare the segment
size with that without going into the device-configured values, similar to
what was done before:

At the very beginning of the 'get_tx_ring_size()':

if (nb_txd) {
   *ring_size = nb_txd;
   return 0;
}

So following combination of parameters will be supported

"--txpkts=X,Y --txd=Z"  (segment size checked against nb_txd)
"--txpkts=N "           (single segment, no check)
"--txpkts=X,Y"          (segment size not checked)

And setting same in the command line always should be supported with segment 
size checks against the
- nb_txd when '--txd=Z' provided
- dynamic device provided values when '--txd=Z' not provided

>   
>   	/*
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index 33fc0fd..9ea0145 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -2575,6 +2575,11 @@ struct extmem_param {
>   			port->need_reconfig_queues = 0;
>   			/* setup tx queues */
>   			for (qi = 0; qi < nb_txq; qi++) {
> +				if (port->nb_tx_desc[qi] < tx_pkt_nb_segs) {
> +					printf("Failed to setup TX queue: "
> +					       "not enough descriptors\n");
> +					goto fail;
> +				}

Please check above comment.

>   				if ((numa_support) &&
>   					(txring_numa[pi] != NUMA_NO_CONFIG))
>   					diag = rte_eth_tx_queue_setup(pi, qi,
> @@ -2589,7 +2594,7 @@ struct extmem_param {
>   
>   				if (diag == 0)
>   					continue;
> -
> +fail:
>   				/* Fail to setup tx queue, return */
>   				if (rte_atomic16_cmpset(&(port->port_status),
>   							RTE_PORT_HANDLING,
>
  

Patch

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0d2d6aa..86388a2 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -2798,6 +2798,11 @@  struct cmd_setup_rxtx_queue {
 		if (!numa_support || socket_id == NUMA_NO_CONFIG)
 			socket_id = port->socket_id;
 
+		if (port->nb_tx_desc[res->qid] < tx_pkt_nb_segs) {
+			printf("Failed to setup TX queue: "
+			       "not enough descriptors\n");
+			return;
+		}
 		ret = rte_eth_tx_queue_setup(res->portid,
 					     res->qid,
 					     port->nb_tx_desc[res->qid],
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index b51de59..a6fccfa 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -3911,12 +3911,18 @@  struct igb_ring_desc_16_bytes {
 		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
 			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
 
-			if (ret)
+			/* Do the check only for the active/configured ports. */
+			if (ret == -EINVAL)
+				continue;
+			if (ret) {
+				printf("failed to get ring size for TX "
+				       "queue(%u) Port(%u) - txpkts ignored\n",
+				       port_id, queue_id);
 				return true;
-
+			}
 			if (ring_size < nb_segs) {
-				printf("nb segments per TX packets=%u >= "
-				       "TX queue(%u) ring_size=%u - ignored\n",
+				printf("nb segments per TX packets=%u >= TX "
+				       "queue(%u) ring_size=%u - txpkts ignored\n",
 				       nb_segs, queue_id, ring_size);
 				return true;
 			}
@@ -3932,7 +3938,12 @@  struct igb_ring_desc_16_bytes {
 	uint16_t tx_pkt_len;
 	unsigned int i;
 
-	if (nb_segs_is_invalid(nb_segs))
+	/*
+	 * For single sengment settings failed check is ignored.
+	 * It is a very basic capability to send the single segment
+	 * packets, suppose it is always supported.
+	 */
+	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs))
 		return;
 
 	/*
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 33fc0fd..9ea0145 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2575,6 +2575,11 @@  struct extmem_param {
 			port->need_reconfig_queues = 0;
 			/* setup tx queues */
 			for (qi = 0; qi < nb_txq; qi++) {
+				if (port->nb_tx_desc[qi] < tx_pkt_nb_segs) {
+					printf("Failed to setup TX queue: "
+					       "not enough descriptors\n");
+					goto fail;
+				}
 				if ((numa_support) &&
 					(txring_numa[pi] != NUMA_NO_CONFIG))
 					diag = rte_eth_tx_queue_setup(pi, qi,
@@ -2589,7 +2594,7 @@  struct extmem_param {
 
 				if (diag == 0)
 					continue;
-
+fail:
 				/* Fail to setup tx queue, return */
 				if (rte_atomic16_cmpset(&(port->port_status),
 							RTE_PORT_HANDLING,