[1/2] net/pcap: multiple queues fix

Message ID 1529163397-88330-1-git-send-email-ido@cgstowernetworks.com (mailing list archive)
State Changes Requested, archived
Delegated to: Ferruh Yigit
Headers
Series [1/2] net/pcap: multiple queues fix |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Ido Goshen June 16, 2018, 3:36 p.m. UTC
  Change open_rx/tx_pcap/iface functions to open only a single pcap/dumper
and not loop num_of_queue times
The num_of_queue loop is already achieved by the caller rte_kvargs_process

Fixes:
1. Opens N requested pcaps/dumpers instead of N^2
2. Leak of pcap/dumper's which are being overwritten by
   the sequential calls to open_rx/tx_pcap/iface functions
3. Use the filename/iface args per queue and not just the last one
   that overwrites the previous names

Signed-off-by: ido goshen <ido@cgstowernetworks.com>
---
 drivers/net/pcap/rte_eth_pcap.c | 85 +++++++++++++++++------------------------
 1 file changed, 35 insertions(+), 50 deletions(-)
  

Comments

Ferruh Yigit June 18, 2018, 8:25 a.m. UTC | #1
On 6/16/2018 4:36 PM, ido goshen wrote:
> Change open_rx/tx_pcap/iface functions to open only a single pcap/dumper
> and not loop num_of_queue times
> The num_of_queue loop is already achieved by the caller rte_kvargs_process

You are right, thanks for fixing this, a few comments below.

> 
> Fixes:
> 1. Opens N requested pcaps/dumpers instead of N^2
> 2. Leak of pcap/dumper's which are being overwritten by
>    the sequential calls to open_rx/tx_pcap/iface functions
> 3. Use the filename/iface args per queue and not just the last one
>    that overwrites the previous names

Please add a "Fixes: xx" line, that is to trace initial commit the issue
introduced. More details in contribution guide.
Also please add "Cc: stable@dpdk.org" to be sure patch sent to stable tree too
and to help stable tree maintainers

> 
> Signed-off-by: ido goshen <ido@cgstowernetworks.com>

<...>

> @@ -958,15 +950,8 @@ struct pmd_devargs {
>  	 * We check whether we want to open a RX stream from a real NIC or a
>  	 * pcap file
>  	 */
> -	pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
> -	if (pcaps.num_of_queue)
> -		is_rx_pcap = 1;
> -	else
> -		pcaps.num_of_queue = rte_kvargs_count(kvlist,
> -				ETH_PCAP_RX_IFACE_ARG);
> -
> -	if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> -		pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
> +	is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
> +	pcaps.num_of_queue = 0;
>  
>  	if (is_rx_pcap)
>  		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
> @@ -975,6 +960,10 @@ struct pmd_devargs {
>  		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
>  				&open_rx_iface, &pcaps);
>  
> +	if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> +		pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;

Here is late for this check. You may be already access to rx->queue[],
tx->queue[] out of boundary at this point.

You should either check this value before rte_kvargs_process(), via
rte_kvargs_count(), OR you should add this check into callback functions.

>  	if (ret < 0)
>  		goto free_kvlist;
>  
> @@ -982,15 +971,8 @@ struct pmd_devargs {
>  	 * We check whether we want to open a TX stream to a real NIC or a
>  	 * pcap file
>  	 */
> -	dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
> -	if (dumpers.num_of_queue)
> -		is_tx_pcap = 1;
> -	else
> -		dumpers.num_of_queue = rte_kvargs_count(kvlist,
> -				ETH_PCAP_TX_IFACE_ARG);
> -
> -	if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> -		dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
> +	is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;

Is "is_rx_pcap" or "is_tx_pcap" flags really required? Is there anything
preventing have a mixture of interface and pcap in multi queue case? With the
changes you are doing, I guess we can remove these checks and call following
sequentially:
rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG..)
rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG ..)
What do you think?

But please be sure the fix and refactor patches are separate, so that fix patch
can be backported to stable trees. But refactor patches won't be backported.
  
Ido Goshen June 19, 2018, 9:45 a.m. UTC | #2
See Inline prefixed with [ido]

-----Original Message-----
From: Ferruh Yigit <ferruh.yigit@intel.com> 
Sent: Monday, June 18, 2018 11:25 AM
To: Ido Goshen <Ido@cgstowernetworks.com>
Cc: dev@dpdk.org
Subject: Re: [PATCH 1/2] net/pcap: multiple queues fix

On 6/16/2018 4:36 PM, ido goshen wrote:
> Change open_rx/tx_pcap/iface functions to open only a single 
> pcap/dumper and not loop num_of_queue times The num_of_queue loop is 
> already achieved by the caller rte_kvargs_process

You are right, thanks for fixing this, a few comments below.

> 
> Fixes:
> 1. Opens N requested pcaps/dumpers instead of N^2 2. Leak of 
> pcap/dumper's which are being overwritten by
>    the sequential calls to open_rx/tx_pcap/iface functions 3. Use the 
> filename/iface args per queue and not just the last one
>    that overwrites the previous names

Please add a "Fixes: xx" line, that is to trace initial commit the issue introduced. More details in contribution guide.
Also please add "Cc: stable@dpdk.org" to be sure patch sent to stable tree too and to help stable tree maintainers
[ido] as far as I can trace back this is from day one (4c17330 pcap: add new driver), Would "Fixes: 4c17330" be ok?

> 
> Signed-off-by: ido goshen <ido@cgstowernetworks.com>

<...>

> @@ -958,15 +950,8 @@ struct pmd_devargs {
>  	 * We check whether we want to open a RX stream from a real NIC or a
>  	 * pcap file
>  	 */
> -	pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
> -	if (pcaps.num_of_queue)
> -		is_rx_pcap = 1;
> -	else
> -		pcaps.num_of_queue = rte_kvargs_count(kvlist,
> -				ETH_PCAP_RX_IFACE_ARG);
> -
> -	if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> -		pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
> +	is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
> +	pcaps.num_of_queue = 0;
>  
>  	if (is_rx_pcap)
>  		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG, @@ -975,6 
> +960,10 @@ struct pmd_devargs {
>  		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
>  				&open_rx_iface, &pcaps);
>  
> +	if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> +		pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;

Here is late for this check. You may be already access to rx->queue[],
tx->queue[] out of boundary at this point.

You should either check this value before rte_kvargs_process(), via rte_kvargs_count(), OR you should add this check into callback functions.
[ido] good catch  - will fix that

>  	if (ret < 0)
>  		goto free_kvlist;
>  
> @@ -982,15 +971,8 @@ struct pmd_devargs {
>  	 * We check whether we want to open a TX stream to a real NIC or a
>  	 * pcap file
>  	 */
> -	dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
> -	if (dumpers.num_of_queue)
> -		is_tx_pcap = 1;
> -	else
> -		dumpers.num_of_queue = rte_kvargs_count(kvlist,
> -				ETH_PCAP_TX_IFACE_ARG);
> -
> -	if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> -		dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
> +	is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;

Is "is_rx_pcap" or "is_tx_pcap" flags really required? Is there anything preventing have a mixture of interface and pcap in multi queue case? With the changes you are doing, I guess we can remove these checks and call following
sequentially:
rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG..) rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG ..) What do you think?
[ido] nice idea - will test if they can co-exist

But please be sure the fix and refactor patches are separate, so that fix patch can be backported to stable trees. But refactor patches won't be backported.
  
Ferruh Yigit June 19, 2018, 10 a.m. UTC | #3
On 6/19/2018 10:45 AM, Ido Goshen wrote:
> See Inline prefixed with [ido]
> 
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com> 
> Sent: Monday, June 18, 2018 11:25 AM
> To: Ido Goshen <Ido@cgstowernetworks.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH 1/2] net/pcap: multiple queues fix
> 
> On 6/16/2018 4:36 PM, ido goshen wrote:
>> Change open_rx/tx_pcap/iface functions to open only a single 
>> pcap/dumper and not loop num_of_queue times The num_of_queue loop is 
>> already achieved by the caller rte_kvargs_process
> 
> You are right, thanks for fixing this, a few comments below.
> 
>>
>> Fixes:
>> 1. Opens N requested pcaps/dumpers instead of N^2 2. Leak of 
>> pcap/dumper's which are being overwritten by
>>    the sequential calls to open_rx/tx_pcap/iface functions 3. Use the 
>> filename/iface args per queue and not just the last one
>>    that overwrites the previous names
> 
> Please add a "Fixes: xx" line, that is to trace initial commit the issue introduced. More details in contribution guide.
> Also please add "Cc: stable@dpdk.org" to be sure patch sent to stable tree too and to help stable tree maintainers
> [ido] as far as I can trace back this is from day one (4c17330 pcap: add new driver), Would "Fixes: 4c17330" be ok?

As commit, it looks correct, thanks. For syntax we are using a git alias for
unified syntax [1], which makes output as [2].

[1]
git config alias.fixline "log -1 --abbrev=12 --format='Fixes: %h (\"%s\")%nCc: %ae'"

[2]
Fixes: 4c173302c307 ("pcap: add new driver")

> 
>>
>> Signed-off-by: ido goshen <ido@cgstowernetworks.com>
> 
> <...>
> 
>> @@ -958,15 +950,8 @@ struct pmd_devargs {
>>  	 * We check whether we want to open a RX stream from a real NIC or a
>>  	 * pcap file
>>  	 */
>> -	pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
>> -	if (pcaps.num_of_queue)
>> -		is_rx_pcap = 1;
>> -	else
>> -		pcaps.num_of_queue = rte_kvargs_count(kvlist,
>> -				ETH_PCAP_RX_IFACE_ARG);
>> -
>> -	if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
>> -		pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
>> +	is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
>> +	pcaps.num_of_queue = 0;
>>  
>>  	if (is_rx_pcap)
>>  		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG, @@ -975,6 
>> +960,10 @@ struct pmd_devargs {
>>  		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
>>  				&open_rx_iface, &pcaps);
>>  
>> +	if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
>> +		pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
> 
> Here is late for this check. You may be already access to rx->queue[],
> tx->queue[] out of boundary at this point.
> 
> You should either check this value before rte_kvargs_process(), via rte_kvargs_count(), OR you should add this check into callback functions.
> [ido] good catch  - will fix that
> 
>>  	if (ret < 0)
>>  		goto free_kvlist;
>>  
>> @@ -982,15 +971,8 @@ struct pmd_devargs {
>>  	 * We check whether we want to open a TX stream to a real NIC or a
>>  	 * pcap file
>>  	 */
>> -	dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
>> -	if (dumpers.num_of_queue)
>> -		is_tx_pcap = 1;
>> -	else
>> -		dumpers.num_of_queue = rte_kvargs_count(kvlist,
>> -				ETH_PCAP_TX_IFACE_ARG);
>> -
>> -	if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
>> -		dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
>> +	is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
> 
> Is "is_rx_pcap" or "is_tx_pcap" flags really required? Is there anything preventing have a mixture of interface and pcap in multi queue case? With the changes you are doing, I guess we can remove these checks and call following
> sequentially:
> rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG..) rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG ..) What do you think?
> [ido] nice idea - will test if they can co-exist
> 
> But please be sure the fix and refactor patches are separate, so that fix patch can be backported to stable trees. But refactor patches won't be backported.
>
  

Patch

diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 6bd4a7d..444abbb 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -665,19 +665,17 @@  struct pmd_devargs {
 static int
 open_rx_pcap(const char *key, const char *value, void *extra_args)
 {
-	unsigned int i;
 	const char *pcap_filename = value;
 	struct pmd_devargs *rx = extra_args;
 	pcap_t *pcap = NULL;
 
-	for (i = 0; i < rx->num_of_queue; i++) {
-		if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
-			return -1;
+	if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
+		return -1;
 
-		rx->queue[i].pcap = pcap;
-		rx->queue[i].name = pcap_filename;
-		rx->queue[i].type = key;
-	}
+	rx->queue[rx->num_of_queue].pcap = pcap;
+	rx->queue[rx->num_of_queue].name = pcap_filename;
+	rx->queue[rx->num_of_queue].type = key;
+	rx->num_of_queue++;
 
 	return 0;
 }
@@ -689,19 +687,17 @@  struct pmd_devargs {
 static int
 open_tx_pcap(const char *key, const char *value, void *extra_args)
 {
-	unsigned int i;
 	const char *pcap_filename = value;
 	struct pmd_devargs *dumpers = extra_args;
 	pcap_dumper_t *dumper;
 
-	for (i = 0; i < dumpers->num_of_queue; i++) {
-		if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
-			return -1;
+	if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
+		return -1;
 
-		dumpers->queue[i].dumper = dumper;
-		dumpers->queue[i].name = pcap_filename;
-		dumpers->queue[i].type = key;
-	}
+	dumpers->queue[dumpers->num_of_queue].dumper = dumper;
+	dumpers->queue[dumpers->num_of_queue].name = pcap_filename;
+	dumpers->queue[dumpers->num_of_queue].type = key;
+	dumpers->num_of_queue++;
 
 	return 0;
 }
@@ -732,18 +728,16 @@  struct pmd_devargs {
 static inline int
 open_rx_iface(const char *key, const char *value, void *extra_args)
 {
-	unsigned int i;
 	const char *iface = value;
 	struct pmd_devargs *rx = extra_args;
 	pcap_t *pcap = NULL;
 
-	for (i = 0; i < rx->num_of_queue; i++) {
-		if (open_single_iface(iface, &pcap) < 0)
-			return -1;
-		rx->queue[i].pcap = pcap;
-		rx->queue[i].name = iface;
-		rx->queue[i].type = key;
-	}
+	if (open_single_iface(iface, &pcap) < 0)
+		return -1;
+	rx->queue[rx->num_of_queue].pcap = pcap;
+	rx->queue[rx->num_of_queue].name = iface;
+	rx->queue[rx->num_of_queue].type = key;
+	rx->num_of_queue++;
 
 	return 0;
 }
@@ -754,18 +748,16 @@  struct pmd_devargs {
 static int
 open_tx_iface(const char *key, const char *value, void *extra_args)
 {
-	unsigned int i;
 	const char *iface = value;
 	struct pmd_devargs *tx = extra_args;
 	pcap_t *pcap;
 
-	for (i = 0; i < tx->num_of_queue; i++) {
-		if (open_single_iface(iface, &pcap) < 0)
-			return -1;
-		tx->queue[i].pcap = pcap;
-		tx->queue[i].name = iface;
-		tx->queue[i].type = key;
-	}
+	if (open_single_iface(iface, &pcap) < 0)
+		return -1;
+	tx->queue[tx->num_of_queue].pcap = pcap;
+	tx->queue[tx->num_of_queue].name = iface;
+	tx->queue[tx->num_of_queue].type = key;
+	tx->num_of_queue++;
 
 	return 0;
 }
@@ -958,15 +950,8 @@  struct pmd_devargs {
 	 * We check whether we want to open a RX stream from a real NIC or a
 	 * pcap file
 	 */
-	pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
-	if (pcaps.num_of_queue)
-		is_rx_pcap = 1;
-	else
-		pcaps.num_of_queue = rte_kvargs_count(kvlist,
-				ETH_PCAP_RX_IFACE_ARG);
-
-	if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
-		pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+	is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
+	pcaps.num_of_queue = 0;
 
 	if (is_rx_pcap)
 		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
@@ -975,6 +960,10 @@  struct pmd_devargs {
 		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
 				&open_rx_iface, &pcaps);
 
+	if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
+		pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+
+
 	if (ret < 0)
 		goto free_kvlist;
 
@@ -982,15 +971,8 @@  struct pmd_devargs {
 	 * We check whether we want to open a TX stream to a real NIC or a
 	 * pcap file
 	 */
-	dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
-	if (dumpers.num_of_queue)
-		is_tx_pcap = 1;
-	else
-		dumpers.num_of_queue = rte_kvargs_count(kvlist,
-				ETH_PCAP_TX_IFACE_ARG);
-
-	if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
-		dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+	is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
+	dumpers.num_of_queue = 0;
 
 	if (is_tx_pcap)
 		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
@@ -999,6 +981,9 @@  struct pmd_devargs {
 		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
 				&open_tx_iface, &dumpers);
 
+	if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
+		dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+
 	if (ret < 0)
 		goto free_kvlist;