[v8,4/4] app/testpmd: support multiple mbuf pools per Rx queue

Message ID 20221007172921.3325250-5-andrew.rybchenko@oktetlabs.ru (mailing list archive)
State Superseded, archived
Delegated to: Andrew Rybchenko
Series ethdev: support multiple mbuf pools per Rx queue

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS

Commit Message

Andrew Rybchenko Oct. 7, 2022, 5:29 p.m. UTC
  From: Hanumanth Pothula <hpothula@marvell.com>

Some hardware can choose a memory pool for a received packet based on the
packet's size. The pool sort capability allows the PMD/NIC to choose a
memory pool based on the packet's length.

When multiple mempool support is enabled, populate the mempool array and
also print the name of the pool on which each packet is received.

Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
---
 app/test-pmd/testpmd.c | 34 ++++++++++++++++++++++++----------
 app/test-pmd/testpmd.h |  3 +++
 app/test-pmd/util.c    |  4 ++--
 3 files changed, 29 insertions(+), 12 deletions(-)
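
For context, the sketch below shows how an application could hand several mempools to one Rx queue through the rx_mempools/rx_nmempool fields used by this series; the pool names, counts and buffer sizes here are illustrative assumptions, not part of the patch.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Illustrative only: two pools with different buffer sizes for one Rx queue. */
    static int
    setup_multi_pool_rxq(uint16_t port_id, uint16_t queue_id, uint16_t nb_rxd,
                         unsigned int socket_id)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_rxconf rxconf;
            struct rte_mempool *pools[2];

            pools[0] = rte_pktmbuf_pool_create("pool_small", 4096, 256, 0,
                                               RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
            pools[1] = rte_pktmbuf_pool_create("pool_large", 4096, 256, 0,
                                               9000 + RTE_PKTMBUF_HEADROOM, socket_id);
            if (pools[0] == NULL || pools[1] == NULL)
                    return -1;

            if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
                    return -1;
            rxconf = dev_info.default_rxconf;
            /* Hand both pools to the PMD; it picks one per packet by length. */
            rxconf.rx_mempools = pools;
            rxconf.rx_nmempool = 2;
            /* As in the patch below, the single-pool argument is NULL in this mode. */
            return rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd, socket_id,
                                          &rxconf, NULL);
    }

Whether a port accepts this mode, and how many pools it takes per queue, is reported by the ethdev part of this series through rte_eth_dev_info.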
  

Comments

Andrew Rybchenko Oct. 7, 2022, 7:43 p.m. UTC | #1
On 10/7/22 21:16, Hanumanth Reddy Pothula wrote:
> Thanks Andrew for helping me in merging the changes.
> 
> In the below if condition, the (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0 check will always be true as we are not setting RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT.
> I think we need to do an && operation.
> 
> if (rx_pkt_nb_segs <= 1 ||
>       (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
>           rx_conf->rx_seg = NULL;
>           rx_conf->rx_nseg = 0;
>           ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
>                                        nb_rx_desc, socket_id,
>                                        rx_conf, mp);
>           return ret;
>   }
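
For clarity, here is a minimal sketch of that early-return check with the suggested && operator, placed in the context of testpmd's rx_queue_setup() (an illustration of the proposed fix, not the code as applied):

    if (rx_pkt_nb_segs <= 1 &&
        (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
            /* Take the single-pool path only when neither multiple segments
             * nor buffer split are requested; otherwise fall through to the
             * multi-mempool/buffer-split setup below.
             */
            rx_conf->rx_seg = NULL;
            rx_conf->rx_nseg = 0;
            ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
                                         nb_rx_desc, socket_id,
                                         rx_conf, mp);
            return ret;
    }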

That's exactly what I'm talking about in the cover letter.
I'm not sure about the testpmd patch at all. So, I vote for postponing the
testpmd part to the rc2 stage.

> 
> I applied the changes locally with the && operation in the above if condition and confirmed it's working fine.
> 
> Regards,
> Hanumanth
> 
>> -----Original Message-----
>> From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
>> Sent: Friday, October 7, 2022 10:59 PM
>> To: Aman Singh <aman.deep.singh@intel.com>; Yuying Zhang
>> <yuying.zhang@intel.com>
>> Cc: dev@dpdk.org; Hanumanth Reddy Pothula <hpothula@marvell.com>
>> Subject: [EXT] [PATCH v8 4/4] app/testpmd: support multiple mbuf pools per Rx
>> queue
>>
>> External Email
>>
>> ----------------------------------------------------------------------
>> From: Hanumanth Pothula <hpothula@marvell.com>
>>
>> Some hardware can choose a memory pool for a received packet based on the
>> packet's size. The pool sort capability allows the PMD/NIC to choose a
>> memory pool based on the packet's length.
>>
>> When multiple mempool support is enabled, populate the mempool array and
>> also print the name of the pool on which each packet is received.
>>
>> Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
>> ---
>>   app/test-pmd/testpmd.c | 34 ++++++++++++++++++++++++----------
>>   app/test-pmd/testpmd.h |  3 +++
>>   app/test-pmd/util.c    |  4 ++--
>>   3 files changed, 29 insertions(+), 12 deletions(-)
>>
>> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
>> de6ad00138..2ce9953c76 100644
>> --- a/app/test-pmd/testpmd.c
>> +++ b/app/test-pmd/testpmd.c
>> @@ -2624,6 +2624,7 @@ rx_queue_setup(uint16_t port_id, uint16_t
>> rx_queue_id,
>>   	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)  {
>>   	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
>> +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
>>   	unsigned int i, mp_n;
>>   	int ret;
>>
>> @@ -2645,16 +2646,29 @@ rx_queue_setup(uint16_t port_id, uint16_t
>> rx_queue_id,
>>   		 */
>>   		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
>>   		mpx = mbuf_pool_find(socket_id, mp_n);
>> -		/* Handle zero as mbuf data buffer size. */
>> -		rx_seg->length = rx_pkt_seg_lengths[i] ?
>> -				   rx_pkt_seg_lengths[i] :
>> -				   mbuf_data_size[mp_n];
>> -		rx_seg->offset = i < rx_pkt_nb_offs ?
>> -				   rx_pkt_seg_offsets[i] : 0;
>> -		rx_seg->mp = mpx ? mpx : mp;
>> -	}
>> -	rx_conf->rx_nseg = rx_pkt_nb_segs;
>> -	rx_conf->rx_seg = rx_useg;
>> +		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
>> +			/**
>> +			 * On segment length zero, update the length as
>> +			 *      buffer size - headroom size
>> +			 * to make sure enough space is accommodated for the header.
>> +			 */
>> +			rx_seg->length = rx_pkt_seg_lengths[i] ?
>> +					 rx_pkt_seg_lengths[i] :
>> +					 mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM;
>> +			rx_seg->offset = i < rx_pkt_nb_offs ?
>> +					 rx_pkt_seg_offsets[i] : 0;
>> +			rx_seg->mp = mpx ? mpx : mp;
>> +		} else {
>> +			rx_mempool[i] = mpx ? mpx : mp;
>> +		}
>> +	}
>> +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
>> +		rx_conf->rx_nseg = rx_pkt_nb_segs;
>> +		rx_conf->rx_seg = rx_useg;
>> +	} else {
>> +		rx_conf->rx_mempools = rx_mempool;
>> +		rx_conf->rx_nmempool = rx_pkt_nb_segs;
>> +	}
>>   	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
>>   				    socket_id, rx_conf, NULL);
>>   	rx_conf->rx_seg = NULL;
>> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
>> cbbc7cc350..2f50a10d1f 100644
>> --- a/app/test-pmd/testpmd.h
>> +++ b/app/test-pmd/testpmd.h
>> @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
>>
>>   #define MIN_TOTAL_NUM_MBUFS 1024
>>
>> +/* Maximum number of pools supported per Rx queue */ #define
>> +MAX_MEMPOOL 8
>> +
>>   typedef uint8_t  lcoreid_t;
>>   typedef uint16_t portid_t;
>>   typedef uint16_t queueid_t;
>> diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index
>> fd98e8b51d..f9df5f69ef 100644
>> --- a/app/test-pmd/util.c
>> +++ b/app/test-pmd/util.c
>> @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue,
>> struct rte_mbuf *pkts[],
>>   		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
>>   				 print_buf, buf_size, &cur_len);
>>   		MKDUMPSTR(print_buf, buf_size, cur_len,
>> -			  " - type=0x%04x - length=%u - nb_segs=%d",
>> -			  eth_type, (unsigned int) mb->pkt_len,
>> +			  " - pool=%s - type=0x%04x - length=%u - nb_segs=%d",
>> +			  mb->pool->name, eth_type, (unsigned int) mb->pkt_len,
>>   			  (int)mb->nb_segs);
>>   		ol_flags = mb->ol_flags;
>>   		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
>> --
>> 2.30.2
>
  
Hanumanth Pothula Oct. 7, 2022, 7:56 p.m. UTC | #2
> -----Original Message-----
> From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Sent: Saturday, October 8, 2022 1:13 AM
> To: Hanumanth Reddy Pothula <hpothula@marvell.com>
> Cc: dev@dpdk.org; Thomas Monjalon <thomas@monjalon.net>; Ferruh Yigit
> <ferruh.yigit@amd.com>
> Subject: Re: [EXT] [PATCH v8 4/4] app/testpmd: support multiple mbuf pools per
> Rx queue
> 
> On 10/7/22 21:16, Hanumanth Reddy Pothula wrote:
> > Thanks Andrew for helping me in merging the changes.
> >
> > In the below if condition, the (rx_conf->offloads &
> > RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0 check will always be true as we are not
> > setting RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT.
> > I think we need to do an && operation.
> >
> > if (rx_pkt_nb_segs <= 1 ||
> >       (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
> >           rx_conf->rx_seg = NULL;
> >           rx_conf->rx_nseg = 0;
> >           ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
> >                                        nb_rx_desc, socket_id,
> >                                        rx_conf, mp);
> >           return ret;
> >   }
> 
> That's exactly what I'm talking about in the cover letter.
> I'm not sure about the testpmd patch at all. So, I vote for postponing the
> testpmd part to the rc2 stage.
Okay, as you mentioned, let's have the testpmd maintainers review it.
Thanks, Andrew.
> 
> >
> > I applied the changes locally with the && operation in the above if condition
> > and confirmed it's working fine.
> >
> > Regards,
> > Hanumanth
> >
> >> -----Original Message-----
> >> From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> >> Sent: Friday, October 7, 2022 10:59 PM
> >> To: Aman Singh <aman.deep.singh@intel.com>; Yuying Zhang
> >> <yuying.zhang@intel.com>
> >> Cc: dev@dpdk.org; Hanumanth Reddy Pothula <hpothula@marvell.com>
> >> Subject: [EXT] [PATCH v8 4/4] app/testpmd: support multiple mbuf
> >> pools per Rx queue
> >>
> >> External Email
> >>
> >> ----------------------------------------------------------------------
> >> From: Hanumanth Pothula <hpothula@marvell.com>
> >>
> >> Some hardware can choose a memory pool for a received packet based on the
> >> packet's size. The pool sort capability allows the PMD/NIC to choose a
> >> memory pool based on the packet's length.
> >>
> >> When multiple mempool support is enabled, populate the mempool array and
> >> also print the name of the pool on which each packet is received.
> >>
> >> Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
> >> ---
> >>   app/test-pmd/testpmd.c | 34 ++++++++++++++++++++++++----------
> >>   app/test-pmd/testpmd.h |  3 +++
> >>   app/test-pmd/util.c    |  4 ++--
> >>   3 files changed, 29 insertions(+), 12 deletions(-)
> >>
> >> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> >> de6ad00138..2ce9953c76 100644
> >> --- a/app/test-pmd/testpmd.c
> >> +++ b/app/test-pmd/testpmd.c
> >> @@ -2624,6 +2624,7 @@ rx_queue_setup(uint16_t port_id, uint16_t
> >> rx_queue_id,
> >>   	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)  {
> >>   	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
> >> +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
> >>   	unsigned int i, mp_n;
> >>   	int ret;
> >>
> >> @@ -2645,16 +2646,29 @@ rx_queue_setup(uint16_t port_id, uint16_t
> >> rx_queue_id,
> >>   		 */
> >>   		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
> >>   		mpx = mbuf_pool_find(socket_id, mp_n);
> >> -		/* Handle zero as mbuf data buffer size. */
> >> -		rx_seg->length = rx_pkt_seg_lengths[i] ?
> >> -				   rx_pkt_seg_lengths[i] :
> >> -				   mbuf_data_size[mp_n];
> >> -		rx_seg->offset = i < rx_pkt_nb_offs ?
> >> -				   rx_pkt_seg_offsets[i] : 0;
> >> -		rx_seg->mp = mpx ? mpx : mp;
> >> -	}
> >> -	rx_conf->rx_nseg = rx_pkt_nb_segs;
> >> -	rx_conf->rx_seg = rx_useg;
> >> +		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> >> +			/**
> >> +			 * On segment length zero, update the length as
> >> +			 *      buffer size - headroom size
> >> +			 * to make sure enough space is accommodated for the header.
> >> +			 */
> >> +			rx_seg->length = rx_pkt_seg_lengths[i] ?
> >> +					 rx_pkt_seg_lengths[i] :
> >> +					 mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM;
> >> +			rx_seg->offset = i < rx_pkt_nb_offs ?
> >> +					 rx_pkt_seg_offsets[i] : 0;
> >> +			rx_seg->mp = mpx ? mpx : mp;
> >> +		} else {
> >> +			rx_mempool[i] = mpx ? mpx : mp;
> >> +		}
> >> +	}
> >> +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> >> +		rx_conf->rx_nseg = rx_pkt_nb_segs;
> >> +		rx_conf->rx_seg = rx_useg;
> >> +	} else {
> >> +		rx_conf->rx_mempools = rx_mempool;
> >> +		rx_conf->rx_nmempool = rx_pkt_nb_segs;
> >> +	}
> >>   	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
> >>   				    socket_id, rx_conf, NULL);
> >>   	rx_conf->rx_seg = NULL;
> >> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> >> cbbc7cc350..2f50a10d1f 100644
> >> --- a/app/test-pmd/testpmd.h
> >> +++ b/app/test-pmd/testpmd.h
> >> @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
> >>
> >>   #define MIN_TOTAL_NUM_MBUFS 1024
> >>
> >> +/* Maximum number of pools supported per Rx queue */ #define
> >> +MAX_MEMPOOL 8
> >> +
> >>   typedef uint8_t  lcoreid_t;
> >>   typedef uint16_t portid_t;
> >>   typedef uint16_t queueid_t;
> >> diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index
> >> fd98e8b51d..f9df5f69ef 100644
> >> --- a/app/test-pmd/util.c
> >> +++ b/app/test-pmd/util.c
> >> @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue,
> >> struct rte_mbuf *pkts[],
> >>   		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
> >>   				 print_buf, buf_size, &cur_len);
> >>   		MKDUMPSTR(print_buf, buf_size, cur_len,
> >> -			  " - type=0x%04x - length=%u - nb_segs=%d",
> >> -			  eth_type, (unsigned int) mb->pkt_len,
> >> +			  " - pool=%s - type=0x%04x - length=%u - nb_segs=%d",
> >> +			  mb->pool->name, eth_type, (unsigned int) mb->pkt_len,
> >>   			  (int)mb->nb_segs);
> >>   		ol_flags = mb->ol_flags;
> >>   		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
> >> --
> >> 2.30.2
> >
  

Patch

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index de6ad00138..2ce9953c76 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2624,6 +2624,7 @@  rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
 {
 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
+	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
 	unsigned int i, mp_n;
 	int ret;
 
@@ -2645,16 +2646,29 @@  rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		 */
 		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
 		mpx = mbuf_pool_find(socket_id, mp_n);
-		/* Handle zero as mbuf data buffer size. */
-		rx_seg->length = rx_pkt_seg_lengths[i] ?
-				   rx_pkt_seg_lengths[i] :
-				   mbuf_data_size[mp_n];
-		rx_seg->offset = i < rx_pkt_nb_offs ?
-				   rx_pkt_seg_offsets[i] : 0;
-		rx_seg->mp = mpx ? mpx : mp;
-	}
-	rx_conf->rx_nseg = rx_pkt_nb_segs;
-	rx_conf->rx_seg = rx_useg;
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+			/**
+			 * On segment length zero, update the length as
+			 *      buffer size - headroom size
+			 * to make sure enough space is accommodated for the header.
+			 */
+			rx_seg->length = rx_pkt_seg_lengths[i] ?
+					 rx_pkt_seg_lengths[i] :
+					 mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM;
+			rx_seg->offset = i < rx_pkt_nb_offs ?
+					 rx_pkt_seg_offsets[i] : 0;
+			rx_seg->mp = mpx ? mpx : mp;
+		} else {
+			rx_mempool[i] = mpx ? mpx : mp;
+		}
+	}
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+		rx_conf->rx_nseg = rx_pkt_nb_segs;
+		rx_conf->rx_seg = rx_useg;
+	} else {
+		rx_conf->rx_mempools = rx_mempool;
+		rx_conf->rx_nmempool = rx_pkt_nb_segs;
+	}
 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
 				    socket_id, rx_conf, NULL);
 	rx_conf->rx_seg = NULL;
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index cbbc7cc350..2f50a10d1f 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -80,6 +80,9 @@  extern uint8_t cl_quit;
 
 #define MIN_TOTAL_NUM_MBUFS 1024
 
+/* Maximum number of pools supported per Rx queue */
+#define MAX_MEMPOOL 8
+
 typedef uint8_t  lcoreid_t;
 typedef uint16_t portid_t;
 typedef uint16_t queueid_t;
diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c
index fd98e8b51d..f9df5f69ef 100644
--- a/app/test-pmd/util.c
+++ b/app/test-pmd/util.c
@@ -150,8 +150,8 @@  dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
 				 print_buf, buf_size, &cur_len);
 		MKDUMPSTR(print_buf, buf_size, cur_len,
-			  " - type=0x%04x - length=%u - nb_segs=%d",
-			  eth_type, (unsigned int) mb->pkt_len,
+			  " - pool=%s - type=0x%04x - length=%u - nb_segs=%d",
+			  mb->pool->name, eth_type, (unsigned int) mb->pkt_len,
 			  (int)mb->nb_segs);
 		ol_flags = mb->ol_flags;
 		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {