[v6,3/4] app/testpmd: move pkt prepare logic into a separate function

Message ID 20190402095255.848-3-pbhagavatula@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series [v6,1/4] app/testpmd: move eth header generation outside the loop

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Pavan Nikhilesh Bhagavatula April 2, 2019, 9:53 a.m. UTC
  From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Move the packet prepare logic into a separate function so that it
can be reused later.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 app/test-pmd/txonly.c | 163 +++++++++++++++++++++---------------------
 1 file changed, 83 insertions(+), 80 deletions(-)
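
As an illustration of the reuse the commit message refers to, below is a minimal sketch of how another forwarding path inside app/test-pmd/txonly.c could fill a burst with the new helper. It assumes only the pkt_burst_prepare() signature introduced in the diff; prepare_burst() and its parameters are hypothetical names, not part of this patch or of testpmd.

/*
 * Hypothetical illustration only: prepare up to 'n' packets with the
 * pkt_burst_prepare() helper added by this patch and return how many
 * were actually built. rte_mbuf_raw_alloc() and rte_pktmbuf_free() are
 * the same DPDK calls already used in app/test-pmd/txonly.c.
 */
static uint16_t
prepare_burst(struct rte_mempool *mbp, struct rte_mbuf **pkts, uint16_t n,
	      struct ether_hdr *eth_hdr, uint16_t vlan_tci,
	      uint16_t vlan_tci_outer, uint64_t ol_flags)
{
	uint16_t i;

	for (i = 0; i < n; i++) {
		struct rte_mbuf *pkt = rte_mbuf_raw_alloc(mbp);

		if (pkt == NULL)
			break;
		if (!pkt_burst_prepare(pkt, mbp, eth_hdr, vlan_tci,
				       vlan_tci_outer, ol_flags)) {
			rte_pktmbuf_free(pkt);
			break;
		}
		pkts[i] = pkt;
	}
	return i;
}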
  

Comments

Lin, Xueqin April 9, 2019, 9:28 a.m. UTC | #1
Hi Nikhilesh,

This patchset impacts some of the 19.05-rc1 txonly/burst tests on Intel NICs. If txonly forwarding is set, the IXIA or tester peer cannot receive the packets generated by the application.
This is a high-priority issue that blocks several test cases; detailed information is below. Please check it soon.

*DPDK version: 19.05.0-rc1
*NIC hardware: Fortville_eagle/Fortville_spirit/Niantic
*Environment: one NIC port connected to another NIC port, or one NIC port connected to IXIA

Test Setup
1. Bind port to igb_uio or vfio
2. On the DUT, set up testpmd:
    ./x86_64-native-linuxapp-gcc/app/testpmd -c 0x1e -n 4 -- -i --rxq=4 --txq=4 --port-topology=loop
3. Set txonly forwarding and start testpmd:
testpmd>set fwd txonly
testpmd>start
4. Dump packets on the tester NIC port or IXIA; no packets are received on PORT0:
tcpdump -i <tester_interface> -v

Best regards,
Xueqin

  
Pavan Nikhilesh Bhagavatula April 9, 2019, 9:32 a.m. UTC | #2
Hi Lin,

Can you check if the following patch fixes the issue?
http://patches.dpdk.org/patch/52395/

I wasn't able to catch this earlier.

Regards,
Pavan.
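
For context on the fix referenced above: in the diff, pkt_burst_prepare() takes eth_hdr as a struct ether_hdr * and then calls copy_buf_to_pkt(eth_hdr, sizeof(eth_hdr), pkt, 0). With a pointer argument, sizeof(eth_hdr) is the pointer size (typically 8 bytes) rather than the 14-byte Ethernet header, so the copied L2 header is truncated, which is consistent with the corrupted source MAC reported in this thread. Whether patch 52395 fixes exactly this is an assumption; the expected correction would be sizeof(*eth_hdr) or sizeof(struct ether_hdr). A small standalone C sketch of the pitfall (ether_hdr_like is a stand-in type, not the DPDK definition):

#include <stdio.h>

/* Stand-in for struct ether_hdr: 6-byte dst MAC, 6-byte src MAC, 2-byte type. */
struct ether_hdr_like {
	unsigned char d_addr[6];
	unsigned char s_addr[6];
	unsigned short ether_type;
};

static void
show_copy_lengths(const struct ether_hdr_like *eth_hdr)
{
	/* Size of the pointer itself (typically 8 on 64-bit): copying this
	 * many bytes truncates the 14-byte header and leaves the source MAC
	 * mostly unwritten. */
	printf("sizeof(eth_hdr)  = %zu\n", sizeof(eth_hdr));
	/* Size of the full header (14): what the copy should use. */
	printf("sizeof(*eth_hdr) = %zu\n", sizeof(*eth_hdr));
}

int
main(void)
{
	struct ether_hdr_like hdr = { {0}, {0}, 0 };

	show_copy_lengths(&hdr);
	return 0;
}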

  
Yao, Lei A April 9, 2019, 12:24 p.m. UTC | #3
> Hi Lin,
> 
> Can you check if the following patch fixes the issue?
> http://patches.dpdk.org/patch/52395/
> 
> I wasn't able to catch this earlier.
> 
> Regards,
> Pavan

Hi, Pavan

With this patch, testpmd can generate packets with correct src
mac address at my side now. Thanks

BRs
Lei


  
Ferruh Yigit April 9, 2019, 12:29 p.m. UTC | #4
On 4/9/2019 1:24 PM, Yao, Lei A wrote:
>> Hi Lin,
>>
>> Can you check if the following patch fixes the issue?
>> http://patches.dpdk.org/patch/52395/
>>
>> I wasn't able to catch this earlier.
>>
>> Regards,
>> Pavan
> 
> Hi, Pavan
> 
> With this patch, testpmd can generate packets with correct src
> mac address at my side now. Thanks

There is a new version of that patch [1], which I will check and get now.

[1]
https://patches.dpdk.org/patch/52461/
  

Patch

diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 65171c1d1..56ca0ad24 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -148,6 +148,80 @@  setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
 	ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
 }
 
+static inline bool
+pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
+		struct ether_hdr *eth_hdr, const uint16_t vlan_tci,
+		const uint16_t vlan_tci_outer, const uint64_t ol_flags)
+{
+	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
+	uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
+	struct rte_mbuf *pkt_seg;
+	uint32_t nb_segs, pkt_len;
+	uint8_t i;
+
+	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
+		nb_segs = random() % tx_pkt_nb_segs + 1;
+	else
+		nb_segs = tx_pkt_nb_segs;
+
+	if (nb_segs > 1) {
+		if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs))
+			return false;
+	}
+
+	rte_pktmbuf_reset_headroom(pkt);
+	pkt->data_len = tx_pkt_seg_lengths[0];
+	pkt->ol_flags = ol_flags;
+	pkt->vlan_tci = vlan_tci;
+	pkt->vlan_tci_outer = vlan_tci_outer;
+	pkt->l2_len = sizeof(struct ether_hdr);
+	pkt->l3_len = sizeof(struct ipv4_hdr);
+
+	pkt_len = pkt->data_len;
+	pkt_seg = pkt;
+	for (i = 1; i < nb_segs; i++) {
+		pkt_seg->next = pkt_segs[i - 1];
+		pkt_seg = pkt_seg->next;
+		pkt_seg->data_len = tx_pkt_seg_lengths[i];
+		pkt_len += pkt_seg->data_len;
+	}
+	pkt_seg->next = NULL; /* Last segment of packet. */
+	/*
+	 * Copy headers in first packet segment(s).
+	 */
+	copy_buf_to_pkt(eth_hdr, sizeof(eth_hdr), pkt, 0);
+	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
+			sizeof(struct ether_hdr));
+	if (txonly_multi_flow) {
+		struct ipv4_hdr *ip_hdr;
+		uint32_t addr;
+
+		ip_hdr = rte_pktmbuf_mtod_offset(pkt,
+				struct ipv4_hdr *,
+				sizeof(struct ether_hdr));
+		/*
+		 * Generate multiple flows by varying IP src addr. This
+		 * enables packets are well distributed by RSS in
+		 * receiver side if any and txonly mode can be a decent
+		 * packet generator for developer's quick performance
+		 * regression test.
+		 */
+		addr = (IP_DST_ADDR | (ip_var++ << 8)) + rte_lcore_id();
+		ip_hdr->src_addr = rte_cpu_to_be_32(addr);
+	}
+	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
+			sizeof(struct ether_hdr) +
+			sizeof(struct ipv4_hdr));
+	/*
+	 * Complete first mbuf of packet and append it to the
+	 * burst of packets to be transmitted.
+	 */
+	pkt->nb_segs = nb_segs;
+	pkt->pkt_len = pkt_len;
+
+	return true;
+}
+
 /*
  * Transmit a burst of multi-segments packets.
  */
@@ -155,10 +229,8 @@  static void
 pkt_burst_transmit(struct fwd_stream *fs)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
-	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
 	struct rte_port *txp;
 	struct rte_mbuf *pkt;
-	struct rte_mbuf *pkt_seg;
 	struct rte_mempool *mbp;
 	struct ether_hdr eth_hdr;
 	uint16_t nb_tx;
@@ -166,15 +238,12 @@  pkt_burst_transmit(struct fwd_stream *fs)
 	uint16_t vlan_tci, vlan_tci_outer;
 	uint32_t retry;
 	uint64_t ol_flags = 0;
-	uint8_t  ip_var = RTE_PER_LCORE(_ip_var);
-	uint8_t  i;
 	uint64_t tx_offloads;
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
 	uint64_t start_tsc;
 	uint64_t end_tsc;
 	uint64_t core_cycles;
 #endif
-	uint32_t nb_segs, pkt_len;
 
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
 	start_tsc = rte_rdtsc();
@@ -201,85 +270,19 @@  pkt_burst_transmit(struct fwd_stream *fs)
 
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_mbuf_raw_alloc(mbp);
-		if (pkt == NULL) {
-		nomore_mbuf:
-			if (nb_pkt == 0)
-				return;
+		if (pkt == NULL)
+			break;
+		if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr, vlan_tci,
+						vlan_tci_outer, ol_flags))) {
+			rte_pktmbuf_free(pkt);
 			break;
 		}
-
-		/*
-		 * Using raw alloc is good to improve performance,
-		 * but some consumers may use the headroom and so
-		 * decrement data_off. We need to make sure it is
-		 * reset to default value.
-		 */
-		rte_pktmbuf_reset_headroom(pkt);
-		pkt->data_len = tx_pkt_seg_lengths[0];
-		pkt_seg = pkt;
-
-		if (tx_pkt_split == TX_PKT_SPLIT_RND)
-			nb_segs = random() % tx_pkt_nb_segs + 1;
-		else
-			nb_segs = tx_pkt_nb_segs;
-
-		if (nb_segs > 1) {
-			if (rte_mempool_get_bulk(mbp, (void **)pkt_segs,
-							nb_segs)) {
-				rte_pktmbuf_free(pkt);
-				goto nomore_mbuf;
-			}
-		}
-
-		pkt_len = pkt->data_len;
-		for (i = 1; i < nb_segs; i++) {
-			pkt_seg->next = pkt_segs[i - 1];
-			pkt_seg = pkt_seg->next;
-			pkt_seg->data_len = tx_pkt_seg_lengths[i];
-			pkt_len += pkt_seg->data_len;
-		}
-		pkt_seg->next = NULL; /* Last segment of packet. */
-
-		/*
-		 * Copy headers in first packet segment(s).
-		 */
-		copy_buf_to_pkt(&eth_hdr, sizeof(eth_hdr), pkt, 0);
-		copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
-				sizeof(struct ether_hdr));
-		if (txonly_multi_flow) {
-			struct ipv4_hdr *ip_hdr;
-			uint32_t addr;
-
-			ip_hdr = rte_pktmbuf_mtod_offset(pkt,
-					struct ipv4_hdr *,
-					sizeof(struct ether_hdr));
-			/*
-			 * Generate multiple flows by varying IP src addr. This
-			 * enables packets are well distributed by RSS in
-			 * receiver side if any and txonly mode can be a decent
-			 * packet generator for developer's quick performance
-			 * regression test.
-			 */
-			addr = (IP_DST_ADDR | (ip_var++ << 8)) + rte_lcore_id();
-			ip_hdr->src_addr = rte_cpu_to_be_32(addr);
-		}
-		copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
-				sizeof(struct ether_hdr) +
-				sizeof(struct ipv4_hdr));
-
-		/*
-		 * Complete first mbuf of packet and append it to the
-		 * burst of packets to be transmitted.
-		 */
-		pkt->nb_segs = nb_segs;
-		pkt->pkt_len = pkt_len;
-		pkt->ol_flags = ol_flags;
-		pkt->vlan_tci = vlan_tci;
-		pkt->vlan_tci_outer = vlan_tci_outer;
-		pkt->l2_len = sizeof(struct ether_hdr);
-		pkt->l3_len = sizeof(struct ipv4_hdr);
 		pkts_burst[nb_pkt] = pkt;
 	}
+
+	if (nb_pkt == 0)
+		return;
+
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
 	/*
 	 * Retry if necessary