Hi David,
Thanks for this feedback. I'll be sending a new version with your suggestion.
Kindest regards,
Raslan Darawsheh
-----Original Message-----
From: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Sent: Friday, September 14, 2018 9:35 AM
To: David Marchand <david.marchand@6wind.com>
Cc: Raslan Darawsheh <rasland@mellanox.com>; Wu, Jingjing <jingjing.wu@intel.com>; Thomas Monjalon <thomas@monjalon.net>; dev@dpdk.org; Shahaf Shuler <shahafs@mellanox.com>; Xueming(Steven) Li <xuemingl@mellanox.com>
Subject: Re: [dpdk-dev] [PATCH 2/2] app/testpmd: use the generic function to dump packets
-----Original Message-----
> Date: Thu, 13 Sep 2018 18:43:02 +0200
> From: David Marchand <david.marchand@6wind.com>
> To: Raslan Darawsheh <rasland@mellanox.com>
> Cc: "Wu, Jingjing" <jingjing.wu@intel.com>, Thomas Monjalon
> <thomas@monjalon.net>, dev@dpdk.org, Shahaf Shuler
> <shahafs@mellanox.com>, xuemingl@mellanox.com
> Subject: Re: [dpdk-dev] [PATCH 2/2] app/testpmd: use the generic
> function to dump packets
>
>
> On Wed, Sep 12, 2018 at 10:06 AM, Raslan Darawsheh <rasland@mellanox.com> wrote:
> > use the generic function to dump packets for several forwarding
> > engines.
>
> Rather than patching some engines, how about putting this in rx/tx callbacks ?
+1 to have zero performance impact on forward engines.
> A bit like something I sent earlier ?
>
> http://patches.dpdk.org/patch/44465/
>
>
> --
> David Marchand
@@ -69,6 +69,8 @@ pkt_burst_io_forward(struct fwd_stream *fs)
if (unlikely(nb_rx == 0))
return;
fs->rx_packets += nb_rx;
+ if (unlikely(verbose_level & 0x1))
+ dump_pkt_burst(fs, pkts_burst, nb_rx, 1);
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
@@ -78,6 +78,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
fs->rx_packets += nb_rx;
+ if (unlikely(verbose_level & 0x1))
+ dump_pkt_burst(fs, pkts_burst, nb_rx, 1);
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
@@ -117,6 +119,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
}
fs->tx_packets += nb_tx;
+ if (unlikely(verbose_level & 0x2))
+ dump_pkt_burst(fs, pkts_burst, nb_tx, 0);
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
@@ -107,6 +107,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
fs->rx_packets += nb_rx;
+ if (unlikely(verbose_level & 0x1))
+ dump_pkt_burst(fs, pkts_burst, nb_rx, 1);
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
@@ -147,6 +149,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
}
}
fs->tx_packets += nb_tx;
+ if (unlikely(verbose_level & 0x2))
+ dump_pkt_burst(fs, pkts_burst, nb_tx, 0);
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
@@ -40,14 +40,6 @@
#include "testpmd.h"
-static inline void
-print_ether_addr(const char *what, struct ether_addr *eth_addr)
-{
- char buf[ETHER_ADDR_FMT_SIZE];
- ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
- printf("%s%s", what, buf);
-}
-
/*
* Received a burst of packets.
*/
@@ -55,16 +47,8 @@ static void
pkt_burst_receive(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct rte_mbuf *mb;
- struct ether_hdr *eth_hdr;
- uint16_t eth_type;
- uint64_t ol_flags;
uint16_t nb_rx;
- uint16_t i, packet_type;
- uint16_t is_encapsulation;
- char buf[256];
- struct rte_net_hdr_lens hdr_lens;
- uint32_t sw_packet_type;
+ uint16_t i;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
@@ -90,120 +74,10 @@ pkt_burst_receive(struct fwd_stream *fs)
/*
* Dump each received packet if verbose_level > 0.
*/
- if (verbose_level > 0)
- printf("port %u/queue %u: received %u packets\n",
- fs->rx_port,
- (unsigned) fs->rx_queue,
- (unsigned) nb_rx);
- for (i = 0; i < nb_rx; i++) {
- mb = pkts_burst[i];
- if (verbose_level == 0) {
- rte_pktmbuf_free(mb);
- continue;
- }
- eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
- eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
- ol_flags = mb->ol_flags;
- packet_type = mb->packet_type;
- is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type);
-
- print_ether_addr(" src=", &eth_hdr->s_addr);
- print_ether_addr(" - dst=", &eth_hdr->d_addr);
- printf(" - type=0x%04x - length=%u - nb_segs=%d",
- eth_type, (unsigned) mb->pkt_len,
- (int)mb->nb_segs);
- if (ol_flags & PKT_RX_RSS_HASH) {
- printf(" - RSS hash=0x%x", (unsigned) mb->hash.rss);
- printf(" - RSS queue=0x%x",(unsigned) fs->rx_queue);
- }
- if (ol_flags & PKT_RX_FDIR) {
- printf(" - FDIR matched ");
- if (ol_flags & PKT_RX_FDIR_ID)
- printf("ID=0x%x",
- mb->hash.fdir.hi);
- else if (ol_flags & PKT_RX_FDIR_FLX)
- printf("flex bytes=0x%08x %08x",
- mb->hash.fdir.hi, mb->hash.fdir.lo);
- else
- printf("hash=0x%x ID=0x%x ",
- mb->hash.fdir.hash, mb->hash.fdir.id);
- }
- if (ol_flags & PKT_RX_TIMESTAMP)
- printf(" - timestamp %"PRIu64" ", mb->timestamp);
- if (ol_flags & PKT_RX_VLAN_STRIPPED)
- printf(" - VLAN tci=0x%x", mb->vlan_tci);
- if (ol_flags & PKT_RX_QINQ_STRIPPED)
- printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
- mb->vlan_tci, mb->vlan_tci_outer);
- if (mb->packet_type) {
- rte_get_ptype_name(mb->packet_type, buf, sizeof(buf));
- printf(" - hw ptype: %s", buf);
- }
- sw_packet_type = rte_net_get_ptype(mb, &hdr_lens,
- RTE_PTYPE_ALL_MASK);
- rte_get_ptype_name(sw_packet_type, buf, sizeof(buf));
- printf(" - sw ptype: %s", buf);
- if (sw_packet_type & RTE_PTYPE_L2_MASK)
- printf(" - l2_len=%d", hdr_lens.l2_len);
- if (sw_packet_type & RTE_PTYPE_L3_MASK)
- printf(" - l3_len=%d", hdr_lens.l3_len);
- if (sw_packet_type & RTE_PTYPE_L4_MASK)
- printf(" - l4_len=%d", hdr_lens.l4_len);
- if (sw_packet_type & RTE_PTYPE_TUNNEL_MASK)
- printf(" - tunnel_len=%d", hdr_lens.tunnel_len);
- if (sw_packet_type & RTE_PTYPE_INNER_L2_MASK)
- printf(" - inner_l2_len=%d", hdr_lens.inner_l2_len);
- if (sw_packet_type & RTE_PTYPE_INNER_L3_MASK)
- printf(" - inner_l3_len=%d", hdr_lens.inner_l3_len);
- if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK)
- printf(" - inner_l4_len=%d", hdr_lens.inner_l4_len);
- if (is_encapsulation) {
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
- struct udp_hdr *udp_hdr;
- uint8_t l2_len;
- uint8_t l3_len;
- uint8_t l4_len;
- uint8_t l4_proto;
- struct vxlan_hdr *vxlan_hdr;
-
- l2_len = sizeof(struct ether_hdr);
-
- /* Do not support ipv4 option field */
- if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
- l3_len = sizeof(struct ipv4_hdr);
- ipv4_hdr = rte_pktmbuf_mtod_offset(mb,
- struct ipv4_hdr *,
- l2_len);
- l4_proto = ipv4_hdr->next_proto_id;
- } else {
- l3_len = sizeof(struct ipv6_hdr);
- ipv6_hdr = rte_pktmbuf_mtod_offset(mb,
- struct ipv6_hdr *,
- l2_len);
- l4_proto = ipv6_hdr->proto;
- }
- if (l4_proto == IPPROTO_UDP) {
- udp_hdr = rte_pktmbuf_mtod_offset(mb,
- struct udp_hdr *,
- l2_len + l3_len);
- l4_len = sizeof(struct udp_hdr);
- vxlan_hdr = rte_pktmbuf_mtod_offset(mb,
- struct vxlan_hdr *,
- l2_len + l3_len + l4_len);
-
- printf(" - VXLAN packet: packet type =%d, "
- "Destination UDP port =%d, VNI = %d",
- packet_type, RTE_BE_TO_CPU_16(udp_hdr->dst_port),
- rte_be_to_cpu_32(vxlan_hdr->vx_vni) >> 8);
- }
- }
- printf(" - Receive queue=0x%x", (unsigned) fs->rx_queue);
- printf("\n");
- rte_get_rx_ol_flag_list(mb->ol_flags, buf, sizeof(buf));
- printf(" ol_flags: %s\n", buf);
- rte_pktmbuf_free(mb);
- }
+ if (unlikely(verbose_level & 0x1))
+ dump_pkt_burst(fs, pkts_burst, nb_rx, 1);
+ for (i = 0; i < nb_rx; i++)
+ rte_pktmbuf_free(pkts_burst[i]);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
end_tsc = rte_rdtsc();