[dpdk-dev,v3,4/6] mbuf: remove the rte_pktmbuf structure

Message ID 1409837138-4401-1-git-send-email-bruce.richardson@intel.com (mailing list archive)
State Accepted, archived

Commit Message

Bruce Richardson Sept. 4, 2014, 1:25 p.m. UTC
From: Olivier Matz <olivier.matz@6wind.com>

The rte_pktmbuf structure was initially included in the rte_mbuf
structure. This was needed when there were two types of mbuf (ctrl and
packet). Now that the control mbuf has been removed, we can merge
rte_pktmbuf into the rte_mbuf structure.

Advantages of doing this:
  - access to mbuf fields is easier (e.g. m->data instead of m->pkt.data);
    see the sketch below
  - the structure becomes more consistent: for instance, there was no reason
    to keep ol_flags directly in rte_mbuf while the rest of the packet
    metadata lived in rte_pktmbuf
  - it allows a deeper reorganization of the rte_mbuf structure in the
    next commits, saving several bytes in it
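
As a rough illustration (not part of the patch itself), the minimal sketch
below mimics the field move with stand-in struct definitions; the real
rte_mbuf carries many more fields:

  #include <stdint.h>
  #include <stdio.h>

  /* Before: packet fields nested in a separate structure. */
  struct old_pktmbuf { void *data; uint16_t data_len; };
  struct old_mbuf    { uint16_t ol_flags; struct old_pktmbuf pkt; };

  /* After: the same fields merged directly into the mbuf. */
  struct new_mbuf    { uint16_t ol_flags; void *data; uint16_t data_len; };

  int main(void)
  {
          struct old_mbuf om = { .pkt = { .data = NULL, .data_len = 60 } };
          struct new_mbuf nm = { .data = NULL, .data_len = 60 };

          /* One level of indirection disappears from every accessor. */
          printf("old: %u, new: %u\n", (unsigned)om.pkt.data_len,
                 (unsigned)nm.data_len);
          return 0;
  }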

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>

Updated to work with the latest code and to include the new example apps.

Changes in V2:
* Further updates to apply to the latest HEAD on master

Changes in V3:
* Updated to compile cleanly on 1.7.1

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 app/test-pmd/cmdline.c                             |   1 -
 app/test-pmd/csumonly.c                            |   6 +-
 app/test-pmd/flowgen.c                             |  16 +--
 app/test-pmd/icmpecho.c                            |   4 +-
 app/test-pmd/ieee1588fwd.c                         |   6 +-
 app/test-pmd/macfwd-retry.c                        |   2 +-
 app/test-pmd/macfwd.c                              |   8 +-
 app/test-pmd/macswap.c                             |   8 +-
 app/test-pmd/rxonly.c                              |  12 +-
 app/test-pmd/testpmd.c                             |   8 +-
 app/test-pmd/testpmd.h                             |   2 +-
 app/test-pmd/txonly.c                              |  42 +++---
 app/test/commands.c                                |   1 -
 app/test/packet_burst_generator.c                  |  46 +++----
 app/test/test_distributor.c                        |  18 +--
 app/test/test_distributor_perf.c                   |   4 +-
 app/test/test_mbuf.c                               |  12 +-
 app/test/test_sched.c                              |   4 +-
 app/test/test_table_acl.c                          |   6 +-
 app/test/test_table_pipeline.c                     |   4 +-
 examples/dpdk_qat/crypto.c                         |  22 ++--
 examples/dpdk_qat/main.c                           |   2 +-
 examples/exception_path/main.c                     |  10 +-
 examples/ip_fragmentation/main.c                   |   6 +-
 examples/ip_pipeline/pipeline_rx.c                 |   4 +-
 examples/ip_pipeline/pipeline_tx.c                 |   2 +-
 examples/ip_reassembly/main.c                      |   8 +-
 examples/ipv4_multicast/main.c                     |  14 +-
 examples/l3fwd-acl/main.c                          |   2 +-
 examples/l3fwd-power/main.c                        |   2 +-
 examples/l3fwd-vf/main.c                           |   2 +-
 examples/l3fwd/main.c                              |  10 +-
 examples/load_balancer/runtime.c                   |   2 +-
 .../client_server_mp/mp_client/client.c            |   2 +-
 examples/quota_watermark/qw/main.c                 |   4 +-
 examples/vhost/main.c                              | 106 +++++++--------
 examples/vhost_xen/main.c                          |  22 ++--
 lib/librte_distributor/rte_distributor.c           |   2 +-
 lib/librte_ip_frag/ip_frag_common.h                |  14 +-
 lib/librte_ip_frag/rte_ipv4_fragmentation.c        |  40 +++---
 lib/librte_ip_frag/rte_ipv4_reassembly.c           |   6 +-
 lib/librte_ip_frag/rte_ipv6_fragmentation.c        |  38 +++---
 lib/librte_ip_frag/rte_ipv6_reassembly.c           |   4 +-
 lib/librte_mbuf/rte_mbuf.c                         |  26 ++--
 lib/librte_mbuf/rte_mbuf.h                         | 142 ++++++++++-----------
 lib/librte_pmd_bond/rte_eth_bond_pmd.c             |   4 +-
 lib/librte_pmd_e1000/em_rxtx.c                     |  64 +++++-----
 lib/librte_pmd_e1000/igb_rxtx.c                    |  68 +++++-----
 lib/librte_pmd_i40e/i40e_rxtx.c                    |  98 +++++++-------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c                  | 100 +++++++--------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.h                  |   2 +-
 lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c              |  20 ++-
 lib/librte_pmd_pcap/rte_eth_pcap.c                 |  14 +-
 lib/librte_pmd_virtio/virtio_rxtx.c                |  58 ++++-----
 lib/librte_pmd_virtio/virtqueue.h                  |   2 +-
 lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c              |  26 ++--
 lib/librte_pmd_xenvirt/rte_eth_xenvirt.c           |  12 +-
 lib/librte_pmd_xenvirt/virtqueue.h                 |   4 +-
 lib/librte_port/rte_port_frag.c                    |   2 +-
 lib/librte_sched/rte_sched.c                       |  14 +-
 lib/librte_sched/rte_sched.h                       |  10 +-
 61 files changed, 597 insertions(+), 603 deletions(-)
  

Comments

De Lara Guarch, Pablo Sept. 5, 2014, 4:17 p.m. UTC | #1
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Bruce Richardson
> Sent: Thursday, September 04, 2014 2:26 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v3 4/6] mbuf: remove the rte_pktmbuf
> structure
> 
> From: Olivier Matz <olivier.matz@6wind.com>
> 
> [...]

Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
  
Olivier Matz Sept. 8, 2014, 8:29 a.m. UTC | #2
On 09/04/2014 03:25 PM, Bruce Richardson wrote:
> [...]

In app/test/packet_burst_generator.c, there is one remaining reference
to "pkt." in a comment:

  #define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */


Acked-by: Olivier Matz <olivier.matz@6wind.com>
  

Patch

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 6de38e6..67321f7 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -6475,7 +6475,6 @@  dump_struct_sizes(void)
 {
 #define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
 	DUMP_SIZE(struct rte_mbuf);
-	DUMP_SIZE(struct rte_pktmbuf);
 	DUMP_SIZE(struct rte_mempool);
 	DUMP_SIZE(struct rte_ring);
 #undef DUMP_SIZE
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index e5a1f52..655b6d8 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -263,7 +263,7 @@  pkt_burst_checksum_forward(struct fwd_stream *fs)
 		pkt_ol_flags = mb->ol_flags;
 		ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));
 
-		eth_hdr = (struct ether_hdr *) mb->pkt.data;
+		eth_hdr = (struct ether_hdr *) mb->data;
 		eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
 		if (eth_type == ETHER_TYPE_VLAN) {
 			/* Only allow single VLAN label here */
@@ -432,8 +432,8 @@  pkt_burst_checksum_forward(struct fwd_stream *fs)
 		}
 
 		/* Combine the packet header write. VLAN is not consider here */
-		mb->pkt.vlan_macip.f.l2_len = l2_len;
-		mb->pkt.vlan_macip.f.l3_len = l3_len;
+		mb->vlan_macip.f.l2_len = l2_len;
+		mb->vlan_macip.f.l3_len = l3_len;
 		mb->ol_flags = ol_flags;
 	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index a8f2a65..17dbf83 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -171,11 +171,11 @@  pkt_burst_flow_gen(struct fwd_stream *fs)
 		if (!pkt)
 			break;
 
-		pkt->pkt.data_len = pkt_size;
-		pkt->pkt.next = NULL;
+		pkt->data_len = pkt_size;
+		pkt->next = NULL;
 
 		/* Initialize Ethernet header. */
-		eth_hdr = (struct ether_hdr *)pkt->pkt.data;
+		eth_hdr = (struct ether_hdr *)pkt->data;
 		ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
 		ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
 		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
@@ -205,12 +205,12 @@  pkt_burst_flow_gen(struct fwd_stream *fs)
 		udp_hdr->dgram_len	= RTE_CPU_TO_BE_16(pkt_size -
 							   sizeof(*eth_hdr) -
 							   sizeof(*ip_hdr));
-		pkt->pkt.nb_segs		= 1;
-		pkt->pkt.pkt_len		= pkt_size;
+		pkt->nb_segs			= 1;
+		pkt->pkt_len			= pkt_size;
 		pkt->ol_flags			= ol_flags;
-		pkt->pkt.vlan_macip.f.vlan_tci	= vlan_tci;
-		pkt->pkt.vlan_macip.f.l2_len	= sizeof(struct ether_hdr);
-		pkt->pkt.vlan_macip.f.l3_len	= sizeof(struct ipv4_hdr);
+		pkt->vlan_macip.f.vlan_tci	= vlan_tci;
+		pkt->vlan_macip.f.l2_len	= sizeof(struct ether_hdr);
+		pkt->vlan_macip.f.l3_len	= sizeof(struct ipv4_hdr);
 		pkts_burst[nb_pkt]		= pkt;
 
 		next_flow = (next_flow + 1) % cfg_n_flows;
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index c28ff5a..4a277b8 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -330,12 +330,12 @@  reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
 	nb_replies = 0;
 	for (i = 0; i < nb_rx; i++) {
 		pkt = pkts_burst[i];
-		eth_h = (struct ether_hdr *) pkt->pkt.data;
+		eth_h = (struct ether_hdr *) pkt->data;
 		eth_type = RTE_BE_TO_CPU_16(eth_h->ether_type);
 		l2_len = sizeof(struct ether_hdr);
 		if (verbose_level > 0) {
 			printf("\nPort %d pkt-len=%u nb-segs=%u\n",
-			       fs->rx_port, pkt->pkt.pkt_len, pkt->pkt.nb_segs);
+			       fs->rx_port, pkt->pkt_len, pkt->nb_segs);
 			ether_addr_dump("  ETH:  src=", &eth_h->s_addr);
 			ether_addr_dump(" dst=", &eth_h->d_addr);
 		}
diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c
index 3ce9979..ab5e06e 100644
--- a/app/test-pmd/ieee1588fwd.c
+++ b/app/test-pmd/ieee1588fwd.c
@@ -546,7 +546,7 @@  ieee1588_packet_fwd(struct fwd_stream *fs)
 	 * Check that the received packet is a PTP packet that was detected
 	 * by the hardware.
 	 */
-	eth_hdr = (struct ether_hdr *)mb->pkt.data;
+	eth_hdr = (struct ether_hdr *)mb->data;
 	eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
 	if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
 		if (eth_type == ETHER_TYPE_1588) {
@@ -557,7 +557,7 @@  ieee1588_packet_fwd(struct fwd_stream *fs)
 			printf("Port %u Received non PTP packet type=0x%4x "
 			       "len=%u\n",
 			       (unsigned) fs->rx_port, eth_type,
-			       (unsigned) mb->pkt.pkt_len);
+			       (unsigned) mb->pkt_len);
 		}
 		rte_pktmbuf_free(mb);
 		return;
@@ -574,7 +574,7 @@  ieee1588_packet_fwd(struct fwd_stream *fs)
 	 * Check that the received PTP packet is a PTP V2 packet of type
 	 * PTP_SYNC_MESSAGE.
 	 */
-	ptp_hdr = (struct ptpv2_msg *) ((char *) mb->pkt.data +
+	ptp_hdr = (struct ptpv2_msg *) ((char *) mb->data +
 					sizeof(struct ether_hdr));
 	if (ptp_hdr->version != 0x02) {
 		printf("Port %u Received PTP V2 Ethernet frame with wrong PTP"
diff --git a/app/test-pmd/macfwd-retry.c b/app/test-pmd/macfwd-retry.c
index f4e06c4..5122983 100644
--- a/app/test-pmd/macfwd-retry.c
+++ b/app/test-pmd/macfwd-retry.c
@@ -119,7 +119,7 @@  pkt_burst_mac_retry_forward(struct fwd_stream *fs)
 	fs->rx_packets += nb_rx;
 	for (i = 0; i < nb_rx; i++) {
 		mb = pkts_burst[i];
-		eth_hdr = (struct ether_hdr *) mb->pkt.data;
+		eth_hdr = (struct ether_hdr *) mb->data;
 		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
 				&eth_hdr->d_addr);
 		ether_addr_copy(&ports[fs->tx_port].eth_addr,
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index fc8f749..999c8e3 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -110,15 +110,15 @@  pkt_burst_mac_forward(struct fwd_stream *fs)
 	txp = &ports[fs->tx_port];
 	for (i = 0; i < nb_rx; i++) {
 		mb = pkts_burst[i];
-		eth_hdr = (struct ether_hdr *) mb->pkt.data;
+		eth_hdr = (struct ether_hdr *) mb->data;
 		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
 				&eth_hdr->d_addr);
 		ether_addr_copy(&ports[fs->tx_port].eth_addr,
 				&eth_hdr->s_addr);
 		mb->ol_flags = txp->tx_ol_flags;
-		mb->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
-		mb->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
-		mb->pkt.vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+		mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+		mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+		mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
 	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
 	fs->tx_packets += nb_tx;
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 4ed6096..731f487 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -110,7 +110,7 @@  pkt_burst_mac_swap(struct fwd_stream *fs)
 	txp = &ports[fs->tx_port];
 	for (i = 0; i < nb_rx; i++) {
 		mb = pkts_burst[i];
-		eth_hdr = (struct ether_hdr *) mb->pkt.data;
+		eth_hdr = (struct ether_hdr *) mb->data;
 
 		/* Swap dest and src mac addresses. */
 		ether_addr_copy(&eth_hdr->d_addr, &addr);
@@ -118,9 +118,9 @@  pkt_burst_mac_swap(struct fwd_stream *fs)
 		ether_addr_copy(&addr, &eth_hdr->s_addr);
 
 		mb->ol_flags = txp->tx_ol_flags;
-		mb->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
-		mb->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
-		mb->pkt.vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+		mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+		mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+		mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
 	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
 	fs->tx_packets += nb_tx;
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index 5f21a3e..c34a5e1 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -149,24 +149,24 @@  pkt_burst_receive(struct fwd_stream *fs)
 			rte_pktmbuf_free(mb);
 			continue;
 		}
-		eth_hdr = (struct ether_hdr *) mb->pkt.data;
+		eth_hdr = (struct ether_hdr *) mb->data;
 		eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
 		ol_flags = mb->ol_flags;
 		print_ether_addr("  src=", &eth_hdr->s_addr);
 		print_ether_addr(" - dst=", &eth_hdr->d_addr);
 		printf(" - type=0x%04x - length=%u - nb_segs=%d",
-		       eth_type, (unsigned) mb->pkt.pkt_len,
-		       (int)mb->pkt.nb_segs);
+		       eth_type, (unsigned) mb->pkt_len,
+		       (int)mb->nb_segs);
 		if (ol_flags & PKT_RX_RSS_HASH) {
-			printf(" - RSS hash=0x%x", (unsigned) mb->pkt.hash.rss);
+			printf(" - RSS hash=0x%x", (unsigned) mb->hash.rss);
 			printf(" - RSS queue=0x%x",(unsigned) fs->rx_queue);
 		}
 		else if (ol_flags & PKT_RX_FDIR)
 			printf(" - FDIR hash=0x%x - FDIR id=0x%x ",
-			       mb->pkt.hash.fdir.hash, mb->pkt.hash.fdir.id);
+			       mb->hash.fdir.hash, mb->hash.fdir.id);
 		if (ol_flags & PKT_RX_VLAN_PKT)
 			printf(" - VLAN tci=0x%x",
-				mb->pkt.vlan_macip.f.vlan_tci);
+				mb->vlan_macip.f.vlan_tci);
 		printf("\n");
 		if (ol_flags != 0) {
 			int rxf;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 2d74c5c..d13a53a 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -404,10 +404,10 @@  testpmd_mbuf_ctor(struct rte_mempool *mp,
 			mb_ctor_arg->seg_buf_offset);
 	mb->buf_len      = mb_ctor_arg->seg_buf_size;
 	mb->ol_flags     = 0;
-	mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
-	mb->pkt.nb_segs  = 1;
-	mb->pkt.vlan_macip.data = 0;
-	mb->pkt.hash.rss = 0;
+	mb->data         = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+	mb->nb_segs      = 1;
+	mb->vlan_macip.data = 0;
+	mb->hash.rss     = 0;
 }
 
 static void
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index b8322a2..09923a8 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -60,7 +60,7 @@  int main(int argc, char **argv);
  * The maximum number of segments per packet is used when creating
  * scattered transmit packets composed of a list of mbufs.
  */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
+#define RTE_MAX_SEGS_PER_PKT 255 /**< nb_segs is a 8-bit unsigned char. */
 
 #define MAX_PKT_BURST 512
 #define DEF_PKT_BURST 32
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index d634096..1b2f661 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -106,18 +106,18 @@  copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
 	unsigned copy_len;
 
 	seg = pkt;
-	while (offset >= seg->pkt.data_len) {
-		offset -= seg->pkt.data_len;
-		seg = seg->pkt.next;
+	while (offset >= seg->data_len) {
+		offset -= seg->data_len;
+		seg = seg->next;
 	}
-	copy_len = seg->pkt.data_len - offset;
-	seg_buf = ((char *) seg->pkt.data + offset);
+	copy_len = seg->data_len - offset;
+	seg_buf = ((char *) seg->data + offset);
 	while (len > copy_len) {
 		rte_memcpy(seg_buf, buf, (size_t) copy_len);
 		len -= copy_len;
 		buf = ((char*) buf + copy_len);
-		seg = seg->pkt.next;
-		seg_buf = seg->pkt.data;
+		seg = seg->next;
+		seg_buf = seg->data;
 	}
 	rte_memcpy(seg_buf, buf, (size_t) len);
 }
@@ -125,8 +125,8 @@  copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
 static inline void
 copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
 {
-	if (offset + len <= pkt->pkt.data_len) {
-		rte_memcpy(((char *) pkt->pkt.data + offset), buf, (size_t) len);
+	if (offset + len <= pkt->data_len) {
+		rte_memcpy(((char *) pkt->data + offset), buf, (size_t) len);
 		return;
 	}
 	copy_buf_to_pkt_segs(buf, len, pkt, offset);
@@ -225,19 +225,19 @@  pkt_burst_transmit(struct fwd_stream *fs)
 				return;
 			break;
 		}
-		pkt->pkt.data_len = tx_pkt_seg_lengths[0];
+		pkt->data_len = tx_pkt_seg_lengths[0];
 		pkt_seg = pkt;
 		for (i = 1; i < tx_pkt_nb_segs; i++) {
-			pkt_seg->pkt.next = tx_mbuf_alloc(mbp);
-			if (pkt_seg->pkt.next == NULL) {
-				pkt->pkt.nb_segs = i;
+			pkt_seg->next = tx_mbuf_alloc(mbp);
+			if (pkt_seg->next == NULL) {
+				pkt->nb_segs = i;
 				rte_pktmbuf_free(pkt);
 				goto nomore_mbuf;
 			}
-			pkt_seg = pkt_seg->pkt.next;
-			pkt_seg->pkt.data_len = tx_pkt_seg_lengths[i];
+			pkt_seg = pkt_seg->next;
+			pkt_seg->data_len = tx_pkt_seg_lengths[i];
 		}
-		pkt_seg->pkt.next = NULL; /* Last segment of packet. */
+		pkt_seg->next = NULL; /* Last segment of packet. */
 
 		/*
 		 * Initialize Ethernet header.
@@ -260,12 +260,12 @@  pkt_burst_transmit(struct fwd_stream *fs)
 		 * Complete first mbuf of packet and append it to the
 		 * burst of packets to be transmitted.
 		 */
-		pkt->pkt.nb_segs = tx_pkt_nb_segs;
-		pkt->pkt.pkt_len = tx_pkt_length;
+		pkt->nb_segs = tx_pkt_nb_segs;
+		pkt->pkt_len = tx_pkt_length;
 		pkt->ol_flags = ol_flags;
-		pkt->pkt.vlan_macip.f.vlan_tci  = vlan_tci;
-		pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
-		pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+		pkt->vlan_macip.f.vlan_tci  = vlan_tci;
+		pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+		pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
 		pkts_burst[nb_pkt] = pkt;
 	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
diff --git a/app/test/commands.c b/app/test/commands.c
index 9ea3c62..a9e36b1 100644
--- a/app/test/commands.c
+++ b/app/test/commands.c
@@ -136,7 +136,6 @@  dump_struct_sizes(void)
 {
 #define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
 	DUMP_SIZE(struct rte_mbuf);
-	DUMP_SIZE(struct rte_pktmbuf);
 	DUMP_SIZE(struct rte_mempool);
 	DUMP_SIZE(struct rte_ring);
 #undef DUMP_SIZE
diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 5d539f1..8740348 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -54,18 +54,18 @@  copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
 	unsigned copy_len;
 
 	seg = pkt;
-	while (offset >= seg->pkt.data_len) {
-		offset -= seg->pkt.data_len;
-		seg = seg->pkt.next;
+	while (offset >= seg->data_len) {
+		offset -= seg->data_len;
+		seg = seg->next;
 	}
-	copy_len = seg->pkt.data_len - offset;
-	seg_buf = ((char *) seg->pkt.data + offset);
+	copy_len = seg->data_len - offset;
+	seg_buf = ((char *) seg->data + offset);
 	while (len > copy_len) {
 		rte_memcpy(seg_buf, buf, (size_t) copy_len);
 		len -= copy_len;
 		buf = ((char *) buf + copy_len);
-		seg = seg->pkt.next;
-		seg_buf = seg->pkt.data;
+		seg = seg->next;
+		seg_buf = seg->data;
 	}
 	rte_memcpy(seg_buf, buf, (size_t) len);
 }
@@ -73,8 +73,8 @@  copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
 static inline void
 copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
 {
-	if (offset + len <= pkt->pkt.data_len) {
-		rte_memcpy(((char *) pkt->pkt.data + offset), buf, (size_t) len);
+	if (offset + len <= pkt->data_len) {
+		rte_memcpy(((char *) pkt->data + offset), buf, (size_t) len);
 		return;
 	}
 	copy_buf_to_pkt_segs(buf, len, pkt, offset);
@@ -220,19 +220,19 @@  nomore_mbuf:
 			break;
 		}
 
-		pkt->pkt.data_len = tx_pkt_seg_lengths[0];
+		pkt->data_len = tx_pkt_seg_lengths[0];
 		pkt_seg = pkt;
 		for (i = 1; i < tx_pkt_nb_segs; i++) {
-			pkt_seg->pkt.next = rte_pktmbuf_alloc(mp);
-			if (pkt_seg->pkt.next == NULL) {
-				pkt->pkt.nb_segs = i;
+			pkt_seg->next = rte_pktmbuf_alloc(mp);
+			if (pkt_seg->next == NULL) {
+				pkt->nb_segs = i;
 				rte_pktmbuf_free(pkt);
 				goto nomore_mbuf;
 			}
-			pkt_seg = pkt_seg->pkt.next;
-			pkt_seg->pkt.data_len = tx_pkt_seg_lengths[i];
+			pkt_seg = pkt_seg->next;
+			pkt_seg->data_len = tx_pkt_seg_lengths[i];
 		}
-		pkt_seg->pkt.next = NULL; /* Last segment of packet. */
+		pkt_seg->next = NULL; /* Last segment of packet. */
 
 		/*
 		 * Copy headers in first packet segment(s).
@@ -258,21 +258,21 @@  nomore_mbuf:
 		 * Complete first mbuf of packet and append it to the
 		 * burst of packets to be transmitted.
 		 */
-		pkt->pkt.nb_segs = tx_pkt_nb_segs;
-		pkt->pkt.pkt_len = tx_pkt_length;
-		pkt->pkt.vlan_macip.f.l2_len = eth_hdr_size;
+		pkt->nb_segs = tx_pkt_nb_segs;
+		pkt->pkt_len = tx_pkt_length;
+		pkt->vlan_macip.f.l2_len = eth_hdr_size;
 
 		if (ipv4) {
-			pkt->pkt.vlan_macip.f.vlan_tci  = ETHER_TYPE_IPv4;
-			pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+			pkt->vlan_macip.f.vlan_tci  = ETHER_TYPE_IPv4;
+			pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
 
 			if (vlan_enabled)
 				pkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;
 			else
 				pkt->ol_flags = PKT_RX_IPV4_HDR;
 		} else {
-			pkt->pkt.vlan_macip.f.vlan_tci  = ETHER_TYPE_IPv6;
-			pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);
+			pkt->vlan_macip.f.vlan_tci  = ETHER_TYPE_IPv6;
+			pkt->vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);
 
 			if (vlan_enabled)
 				pkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;
diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index eca974d..ce06436 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -120,7 +120,7 @@  sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 	/* now set all hash values in all buffers to zero, so all pkts go to the
 	 * one worker thread */
 	for (i = 0; i < BURST; i++)
-		bufs[i]->pkt.hash.rss = 0;
+		bufs[i]->hash.rss = 0;
 
 	rte_distributor_process(d, bufs, BURST);
 	rte_distributor_flush(d);
@@ -142,7 +142,7 @@  sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 	if (rte_lcore_count() >= 3) {
 		clear_packet_count();
 		for (i = 0; i < BURST; i++)
-			bufs[i]->pkt.hash.rss = (i & 1) << 8;
+			bufs[i]->hash.rss = (i & 1) << 8;
 
 		rte_distributor_process(d, bufs, BURST);
 		rte_distributor_flush(d);
@@ -167,7 +167,7 @@  sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 	 * so load gets distributed */
 	clear_packet_count();
 	for (i = 0; i < BURST; i++)
-		bufs[i]->pkt.hash.rss = i;
+		bufs[i]->hash.rss = i;
 
 	rte_distributor_process(d, bufs, BURST);
 	rte_distributor_flush(d);
@@ -199,7 +199,7 @@  sanity_test(struct rte_distributor *d, struct rte_mempool *p)
 		return -1;
 	}
 	for (i = 0; i < BIG_BATCH; i++)
-		many_bufs[i]->pkt.hash.rss = i << 2;
+		many_bufs[i]->hash.rss = i << 2;
 
 	for (i = 0; i < BIG_BATCH/BURST; i++) {
 		rte_distributor_process(d, &many_bufs[i*BURST], BURST);
@@ -280,7 +280,7 @@  sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)
 		while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
 			rte_distributor_process(d, NULL, 0);
 		for (j = 0; j < BURST; j++) {
-			bufs[j]->pkt.hash.rss = (i+j) << 1;
+			bufs[j]->hash.rss = (i+j) << 1;
 			rte_mbuf_refcnt_set(bufs[j], 1);
 		}
 
@@ -359,7 +359,7 @@  sanity_test_with_worker_shutdown(struct rte_distributor *d,
 	/* now set all hash values in all buffers to zero, so all pkts go to the
 	 * one worker thread */
 	for (i = 0; i < BURST; i++)
-		bufs[i]->pkt.hash.rss = 0;
+		bufs[i]->hash.rss = 0;
 
 	rte_distributor_process(d, bufs, BURST);
 	/* at this point, we will have processed some packets and have a full
@@ -372,7 +372,7 @@  sanity_test_with_worker_shutdown(struct rte_distributor *d,
 		return -1;
 	}
 	for (i = 0; i < BURST; i++)
-		bufs[i]->pkt.hash.rss = 0;
+		bufs[i]->hash.rss = 0;
 
 	/* get worker zero to quit */
 	zero_quit = 1;
@@ -416,7 +416,7 @@  test_flush_with_worker_shutdown(struct rte_distributor *d,
 	/* now set all hash values in all buffers to zero, so all pkts go to the
 	 * one worker thread */
 	for (i = 0; i < BURST; i++)
-		bufs[i]->pkt.hash.rss = 0;
+		bufs[i]->hash.rss = 0;
 
 	rte_distributor_process(d, bufs, BURST);
 	/* at this point, we will have processed some packets and have a full
@@ -488,7 +488,7 @@  quit_workers(struct rte_distributor *d, struct rte_mempool *p)
 	zero_quit = 0;
 	quit = 1;
 	for (i = 0; i < num_workers; i++)
-		bufs[i]->pkt.hash.rss = i << 1;
+		bufs[i]->hash.rss = i << 1;
 	rte_distributor_process(d, bufs, num_workers);
 
 	rte_mempool_put_bulk(p, (void *)bufs, num_workers);
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index 849387d..b04864c 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -159,7 +159,7 @@  perf_test(struct rte_distributor *d, struct rte_mempool *p)
 	}
 	/* ensure we have different hash value for each pkt */
 	for (i = 0; i < BURST; i++)
-		bufs[i]->pkt.hash.rss = i;
+		bufs[i]->hash.rss = i;
 
 	start = rte_rdtsc();
 	for (i = 0; i < (1<<ITER_POWER); i++)
@@ -198,7 +198,7 @@  quit_workers(struct rte_distributor *d, struct rte_mempool *p)
 
 	quit = 1;
 	for (i = 0; i < num_workers; i++)
-		bufs[i]->pkt.hash.rss = i << 1;
+		bufs[i]->hash.rss = i << 1;
 	rte_distributor_process(d, bufs, num_workers);
 
 	rte_mempool_put_bulk(p, (void *)bufs, num_workers);
diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index 280b1ab..b81e622 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -344,8 +344,8 @@  testclone_testupdate_testdetach(void)
 		GOTO_FAIL("cannot clone data\n");
 	rte_pktmbuf_free(clone);
 
-	mc->pkt.next = rte_pktmbuf_alloc(pktmbuf_pool);
-	if(mc->pkt.next == NULL)
+	mc->next = rte_pktmbuf_alloc(pktmbuf_pool);
+	if(mc->next == NULL)
 		GOTO_FAIL("Next Pkt Null\n");
 
 	clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
@@ -432,7 +432,7 @@  test_pktmbuf_pool_ptr(void)
 			printf("rte_pktmbuf_alloc() failed (%u)\n", i);
 			ret = -1;
 		}
-		m[i]->pkt.data = RTE_PTR_ADD(m[i]->pkt.data, 64);
+		m[i]->data = RTE_PTR_ADD(m[i]->data, 64);
 	}
 
 	/* free them */
@@ -451,8 +451,8 @@  test_pktmbuf_pool_ptr(void)
 			printf("rte_pktmbuf_alloc() failed (%u)\n", i);
 			ret = -1;
 		}
-		if (m[i]->pkt.data != RTE_PTR_ADD(m[i]->buf_addr, RTE_PKTMBUF_HEADROOM)) {
-			printf ("pkt.data pointer not set properly\n");
+		if (m[i]->data != RTE_PTR_ADD(m[i]->buf_addr, RTE_PKTMBUF_HEADROOM)) {
+			printf ("data pointer not set properly\n");
 			ret = -1;
 		}
 	}
@@ -493,7 +493,7 @@  test_pktmbuf_free_segment(void)
 			mb = m[i];
 			while(mb != NULL) {
 				mt = mb;
-				mb = mb->pkt.next;
+				mb = mb->next;
 				rte_pktmbuf_free_seg(mt);
 			}
 		}
diff --git a/app/test/test_sched.c b/app/test/test_sched.c
index ce47084..c957d80 100644
--- a/app/test/test_sched.c
+++ b/app/test/test_sched.c
@@ -145,8 +145,8 @@  prepare_pkt(struct rte_mbuf *mbuf)
 	rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
 
 	/* 64 byte packet */
-	mbuf->pkt.pkt_len  = 60;
-	mbuf->pkt.data_len = 60;
+	mbuf->pkt_len  = 60;
+	mbuf->data_len = 60;
 }
 
 
diff --git a/app/test/test_table_acl.c b/app/test/test_table_acl.c
index dda0570..4db680a 100644
--- a/app/test/test_table_acl.c
+++ b/app/test/test_table_acl.c
@@ -513,7 +513,7 @@  test_pipeline_single_filter(int expected_count)
 			struct rte_mbuf *mbuf;
 
 			mbuf = rte_pktmbuf_alloc(pool);
-			memset(mbuf->pkt.data, 0x00,
+			memset(mbuf->data, 0x00,
 				sizeof(struct ipv4_5tuple));
 
 			five_tuple.proto = j;
@@ -522,7 +522,7 @@  test_pipeline_single_filter(int expected_count)
 			five_tuple.port_src = rte_bswap16(100 + j);
 			five_tuple.port_dst = rte_bswap16(200 + j);
 
-			memcpy(mbuf->pkt.data, &five_tuple,
+			memcpy(mbuf->data, &five_tuple,
 				sizeof(struct ipv4_5tuple));
 			RTE_LOG(INFO, PIPELINE, "%s: Enqueue onto ring %d\n",
 				__func__, i);
@@ -549,7 +549,7 @@  test_pipeline_single_filter(int expected_count)
 			printf("Got %d object(s) from ring %d!\n", ret, i);
 			for (j = 0; j < ret; j++) {
 				mbuf = (struct rte_mbuf *)objs[j];
-				rte_hexdump(stdout, "mbuf", mbuf->pkt.data, 64);
+				rte_hexdump(stdout, "mbuf", mbuf->data, 64);
 				rte_pktmbuf_free(mbuf);
 			}
 			tx_count += ret;
diff --git a/app/test/test_table_pipeline.c b/app/test/test_table_pipeline.c
index 70b1015..15a038b 100644
--- a/app/test/test_table_pipeline.c
+++ b/app/test/test_table_pipeline.c
@@ -498,8 +498,8 @@  test_pipeline_single_filter(int test_type, int expected_count)
 			printf("Got %d object(s) from ring %d!\n", ret, i);
 			for (j = 0; j < ret; j++) {
 				mbuf = (struct rte_mbuf *)objs[j];
-				rte_hexdump(stdout, "Object:", mbuf->pkt.data,
-					mbuf->pkt.data_len);
+				rte_hexdump(stdout, "Object:", mbuf->data,
+					mbuf->data_len);
 				rte_pktmbuf_free(mbuf);
 			}
 			tx_count += ret;
diff --git a/examples/dpdk_qat/crypto.c b/examples/dpdk_qat/crypto.c
index 577ab32..318d47c 100644
--- a/examples/dpdk_qat/crypto.c
+++ b/examples/dpdk_qat/crypto.c
@@ -183,7 +183,7 @@  struct glob_keys g_crypto_hash_keys = {
  *
  */
 #define PACKET_DATA_START_PHYS(p) \
-		((p)->buf_physaddr + ((char *)p->pkt.data - (char *)p->buf_addr))
+		((p)->buf_physaddr + ((char *)p->data - (char *)p->buf_addr))
 
 /*
  * A fixed offset to where the crypto is to be performed, which is the first
@@ -773,7 +773,7 @@  enum crypto_result
 crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 {
 	CpaCySymDpOpData *opData =
-			(CpaCySymDpOpData *) ((char *) (rte_buff->pkt.data)
+			(CpaCySymDpOpData *) ((char *) (rte_buff->data)
 					+ CRYPTO_OFFSET_TO_OPDATA);
 	uint32_t lcore_id;
 
@@ -785,7 +785,7 @@  crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 	bzero(opData, sizeof(CpaCySymDpOpData));
 
 	opData->srcBuffer = opData->dstBuffer = PACKET_DATA_START_PHYS(rte_buff);
-	opData->srcBufferLen = opData->dstBufferLen = rte_buff->pkt.data_len;
+	opData->srcBufferLen = opData->dstBufferLen = rte_buff->data_len;
 	opData->sessionCtx = qaCoreConf[lcore_id].encryptSessionHandleTbl[c][h];
 	opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
 			+ CRYPTO_OFFSET_TO_OPDATA;
@@ -805,7 +805,7 @@  crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 			opData->ivLenInBytes = IV_LENGTH_8_BYTES;
 
 		opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
-		opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+		opData->messageLenToCipherInBytes = rte_buff->data_len
 				- CRYPTO_START_OFFSET;
 		/*
 		 * Work around for padding, message length has to be a multiple of
@@ -818,7 +818,7 @@  crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 	if (NO_HASH != h) {
 
 		opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
-		opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+		opData->messageLenToHashInBytes = rte_buff->data_len
 				- HASH_START_OFFSET;
 		/*
 		 * Work around for padding, message length has to be a multiple of block
@@ -831,7 +831,7 @@  crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 		 * Assumption: Ok ignore the passed digest pointer and place HMAC at end
 		 * of packet.
 		 */
-		opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len;
+		opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
 	}
 
 	if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
@@ -848,7 +848,7 @@  enum crypto_result
 crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 {
 
-	CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->pkt.data)
+	CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->data)
 			+ CRYPTO_OFFSET_TO_OPDATA);
 	uint32_t lcore_id;
 
@@ -860,7 +860,7 @@  crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 	bzero(opData, sizeof(CpaCySymDpOpData));
 
 	opData->dstBuffer = opData->srcBuffer = PACKET_DATA_START_PHYS(rte_buff);
-	opData->dstBufferLen = opData->srcBufferLen = rte_buff->pkt.data_len;
+	opData->dstBufferLen = opData->srcBufferLen = rte_buff->data_len;
 	opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
 			+ CRYPTO_OFFSET_TO_OPDATA;
 	opData->sessionCtx = qaCoreConf[lcore_id].decryptSessionHandleTbl[c][h];
@@ -880,7 +880,7 @@  crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 			opData->ivLenInBytes = IV_LENGTH_8_BYTES;
 
 		opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
-		opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+		opData->messageLenToCipherInBytes = rte_buff->data_len
 				- CRYPTO_START_OFFSET;
 
 		/*
@@ -892,7 +892,7 @@  crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 	}
 	if (NO_HASH != h) {
 		opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
-		opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+		opData->messageLenToHashInBytes = rte_buff->data_len
 				- HASH_START_OFFSET;
 		/*
 		 * Work around for padding, message length has to be a multiple of block
@@ -900,7 +900,7 @@  crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 		 */
 		opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes
 				% HASH_BLOCK_DEFAULT_SIZE;
-		opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len;
+		opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
 	}
 
 	if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
diff --git a/examples/dpdk_qat/main.c b/examples/dpdk_qat/main.c
index d61db4c..75c9876 100644
--- a/examples/dpdk_qat/main.c
+++ b/examples/dpdk_qat/main.c
@@ -384,7 +384,7 @@  main_loop(__attribute__((unused)) void *dummy)
 			}
 		}
 
-		port = dst_ports[pkt->pkt.in_port];
+		port = dst_ports[pkt->in_port];
 
 		/* Transmit the packet */
 		nic_tx_send_packet(pkt, (uint8_t)port);
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index 0204116..5045ef8 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -302,16 +302,16 @@  main_loop(__attribute__((unused)) void *arg)
 			if (m == NULL)
 				continue;
 
-			ret = read(tap_fd, m->pkt.data, MAX_PACKET_SZ);
+			ret = read(tap_fd, m->data, MAX_PACKET_SZ);
 			lcore_stats[lcore_id].rx++;
 			if (unlikely(ret < 0)) {
 				FATAL_ERROR("Reading from %s interface failed",
 				            tap_name);
 			}
-			m->pkt.nb_segs = 1;
-			m->pkt.next = NULL;
-			m->pkt.pkt_len = (uint16_t)ret;
-			m->pkt.data_len = (uint16_t)ret;
+			m->nb_segs = 1;
+			m->next = NULL;
+			m->pkt_len = (uint16_t)ret;
+			m->data_len = (uint16_t)ret;
 			ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1);
 			if (unlikely(ret < 1)) {
 				rte_pktmbuf_free(m);
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 72cd2b2..ac8d4f7 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -342,7 +342,7 @@  l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
 		}
 
 		/* if we don't need to do any fragmentation */
-		if (likely (IPV4_MTU_DEFAULT >= m->pkt.pkt_len)) {
+		if (likely (IPV4_MTU_DEFAULT >= m->pkt_len)) {
 			qconf->tx_mbufs[port_out].m_table[len] = m;
 			len2 = 1;
 		} else {
@@ -379,7 +379,7 @@  l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
 		}
 
 		/* if we don't need to do any fragmentation */
-		if (likely (IPV6_MTU_DEFAULT >= m->pkt.pkt_len)) {
+		if (likely (IPV6_MTU_DEFAULT >= m->pkt_len)) {
 			qconf->tx_mbufs[port_out].m_table[len] = m;
 			len2 = 1;
 		} else {
@@ -413,7 +413,7 @@  l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
 			rte_panic("No headroom in mbuf.\n");
 		}
 
-		m->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+		m->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
 
 		/* 02:00:00:00:00:xx */
 		d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
diff --git a/examples/ip_pipeline/pipeline_rx.c b/examples/ip_pipeline/pipeline_rx.c
index e43ebfa..7a8309c 100644
--- a/examples/ip_pipeline/pipeline_rx.c
+++ b/examples/ip_pipeline/pipeline_rx.c
@@ -255,8 +255,8 @@  app_pkt_metadata_fill(struct rte_mbuf *m)
 	/* Pop Ethernet header */
 	if (app.ether_hdr_pop_push) {
 		rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
-		m->pkt.vlan_macip.f.l2_len = 0;
-		m->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+		m->vlan_macip.f.l2_len = 0;
+		m->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
 	}
 }
 
diff --git a/examples/ip_pipeline/pipeline_tx.c b/examples/ip_pipeline/pipeline_tx.c
index 3bf2c8b..b9491e3 100644
--- a/examples/ip_pipeline/pipeline_tx.c
+++ b/examples/ip_pipeline/pipeline_tx.c
@@ -66,7 +66,7 @@  app_pkt_metadata_flush(struct rte_mbuf *pkt)
 	ether_addr_copy(&pkt_meta->nh_arp, &ether_hdr->d_addr);
 	ether_addr_copy(&local_ether_addr, &ether_hdr->s_addr);
 	ether_hdr->ether_type = rte_bswap16(ETHER_TYPE_IPv4);
-	pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+	pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
 }
 
 static int
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 3bb6afd..8184aad 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -412,8 +412,8 @@  reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
 			dr = &qconf->death_row;
 
 			/* prepare mbuf: setup l2_len/l3_len. */
-			m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
-			m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr);
+			m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
+			m->vlan_macip.f.l3_len = sizeof(*ip_hdr);
 
 			/* process this fragment. */
 			mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
@@ -455,8 +455,8 @@  reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
 			dr  = &qconf->death_row;
 
 			/* prepare mbuf: setup l2_len/l3_len. */
-			m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
-			m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
+			m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
+			m->vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
 
 			mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr, frag_hdr);
 			if (mo == NULL)
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 7b53296..cc12d9d 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -329,17 +329,17 @@  mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
 	}
 
 	/* prepend new header */
-	hdr->pkt.next = pkt;
+	hdr->next = pkt;
 
 
 	/* update header's fields */
-	hdr->pkt.pkt_len = (uint16_t)(hdr->pkt.data_len + pkt->pkt.pkt_len);
-	hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
+	hdr->pkt_len = (uint16_t)(hdr->data_len + pkt->pkt_len);
+	hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);
 
 	/* copy metadata from source packet*/
-	hdr->pkt.in_port = pkt->pkt.in_port;
-	hdr->pkt.vlan_macip = pkt->pkt.vlan_macip;
-	hdr->pkt.hash = pkt->pkt.hash;
+	hdr->in_port = pkt->in_port;
+	hdr->vlan_macip = pkt->vlan_macip;
+	hdr->hash = pkt->hash;
 
 	hdr->ol_flags = pkt->ol_flags;
 
@@ -412,7 +412,7 @@  mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
 
 	/* Should we use rte_pktmbuf_clone() or not. */
 	use_clone = (port_num <= MCAST_CLONE_PORTS &&
-	    m->pkt.nb_segs <= MCAST_CLONE_SEGS);
+	    m->nb_segs <= MCAST_CLONE_SEGS);
 
 	/* Mark all packet's segments as referenced port_num times */
 	if (use_clone == 0)
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index eac0eab..bafd26a 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -700,7 +700,7 @@  prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
 			unsigned char *) + sizeof(struct ether_hdr));
 
 		/* Check to make sure the packet is valid (RFC1812) */
-		if (is_valid_ipv4_pkt(ipv4_hdr, pkt->pkt.pkt_len) >= 0) {
+		if (is_valid_ipv4_pkt(ipv4_hdr, pkt->pkt_len) >= 0) {
 
 			/* Update time to live and header checksum */
 			--(ipv4_hdr->time_to_live);
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 57fc371..a9d5c80 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -687,7 +687,7 @@  l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
 
 #ifdef DO_RFC_1812_CHECKS
 		/* Check to make sure the packet is valid (RFC1812) */
-		if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+		if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
 			rte_pktmbuf_free(m);
 			return;
 		}
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index 2ca5c21..7b1e08a 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -489,7 +489,7 @@  l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd
 
 #ifdef DO_RFC_1812_CHECKS
 	/* Check to make sure the packet is valid (RFC1812) */
-	if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+	if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
 		rte_pktmbuf_free(m);
 		return;
 	}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index bef409a..e3e3463 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -809,19 +809,19 @@  simple_ipv4_fwd_4pkts(struct rte_mbuf* m[4], uint8_t portid, struct lcore_conf *
 #ifdef DO_RFC_1812_CHECKS
 	/* Check to make sure the packet is valid (RFC1812) */
 	uint8_t valid_mask = MASK_ALL_PKTS;
-	if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt.pkt_len) < 0) {
+	if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) {
 		rte_pktmbuf_free(m[0]);
 		valid_mask &= EXECLUDE_1ST_PKT;
 	}
-	if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt.pkt_len) < 0) {
+	if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) {
 		rte_pktmbuf_free(m[1]);
 		valid_mask &= EXECLUDE_2ND_PKT;
 	}
-	if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt.pkt_len) < 0) {
+	if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) {
 		rte_pktmbuf_free(m[2]);
 		valid_mask &= EXECLUDE_3RD_PKT;
 	}
-	if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt.pkt_len) < 0) {
+	if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) {
 		rte_pktmbuf_free(m[3]);
 		valid_mask &= EXECLUDE_4TH_PKT;
 	}
@@ -1009,7 +1009,7 @@  l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
 
 #ifdef DO_RFC_1812_CHECKS
 		/* Check to make sure the packet is valid (RFC1812) */
-		if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+		if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
 			rte_pktmbuf_free(m);
 			return;
 		}
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 9612392..b69917b 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -540,7 +540,7 @@  app_lcore_worker(
 			ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
 
 			if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
-				port = pkt->pkt.in_port;
+				port = pkt->in_port;
 			}
 
 			pos = lp->mbuf_out[port].n_mbufs;
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index 91f70eb..71e4a48 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -211,7 +211,7 @@  enqueue_packet(struct rte_mbuf *buf, uint8_t port)
 static void
 handle_packet(struct rte_mbuf *buf)
 {
-	const uint8_t in_port = buf->pkt.in_port;
+	const uint8_t in_port = buf->in_port;
 	const uint8_t out_port = output_ports[in_port];
 
 	enqueue_packet(buf, out_port);
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 579698b..c8bd62f 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -104,8 +104,8 @@  static void send_pause_frame(uint8_t port_id, uint16_t duration)
     pause_frame->opcode = rte_cpu_to_be_16(0x0001);
     pause_frame->param  = rte_cpu_to_be_16(duration);
 
-    mbuf->pkt.pkt_len  = 60;
-    mbuf->pkt.data_len = 60;
+    mbuf->pkt_len  = 60;
+    mbuf->data_len = 60;
 
     rte_eth_tx_burst(port_id, 0, &mbuf, 1);
 }
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index f9ae8bc..f0f8cfa 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1033,7 +1033,7 @@  virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 
 		/* Copy mbuf data to buffer */
 		rte_memcpy((void *)(uintptr_t)buff_addr,
-			(const void *)buff->pkt.data,
+			(const void *)buff->data,
 			rte_pktmbuf_data_len(buff));
 		PRINT_PACKET(dev, (uintptr_t)buff_addr,
 			rte_pktmbuf_data_len(buff), 0);
@@ -1198,7 +1198,7 @@  copy_from_mbuf_to_vring(struct virtio_net *dev,
 			 * This current segment complete, need continue to
 			 * check if the whole packet complete or not.
 			 */
-			pkt = pkt->pkt.next;
+			pkt = pkt->next;
 			if (pkt != NULL) {
 				/*
 				 * There are more segments.
@@ -1302,7 +1302,7 @@  virtio_dev_merge_rx(struct virtio_net *dev, struct rte_mbuf **pkts,
 		uint32_t secure_len = 0;
 		uint16_t need_cnt;
 		uint32_t vec_idx = 0;
-		uint32_t pkt_len = pkts[pkt_idx]->pkt.pkt_len + vq->vhost_hlen;
+		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + vq->vhost_hlen;
 		uint16_t i, id;
 
 		do {
@@ -1438,7 +1438,7 @@  link_vmdq(struct virtio_net *dev, struct rte_mbuf *m)
 	int i, ret;
 
 	/* Learn MAC address of guest device from packet */
-	pkt_hdr = (struct ether_hdr *)m->pkt.data;
+	pkt_hdr = (struct ether_hdr *)m->data;
 
 	dev_ll = ll_root_used;
 
@@ -1525,7 +1525,7 @@  virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
 	struct ether_hdr *pkt_hdr;
 	uint64_t ret = 0;
 
-	pkt_hdr = (struct ether_hdr *)m->pkt.data;
+	pkt_hdr = (struct ether_hdr *)m->data;
 
 	/*get the used devices list*/
 	dev_ll = ll_root_used;
@@ -1593,7 +1593,7 @@  virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
 	unsigned len, ret, offset = 0;
 	const uint16_t lcore_id = rte_lcore_id();
 	struct virtio_net_data_ll *dev_ll = ll_root_used;
-	struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data;
+	struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->data;
 
 	/*check if destination is local VM*/
 	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(dev, m) == 0))
@@ -1647,27 +1647,27 @@  virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
 		return;
 	}
 
-	mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN + offset;
-	mbuf->pkt.pkt_len = m->pkt.pkt_len + VLAN_HLEN + offset;
-	mbuf->pkt.nb_segs = m->pkt.nb_segs;
+	mbuf->data_len = m->data_len + VLAN_HLEN + offset;
+	mbuf->pkt_len = m->pkt_len + VLAN_HLEN + offset;
+	mbuf->nb_segs = m->nb_segs;
 
 	/* Copy ethernet header to mbuf. */
-	rte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);
+	rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);
 
 
 	/* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
-	vlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;
+	vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
 	vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
 	vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
 	vlan_hdr->h_vlan_TCI = htons(vlan_tag);
 
 	/* Copy the remaining packet contents to the mbuf. */
-	rte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),
-		(const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), (m->pkt.data_len - ETH_HLEN));
+	rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
+		(const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - ETH_HLEN));
 
 	/* Copy the remaining segments for the whole packet. */
 	prev = mbuf;
-	while (m->pkt.next) {
+	while (m->next) {
 		/* Allocate an mbuf and populate the structure. */
 		struct rte_mbuf *next_mbuf = rte_pktmbuf_alloc(mbuf_pool);
 		if (unlikely(next_mbuf == NULL)) {
@@ -1677,14 +1677,14 @@  virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
 			return;
 		}
 
-		m = m->pkt.next;
-		prev->pkt.next = next_mbuf;
+		m = m->next;
+		prev->next = next_mbuf;
 		prev = next_mbuf;
-		next_mbuf->pkt.data_len = m->pkt.data_len;
+		next_mbuf->data_len = m->data_len;
 
 		/* Copy data to next mbuf. */
 		rte_memcpy(rte_pktmbuf_mtod(next_mbuf, void *),
-			rte_pktmbuf_mtod(m, const void *), m->pkt.data_len);
+			rte_pktmbuf_mtod(m, const void *), m->data_len);
 	}
 
 	tx_q->m_table[len] = mbuf;
@@ -1776,9 +1776,9 @@  virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
 		vq->used->ring[used_idx].len = 0;
 
 		/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
-		m.pkt.data_len = desc->len;
-		m.pkt.pkt_len = desc->len;
-		m.pkt.data = (void*)(uintptr_t)buff_addr;
+		m.data_len = desc->len;
+		m.pkt_len = desc->len;
+		m.data = (void*)(uintptr_t)buff_addr;
 
 		PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);
 
@@ -1914,8 +1914,8 @@  virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 				 * while the virtio buffer in TX vring has
 				 * more data to be copied.
 				 */
-				cur->pkt.data_len = seg_offset;
-				m->pkt.pkt_len += seg_offset;
+				cur->data_len = seg_offset;
+				m->pkt_len += seg_offset;
 				/* Allocate mbuf and populate the structure. */
 				cur = rte_pktmbuf_alloc(mbuf_pool);
 				if (unlikely(cur == NULL)) {
@@ -1927,7 +1927,7 @@  virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 				}
 
 				seg_num++;
-				prev->pkt.next = cur;
+				prev->next = cur;
 				prev = cur;
 				seg_offset = 0;
 				seg_avail = buf_size;
@@ -1943,8 +1943,8 @@  virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 						 * room to accomodate more
 						 * data.
 						 */
-						cur->pkt.data_len = seg_offset;
-						m->pkt.pkt_len += seg_offset;
+						cur->data_len = seg_offset;
+						m->pkt_len += seg_offset;
 						/*
 						 * Allocate an mbuf and
 						 * populate the structure.
@@ -1961,7 +1961,7 @@  virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 							break;
 						}
 						seg_num++;
-						prev->pkt.next = cur;
+						prev->next = cur;
 						prev = cur;
 						seg_offset = 0;
 						seg_avail = buf_size;
@@ -1980,8 +1980,8 @@  virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 						desc->len, 0);
 				} else {
 					/* The whole packet completes. */
-					cur->pkt.data_len = seg_offset;
-					m->pkt.pkt_len += seg_offset;
+					cur->data_len = seg_offset;
+					m->pkt_len += seg_offset;
 					vb_avail = 0;
 				}
 			}
@@ -1992,7 +1992,7 @@  virtio_dev_merge_tx(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
 		if (unlikely(alloc_err == 1))
 			break;
 
-		m->pkt.nb_segs = seg_num;
+		m->nb_segs = seg_num;
 
 		/*
 		 * If this is the first received packet we need to learn
@@ -2333,9 +2333,9 @@  attach_rxmbuf_zcp(struct virtio_net *dev)
 	}
 
 	mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM);
-	mbuf->pkt.data = (void *)(uintptr_t)(buff_addr);
+	mbuf->data = (void *)(uintptr_t)(buff_addr);
 	mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM;
-	mbuf->pkt.data_len = desc->len;
+	mbuf->data_len = desc->len;
 	MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
 
 	LOG_DEBUG(VHOST_DATA,
@@ -2370,9 +2370,9 @@  static inline void pktmbuf_detach_zcp(struct rte_mbuf *m)
 
 	buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
 			RTE_PKTMBUF_HEADROOM : m->buf_len;
-	m->pkt.data = (char *) m->buf_addr + buf_ofs;
+	m->data = (char *) m->buf_addr + buf_ofs;
 
-	m->pkt.data_len = 0;
+	m->data_len = 0;
 }
 
 /*
@@ -2604,7 +2604,7 @@  virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
 	unsigned len, ret, offset = 0;
 	struct vpool *vpool;
 	struct virtio_net_data_ll *dev_ll = ll_root_used;
-	struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data;
+	struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->data;
 	uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
 
 	/*Add packet to the port tx queue*/
@@ -2675,24 +2675,24 @@  virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
 		}
 	}
 
-	mbuf->pkt.nb_segs = m->pkt.nb_segs;
-	mbuf->pkt.next = m->pkt.next;
-	mbuf->pkt.data_len = m->pkt.data_len + offset;
-	mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+	mbuf->nb_segs = m->nb_segs;
+	mbuf->next = m->next;
+	mbuf->data_len = m->data_len + offset;
+	mbuf->pkt_len = mbuf->data_len;
 	if (unlikely(need_copy)) {
 		/* Copy the packet contents to the mbuf. */
-		rte_memcpy((void *)((uint8_t *)mbuf->pkt.data),
-			(const void *) ((uint8_t *)m->pkt.data),
-			m->pkt.data_len);
+		rte_memcpy((void *)((uint8_t *)mbuf->data),
+			(const void *) ((uint8_t *)m->data),
+			m->data_len);
 	} else {
-		mbuf->pkt.data = m->pkt.data;
+		mbuf->data = m->data;
 		mbuf->buf_physaddr = m->buf_physaddr;
 		mbuf->buf_addr = m->buf_addr;
 	}
 	mbuf->ol_flags = PKT_TX_VLAN_PKT;
-	mbuf->pkt.vlan_macip.f.vlan_tci = vlan_tag;
-	mbuf->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
-	mbuf->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+	mbuf->vlan_macip.f.vlan_tci = vlan_tag;
+	mbuf->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+	mbuf->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
 	MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
 
 	tx_q->m_table[len] = mbuf;
@@ -2701,8 +2701,8 @@  virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
 	LOG_DEBUG(VHOST_DATA,
 		"(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n",
 		dev->device_fh,
-		mbuf->pkt.nb_segs,
-		(mbuf->pkt.next == NULL) ? "null" : "non-null");
+		mbuf->nb_segs,
+		(mbuf->next == NULL) ? "null" : "non-null");
 
 	if (enable_stats) {
 		dev_statistics[dev->device_fh].tx_total++;
@@ -2816,11 +2816,11 @@  virtio_dev_tx_zcp(struct virtio_net *dev)
 		 * Setup dummy mbuf. This is copied to a real mbuf if
 		 * transmitted out the physical port.
 		 */
-		m.pkt.data_len = desc->len;
-		m.pkt.nb_segs = 1;
-		m.pkt.next = NULL;
-		m.pkt.data = (void *)(uintptr_t)buff_addr;
-		m.buf_addr = m.pkt.data;
+		m.data_len = desc->len;
+		m.nb_segs = 1;
+		m.next = NULL;
+		m.data = (void *)(uintptr_t)buff_addr;
+		m.buf_addr = m.data;
 		m.buf_physaddr = phys_addr;
 
 		/*
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
index b275747..8162cd8 100644
--- a/examples/vhost_xen/main.c
+++ b/examples/vhost_xen/main.c
@@ -677,7 +677,7 @@  virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
 		vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
 
 		/* Copy mbuf data to buffer */
-		rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->pkt.data, rte_pktmbuf_data_len(buff));
+		rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->data, rte_pktmbuf_data_len(buff));
 
 		res_cur_idx++;
 		packet_success++;
@@ -808,7 +808,7 @@  virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
 	struct ether_hdr *pkt_hdr;
 	uint64_t ret = 0;
 
-	pkt_hdr = (struct ether_hdr *)m->pkt.data;
+	pkt_hdr = (struct ether_hdr *)m->data;
 
 	/*get the used devices list*/
 	dev_ll = ll_root_used;
@@ -879,22 +879,22 @@  virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
 	if(!mbuf)
 		return;
 
-	mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN;
-	mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+	mbuf->data_len = m->data_len + VLAN_HLEN;
+	mbuf->pkt_len = mbuf->data_len;
 
 	/* Copy ethernet header to mbuf. */
-	rte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);
+	rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);
 
 
 	/* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
-	vlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;
+	vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
 	vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
 	vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
 	vlan_hdr->h_vlan_TCI = htons(vlan_tag);
 
 	/* Copy the remaining packet contents to the mbuf. */
-	rte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),
-		(const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), (m->pkt.data_len - ETH_HLEN));
+	rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
+		(const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - ETH_HLEN));
 	tx_q->m_table[len] = mbuf;
 	len++;
 	if (enable_stats) {
@@ -980,9 +980,9 @@  virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
 		rte_prefetch0((void*)(uintptr_t)buff_addr);
 
 		/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
-		m.pkt.data_len = desc->len;
-		m.pkt.data = (void*)(uintptr_t)buff_addr;
-		m.pkt.nb_segs = 1;
+		m.data_len = desc->len;
+		m.data = (void*)(uintptr_t)buff_addr;
+		m.nb_segs = 1;
 
 		virtio_tx_route(dev, &m, mbuf_pool, 0);
 
diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index 2d92e45..585ff88 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -282,7 +282,7 @@  rte_distributor_process(struct rte_distributor *d,
 			next_mb = mbufs[next_idx++];
 			next_value = (((int64_t)(uintptr_t)next_mb)
 					<< RTE_DISTRIB_FLAG_BITS);
-			new_tag = (next_mb->pkt.hash.rss | 1);
+			new_tag = (next_mb->hash.rss | 1);
 
 			uint32_t match = 0;
 			unsigned i;
diff --git a/lib/librte_ip_frag/ip_frag_common.h b/lib/librte_ip_frag/ip_frag_common.h
index 70be949..81ca23a 100644
--- a/lib/librte_ip_frag/ip_frag_common.h
+++ b/lib/librte_ip_frag/ip_frag_common.h
@@ -173,20 +173,20 @@  ip_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
 	struct rte_mbuf *ms;
 
 	/* adjust start of the last fragment data. */
-	rte_pktmbuf_adj(mp, (uint16_t)(mp->pkt.vlan_macip.f.l2_len +
-		mp->pkt.vlan_macip.f.l3_len));
+	rte_pktmbuf_adj(mp, (uint16_t)(mp->vlan_macip.f.l2_len +
+		mp->vlan_macip.f.l3_len));
 
 	/* chain two fragments. */
 	ms = rte_pktmbuf_lastseg(mn);
-	ms->pkt.next = mp;
+	ms->next = mp;
 
 	/* accumulate number of segments and total length. */
-	mn->pkt.nb_segs = (uint8_t)(mn->pkt.nb_segs + mp->pkt.nb_segs);
-	mn->pkt.pkt_len += mp->pkt.pkt_len;
+	mn->nb_segs = (uint8_t)(mn->nb_segs + mp->nb_segs);
+	mn->pkt_len += mp->pkt_len;
 
 	/* reset pkt_len and nb_segs for chained fragment. */
-	mp->pkt.pkt_len = mp->pkt.data_len;
-	mp->pkt.nb_segs = 1;
+	mp->pkt_len = mp->data_len;
+	mp->nb_segs = 1;
 }
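
[The chaining above is the core multi-segment pattern under the new layout: link via next, and keep the totals (nb_segs, pkt_len) only in the first segment. The same idea as a standalone helper; example_chain is hypothetical, not part of this patch:

	/* Hypothetical helper: append packet 'tail' to packet 'head'. */
	static inline void
	example_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
	{
		struct rte_mbuf *last = rte_pktmbuf_lastseg(head);

		last->next = tail;                       /* link the segments */
		head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
		head->pkt_len += tail->pkt_len;          /* totals in first seg only */
	}
]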
 
 
diff --git a/lib/librte_ip_frag/rte_ipv4_fragmentation.c b/lib/librte_ip_frag/rte_ipv4_fragmentation.c
index 9d4e1f7..0b10310 100644
--- a/lib/librte_ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/librte_ip_frag/rte_ipv4_fragmentation.c
@@ -109,7 +109,7 @@  rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 	/* Fragment size should be a multiple of 8. */
 	IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
 
-	in_hdr = (struct ipv4_hdr *) pkt_in->pkt.data;
+	in_hdr = (struct ipv4_hdr *) pkt_in->data;
 	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
 
 	/* If Don't Fragment flag is set */
@@ -118,7 +118,7 @@  rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 
 	/* Check that pkts_out is big enough to hold all fragments */
 	if (unlikely(frag_size * nb_pkts_out <
-	    (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr))))
+	    (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
 		return -EINVAL;
 
 	in_seg = pkt_in;
@@ -140,8 +140,8 @@  rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 		}
 
 		/* Reserve space for the IP header that will be built later */
-		out_pkt->pkt.data_len = sizeof(struct ipv4_hdr);
-		out_pkt->pkt.pkt_len = sizeof(struct ipv4_hdr);
+		out_pkt->data_len = sizeof(struct ipv4_hdr);
+		out_pkt->pkt_len = sizeof(struct ipv4_hdr);
 
 		out_seg_prev = out_pkt;
 		more_out_segs = 1;
@@ -156,29 +156,29 @@  rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 				__free_fragments(pkts_out, out_pkt_pos);
 				return -ENOMEM;
 			}
-			out_seg_prev->pkt.next = out_seg;
+			out_seg_prev->next = out_seg;
 			out_seg_prev = out_seg;
 
 			/* Prepare indirect buffer */
 			rte_pktmbuf_attach(out_seg, in_seg);
-			len = mtu_size - out_pkt->pkt.pkt_len;
-			if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
-				len = in_seg->pkt.data_len - in_seg_data_pos;
+			len = mtu_size - out_pkt->pkt_len;
+			if (len > (in_seg->data_len - in_seg_data_pos)) {
+				len = in_seg->data_len - in_seg_data_pos;
 			}
-			out_seg->pkt.data = (char*) in_seg->pkt.data + (uint16_t)in_seg_data_pos;
-			out_seg->pkt.data_len = (uint16_t)len;
-			out_pkt->pkt.pkt_len = (uint16_t)(len +
-			    out_pkt->pkt.pkt_len);
-			out_pkt->pkt.nb_segs += 1;
+			out_seg->data = (char*) in_seg->data + (uint16_t)in_seg_data_pos;
+			out_seg->data_len = (uint16_t)len;
+			out_pkt->pkt_len = (uint16_t)(len +
+			    out_pkt->pkt_len);
+			out_pkt->nb_segs += 1;
 			in_seg_data_pos += len;
 
 			/* Current output packet (i.e. fragment) done ? */
-			if (unlikely(out_pkt->pkt.pkt_len >= mtu_size))
+			if (unlikely(out_pkt->pkt_len >= mtu_size))
 				more_out_segs = 0;
 
 			/* Current input segment done ? */
-			if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
-				in_seg = in_seg->pkt.next;
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
 				in_seg_data_pos = 0;
 
 				if (unlikely(in_seg == NULL))
@@ -188,17 +188,17 @@  rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 
 		/* Build the IP header */
 
-		out_hdr = (struct ipv4_hdr*) out_pkt->pkt.data;
+		out_hdr = (struct ipv4_hdr*) out_pkt->data;
 
 		__fill_ipv4hdr_frag(out_hdr, in_hdr,
-		    (uint16_t)out_pkt->pkt.pkt_len,
+		    (uint16_t)out_pkt->pkt_len,
 		    flag_offset, fragment_offset, more_in_segs);
 
 		fragment_offset = (uint16_t)(fragment_offset +
-		    out_pkt->pkt.pkt_len - sizeof(struct ipv4_hdr));
+		    out_pkt->pkt_len - sizeof(struct ipv4_hdr));
 
 		out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
-		out_pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+		out_pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
 
 		/* Write the fragment to the output list */
 		pkts_out[out_pkt_pos] = out_pkt;
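
[Callers of the fragmentation routine are unaffected by the rename; only the internals changed. A hedged usage sketch, with direct_pool/indirect_pool assumed to be caller-created mempools:

	struct rte_mbuf *frags[8];
	int32_t nb = rte_ipv4_fragment_packet(pkt, frags, RTE_DIM(frags),
			mtu, direct_pool, indirect_pool);
	if (nb < 0) {
		/* -EINVAL or -ENOMEM: no fragments were produced */
		rte_pktmbuf_free(pkt);
	} else {
		/* each fragment references pkt's data via rte_pktmbuf_attach(),
		 * so the input mbuf can be freed once fragmentation succeeds */
		rte_pktmbuf_free(pkt);
		/* ... transmit frags[0..nb-1] ... */
	}
]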
diff --git a/lib/librte_ip_frag/rte_ipv4_reassembly.c b/lib/librte_ip_frag/rte_ipv4_reassembly.c
index a27b23a..06c37af 100644
--- a/lib/librte_ip_frag/rte_ipv4_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv4_reassembly.c
@@ -87,10 +87,10 @@  ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
 
 	/* update ipv4 header for the reassembled packet */
 	ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
-		m->pkt.vlan_macip.f.l2_len);
+		m->vlan_macip.f.l2_len);
 
 	ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
-		m->pkt.vlan_macip.f.l3_len));
+		m->vlan_macip.f.l3_len));
 	ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
 		rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
 	ip_hdr->hdr_checksum = 0;
@@ -137,7 +137,7 @@  rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
 
 	ip_ofs *= IPV4_HDR_OFFSET_UNITS;
 	ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
-		mb->pkt.vlan_macip.f.l3_len);
+		mb->vlan_macip.f.l3_len);
 
 	IP_FRAG_LOG(DEBUG, "%s:%d:\n"
 		"mbuf: %p, tms: %" PRIu64
diff --git a/lib/librte_ip_frag/rte_ipv6_fragmentation.c b/lib/librte_ip_frag/rte_ipv6_fragmentation.c
index fa04991..e007662 100644
--- a/lib/librte_ip_frag/rte_ipv6_fragmentation.c
+++ b/lib/librte_ip_frag/rte_ipv6_fragmentation.c
@@ -122,10 +122,10 @@  rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
 
 	/* Check that pkts_out is big enough to hold all fragments */
 	if (unlikely (frag_size * nb_pkts_out <
-	    (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv6_hdr))))
+	    (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv6_hdr))))
 		return (-EINVAL);
 
-	in_hdr = (struct ipv6_hdr *) pkt_in->pkt.data;
+	in_hdr = (struct ipv6_hdr *) pkt_in->data;
 
 	in_seg = pkt_in;
 	in_seg_data_pos = sizeof(struct ipv6_hdr);
@@ -146,8 +146,8 @@  rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
 		}
 
 		/* Reserve space for the IP header that will be built later */
-		out_pkt->pkt.data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
-		out_pkt->pkt.pkt_len  = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+		out_pkt->data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+		out_pkt->pkt_len  = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
 
 		out_seg_prev = out_pkt;
 		more_out_segs = 1;
@@ -162,30 +162,30 @@  rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
 				__free_fragments(pkts_out, out_pkt_pos);
 				return (-ENOMEM);
 			}
-			out_seg_prev->pkt.next = out_seg;
+			out_seg_prev->next = out_seg;
 			out_seg_prev = out_seg;
 
 			/* Prepare indirect buffer */
 			rte_pktmbuf_attach(out_seg, in_seg);
-			len = mtu_size - out_pkt->pkt.pkt_len;
-			if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
-				len = in_seg->pkt.data_len - in_seg_data_pos;
+			len = mtu_size - out_pkt->pkt_len;
+			if (len > (in_seg->data_len - in_seg_data_pos)) {
+				len = in_seg->data_len - in_seg_data_pos;
 			}
-			out_seg->pkt.data = (char *) in_seg->pkt.data + (uint16_t) in_seg_data_pos;
-			out_seg->pkt.data_len = (uint16_t)len;
-			out_pkt->pkt.pkt_len = (uint16_t)(len +
-			    out_pkt->pkt.pkt_len);
-			out_pkt->pkt.nb_segs += 1;
+			out_seg->data = (char *) in_seg->data + (uint16_t) in_seg_data_pos;
+			out_seg->data_len = (uint16_t)len;
+			out_pkt->pkt_len = (uint16_t)(len +
+			    out_pkt->pkt_len);
+			out_pkt->nb_segs += 1;
 			in_seg_data_pos += len;
 
 			/* Current output packet (i.e. fragment) done ? */
-			if (unlikely(out_pkt->pkt.pkt_len >= mtu_size)) {
+			if (unlikely(out_pkt->pkt_len >= mtu_size)) {
 				more_out_segs = 0;
 			}
 
 			/* Current input segment done ? */
-			if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
-				in_seg = in_seg->pkt.next;
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
 				in_seg_data_pos = 0;
 
 				if (unlikely(in_seg == NULL)) {
@@ -196,14 +196,14 @@  rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
 
 		/* Build the IP header */
 
-		out_hdr = (struct ipv6_hdr *) out_pkt->pkt.data;
+		out_hdr = (struct ipv6_hdr *) out_pkt->data;
 
 		__fill_ipv6hdr_frag(out_hdr, in_hdr,
-		    (uint16_t) out_pkt->pkt.pkt_len - sizeof(struct ipv6_hdr),
+		    (uint16_t) out_pkt->pkt_len - sizeof(struct ipv6_hdr),
 		    fragment_offset, more_in_segs);
 
 		fragment_offset = (uint16_t)(fragment_offset +
-		    out_pkt->pkt.pkt_len - sizeof(struct ipv6_hdr)
+		    out_pkt->pkt_len - sizeof(struct ipv6_hdr)
 			- sizeof(struct ipv6_extension_fragment));
 
 		/* Write the fragment to the output list */
diff --git a/lib/librte_ip_frag/rte_ipv6_reassembly.c b/lib/librte_ip_frag/rte_ipv6_reassembly.c
index 3f06960..dee3425 100644
--- a/lib/librte_ip_frag/rte_ipv6_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv6_reassembly.c
@@ -109,7 +109,7 @@  ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
 
 	/* update ipv6 header for the reassembled datagram */
 	ip_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(m, uint8_t *) +
-								  m->pkt.vlan_macip.f.l2_len);
+								  m->vlan_macip.f.l2_len);
 
 	ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);
 
@@ -120,7 +120,7 @@  ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
 	 * other headers, so we assume there are no other headers and thus update
 	 * the main IPv6 header instead.
 	 */
-	move_len = m->pkt.vlan_macip.f.l2_len + m->pkt.vlan_macip.f.l3_len -
+	move_len = m->vlan_macip.f.l2_len + m->vlan_macip.f.l3_len -
 			sizeof(*frag_hdr);
 	frag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);
 	ip_hdr->proto = frag_hdr->next_header;
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 3e72a70..d12a0bf 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -117,12 +117,12 @@  rte_pktmbuf_init(struct rte_mempool *mp,
 	m->buf_len = (uint16_t)buf_len;
 
 	/* keep some headroom between start of buffer and data */
-	m->pkt.data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
+	m->data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
 
 	/* init some constant fields */
 	m->pool = mp;
-	m->pkt.nb_segs = 1;
-	m->pkt.in_port = 0xff;
+	m->nb_segs = 1;
+	m->in_port = 0xff;
 }
 
 /* do some sanity checks on a mbuf: panic if it fails */
@@ -153,10 +153,10 @@  rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
 	if (is_header == 0)
 		return;
 
-	nb_segs = m->pkt.nb_segs;
+	nb_segs = m->nb_segs;
 	m_seg = m;
 	while (m_seg && nb_segs != 0) {
-		m_seg = m_seg->pkt.next;
+		m_seg = m_seg->next;
 		nb_segs--;
 	}
 	if (nb_segs != 0)
@@ -175,22 +175,22 @@  rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
 	fprintf(f, "dump mbuf at 0x%p, phys=%"PRIx64", buf_len=%u\n",
 	       m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
 	fprintf(f, "  pkt_len=%"PRIu32", ol_flags=%"PRIx16", nb_segs=%u, "
-	       "in_port=%u\n", m->pkt.pkt_len, m->ol_flags,
-	       (unsigned)m->pkt.nb_segs, (unsigned)m->pkt.in_port);
-	nb_segs = m->pkt.nb_segs;
+	       "in_port=%u\n", m->pkt_len, m->ol_flags,
+	       (unsigned)m->nb_segs, (unsigned)m->in_port);
+	nb_segs = m->nb_segs;
 
 	while (m && nb_segs != 0) {
 		__rte_mbuf_sanity_check(m, 0);
 
 		fprintf(f, "  segment at 0x%p, data=0x%p, data_len=%u\n",
-		       m, m->pkt.data, (unsigned)m->pkt.data_len);
+		       m, m->data, (unsigned)m->data_len);
 		len = dump_len;
-		if (len > m->pkt.data_len)
-			len = m->pkt.data_len;
+		if (len > m->data_len)
+			len = m->data_len;
 		if (len != 0)
-			rte_hexdump(f, NULL, m->pkt.data, len);
+			rte_hexdump(f, NULL, m->data, len);
 		dump_len -= len;
-		m = m->pkt.next;
+		m = m->next;
 		nb_segs --;
 	}
 }
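
[The sanity check and the dump both walk the chain the same way; under the new layout that walk is simply (sketch):

	uint32_t total = 0;
	struct rte_mbuf *seg;

	for (seg = m; seg != NULL; seg = seg->next)
		total += seg->data_len;
	/* for a well-formed packet: total == m->pkt_len, and the
	 * chain has exactly m->nb_segs links */
]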
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index a0ae2e9..d66b9bd 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -133,32 +133,6 @@  union rte_vlan_macip {
 #define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
 
 /**
- * A packet message buffer.
- */
-struct rte_pktmbuf {
-	/* valid for any segment */
-	struct rte_mbuf *next;  /**< Next segment of scattered packet. */
-	void* data;             /**< Start address of data in segment buffer. */
-	uint16_t data_len;      /**< Amount of data in segment buffer. */
-
-	/* these fields are valid for first segment only */
-	uint8_t nb_segs;        /**< Number of segments. */
-	uint8_t in_port;        /**< Input port. */
-	uint32_t pkt_len;       /**< Total pkt len: sum of all segment data_len. */
-
-	/* offload features */
-	union rte_vlan_macip vlan_macip;
-	union {
-		uint32_t rss;       /**< RSS hash result if RSS enabled */
-		struct {
-			uint16_t hash;
-			uint16_t id;
-		} fdir;             /**< Filter identifier if FDIR enabled */
-		uint32_t sched;     /**< Hierarchical scheduler */
-	} hash;                 /**< hash information */
-};
-
-/**
  * The generic rte_mbuf, containing a packet mbuf.
  */
 struct rte_mbuf {
@@ -185,7 +159,26 @@  struct rte_mbuf {
 	uint16_t reserved;            /**< Unused field. Required for padding */
 	uint16_t ol_flags;            /**< Offload features. */
 
-	struct rte_pktmbuf pkt;
+	/* valid for any segment */
+	struct rte_mbuf *next;  /**< Next segment of scattered packet. */
+	void* data;             /**< Start address of data in segment buffer. */
+	uint16_t data_len;      /**< Amount of data in segment buffer. */
+
+	/* these fields are valid for first segment only */
+	uint8_t nb_segs;        /**< Number of segments. */
+	uint8_t in_port;        /**< Input port. */
+	uint32_t pkt_len;       /**< Total pkt len: sum of all segment data_len. */
+
+	/* offload features, valid for first segment only */
+	union rte_vlan_macip vlan_macip;
+	union {
+		uint32_t rss;       /**< RSS hash result if RSS enabled */
+		struct {
+			uint16_t hash;
+			uint16_t id;
+		} fdir;             /**< Filter identifier if FDIR enabled */
+		uint32_t sched;     /**< Hierarchical scheduler */
+	} hash;                 /**< hash information */
 
 	union {
 		uint8_t metadata[0];
@@ -478,7 +471,7 @@  void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
  * @param m
  *   The control mbuf.
  */
-#define rte_ctrlmbuf_data(m) ((m)->pkt.data)
+#define rte_ctrlmbuf_data(m) ((m)->data)
 
 /**
  * A macro that returns the length of the carried data.
@@ -545,18 +538,18 @@  static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
 {
 	uint32_t buf_ofs;
 
-	m->pkt.next = NULL;
-	m->pkt.pkt_len = 0;
-	m->pkt.vlan_macip.data = 0;
-	m->pkt.nb_segs = 1;
-	m->pkt.in_port = 0xff;
+	m->next = NULL;
+	m->pkt_len = 0;
+	m->vlan_macip.data = 0;
+	m->nb_segs = 1;
+	m->in_port = 0xff;
 
 	m->ol_flags = 0;
 	buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
 			RTE_PKTMBUF_HEADROOM : m->buf_len;
-	m->pkt.data = (char*) m->buf_addr + buf_ofs;
+	m->data = (char*) m->buf_addr + buf_ofs;
 
-	m->pkt.data_len = 0;
+	m->data_len = 0;
 	__rte_mbuf_sanity_check(m, 1);
 }
 
@@ -610,11 +603,16 @@  static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
 	mi->buf_addr = md->buf_addr;
 	mi->buf_len = md->buf_len;
 
-	mi->pkt = md->pkt;
+	mi->next = md->next;
+	mi->data = md->data;
+	mi->data_len = md->data_len;
+	mi->in_port = md->in_port;
+	mi->vlan_macip = md->vlan_macip;
+	mi->hash = md->hash;
 
-	mi->pkt.next = NULL;
-	mi->pkt.pkt_len = mi->pkt.data_len;
-	mi->pkt.nb_segs = 1;
+	mi->next = NULL;
+	mi->pkt_len = mi->data_len;
+	mi->nb_segs = 1;
 	mi->ol_flags = md->ol_flags;
 
 	__rte_mbuf_sanity_check(mi, 1);
@@ -644,9 +642,9 @@  static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
 
 	buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
 			RTE_PKTMBUF_HEADROOM : m->buf_len;
-	m->pkt.data = (char*) m->buf_addr + buf_ofs;
+	m->data = (char*) m->buf_addr + buf_ofs;
 
-	m->pkt.data_len = 0;
+	m->data_len = 0;
 }
 
 #endif /* RTE_MBUF_REFCNT */
@@ -713,7 +711,7 @@  static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 	__rte_mbuf_sanity_check(m, 1);
 
 	while (m != NULL) {
-		m_next = m->pkt.next;
+		m_next = m->next;
 		rte_pktmbuf_free_seg(m);
 		m = m_next;
 	}
@@ -749,21 +747,21 @@  static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
 		return (NULL);
 
 	mi = mc;
-	prev = &mi->pkt.next;
-	pktlen = md->pkt.pkt_len;
+	prev = &mi->next;
+	pktlen = md->pkt_len;
 	nseg = 0;
 
 	do {
 		nseg++;
 		rte_pktmbuf_attach(mi, md);
 		*prev = mi;
-		prev = &mi->pkt.next;
-	} while ((md = md->pkt.next) != NULL &&
+		prev = &mi->next;
+	} while ((md = md->next) != NULL &&
 	    (mi = rte_pktmbuf_alloc(mp)) != NULL);
 
 	*prev = NULL;
-	mc->pkt.nb_segs = nseg;
-	mc->pkt.pkt_len = pktlen;
+	mc->nb_segs = nseg;
+	mc->pkt_len = pktlen;
 
 	/* Allocation of new indirect segment failed */
 	if (unlikely (mi == NULL)) {
@@ -792,7 +790,7 @@  static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
 
 	do {
 		rte_mbuf_refcnt_update(m, v);
-	} while ((m = m->pkt.next) != NULL);
+	} while ((m = m->next) != NULL);
 }
 
 #endif /* RTE_MBUF_REFCNT */
@@ -808,7 +806,7 @@  static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
 static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
 {
 	__rte_mbuf_sanity_check(m, 1);
-	return (uint16_t) ((char*) m->pkt.data - (char*) m->buf_addr);
+	return (uint16_t) ((char*) m->data - (char*) m->buf_addr);
 }
 
 /**
@@ -823,7 +821,7 @@  static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
 {
 	__rte_mbuf_sanity_check(m, 1);
 	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
-			  m->pkt.data_len);
+			  m->data_len);
 }
 
 /**
@@ -839,8 +837,8 @@  static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
 	struct rte_mbuf *m2 = (struct rte_mbuf *)m;
 
 	__rte_mbuf_sanity_check(m, 1);
-	while (m2->pkt.next != NULL)
-		m2 = m2->pkt.next;
+	while (m2->next != NULL)
+		m2 = m2->next;
 	return m2;
 }
 
@@ -856,7 +854,7 @@  static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
  * @param t
  *   The type to cast the result into.
  */
-#define rte_pktmbuf_mtod(m, t) ((t)((m)->pkt.data))
+#define rte_pktmbuf_mtod(m, t) ((t)((m)->data))
 
 /**
  * A macro that returns the length of the packet.
@@ -866,7 +864,7 @@  static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
  * @param m
  *   The packet mbuf.
  */
-#define rte_pktmbuf_pkt_len(m) ((m)->pkt.pkt_len)
+#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
 
 /**
  * A macro that returns the length of the segment.
@@ -876,7 +874,7 @@  static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
  * @param m
  *   The packet mbuf.
  */
-#define rte_pktmbuf_data_len(m) ((m)->pkt.data_len)
+#define rte_pktmbuf_data_len(m) ((m)->data_len)
 
 /**
  * Prepend len bytes to an mbuf data area.
@@ -901,11 +899,11 @@  static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
 	if (unlikely(len > rte_pktmbuf_headroom(m)))
 		return NULL;
 
-	m->pkt.data = (char*) m->pkt.data - len;
-	m->pkt.data_len = (uint16_t)(m->pkt.data_len + len);
-	m->pkt.pkt_len  = (m->pkt.pkt_len + len);
+	m->data = (char*) m->data - len;
+	m->data_len = (uint16_t)(m->data_len + len);
+	m->pkt_len  = (m->pkt_len + len);
 
-	return (char*) m->pkt.data;
+	return (char*) m->data;
 }
 
 /**
@@ -934,9 +932,9 @@  static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
 	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
 		return NULL;
 
-	tail = (char*) m_last->pkt.data + m_last->pkt.data_len;
-	m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len + len);
-	m->pkt.pkt_len  = (m->pkt.pkt_len + len);
+	tail = (char*) m_last->data + m_last->data_len;
+	m_last->data_len = (uint16_t)(m_last->data_len + len);
+	m->pkt_len  = (m->pkt_len + len);
 	return (char*) tail;
 }
 
@@ -958,13 +956,13 @@  static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
 {
 	__rte_mbuf_sanity_check(m, 1);
 
-	if (unlikely(len > m->pkt.data_len))
+	if (unlikely(len > m->data_len))
 		return NULL;
 
-	m->pkt.data_len = (uint16_t)(m->pkt.data_len - len);
-	m->pkt.data = ((char*) m->pkt.data + len);
-	m->pkt.pkt_len  = (m->pkt.pkt_len - len);
-	return (char*) m->pkt.data;
+	m->data_len = (uint16_t)(m->data_len - len);
+	m->data = ((char*) m->data + len);
+	m->pkt_len  = (m->pkt_len - len);
+	return (char*) m->data;
 }
 
 /**
@@ -988,11 +986,11 @@  static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
 	__rte_mbuf_sanity_check(m, 1);
 
 	m_last = rte_pktmbuf_lastseg(m);
-	if (unlikely(len > m_last->pkt.data_len))
+	if (unlikely(len > m_last->data_len))
 		return -1;
 
-	m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len - len);
-	m->pkt.pkt_len  = (m->pkt.pkt_len - len);
+	m_last->data_len = (uint16_t)(m_last->data_len - len);
+	m->pkt_len  = (m->pkt_len - len);
 	return 0;
 }
 
@@ -1008,7 +1006,7 @@  static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
 {
 	__rte_mbuf_sanity_check(m, 1);
-	return !!(m->pkt.nb_segs == 1);
+	return !!(m->nb_segs == 1);
 }
 
 /**
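
[The header diff above is where the change is visible to applications: every m->pkt.X becomes m->X, and the accessor macros hide the rest. A small sketch of typical first-segment parsing after this patch; example_eth_hdr is illustrative only, and pre-patch code would have read m->pkt.data_len and used m->pkt.data instead:

	static inline struct ether_hdr *
	example_eth_hdr(struct rte_mbuf *m)
	{
		if (m->data_len < sizeof(struct ether_hdr))
			return NULL;                     /* runt first segment */
		return rte_pktmbuf_mtod(m, struct ether_hdr *);
	}
]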
diff --git a/lib/librte_pmd_bond/rte_eth_bond_pmd.c b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
index d72d6ed..5979ce5 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_pmd.c
+++ b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
@@ -198,14 +198,14 @@  xmit_slave_hash(const struct rte_mbuf *buf, uint8_t slave_count, uint8_t policy)
 
 	switch (policy) {
 	case BALANCE_XMIT_POLICY_LAYER2:
-		eth_hdr = (struct ether_hdr *)buf->pkt.data;
+		eth_hdr = (struct ether_hdr *)buf->data;
 
 		hash = ether_hash(eth_hdr);
 		hash ^= hash >> 8;
 		return hash % slave_count;
 
 	case BALANCE_XMIT_POLICY_LAYER23:
-		eth_hdr = (struct ether_hdr *)buf->pkt.data;
+		eth_hdr = (struct ether_hdr *)buf->data;
 
 		if (buf->ol_flags & PKT_RX_VLAN_PKT)
 			eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 3304f50..058e1bd 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -91,7 +91,7 @@  rte_rxmbuf_alloc(struct rte_mempool *mp)
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb)             \
 	(uint64_t) ((mb)->buf_physaddr +       \
-	(uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr))
+	(uint64_t) ((char *)((mb)->data) - (char *)(mb)->buf_addr))
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
 	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
@@ -421,7 +421,7 @@  eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
 							PKT_TX_L4_MASK));
 		if (tx_ol_req) {
-			hdrlen = tx_pkt->pkt.vlan_macip;
+			hdrlen = tx_pkt->vlan_macip;
 			/* If a new context needs to be built, or the existing ctx is reused. */
 			ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
 
@@ -434,7 +434,7 @@  eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * This will always be the number of segments + the number of
 		 * Context descriptors required to transmit the packet
 		 */
-		nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
 
 		/*
 		 * The number of descriptors that must be allocated for a
@@ -454,7 +454,7 @@  eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			" tx_first=%u tx_last=%u\n",
 			(unsigned) txq->port_id,
 			(unsigned) txq->queue_id,
-			(unsigned) tx_pkt->pkt.pkt_len,
+			(unsigned) tx_pkt->pkt_len,
 			(unsigned) tx_id,
 			(unsigned) tx_last);
 
@@ -516,7 +516,7 @@  eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Set VLAN Tag offload fields. */
 		if (ol_flags & PKT_TX_VLAN_PKT) {
 			cmd_type_len |= E1000_TXD_CMD_VLE;
-			popts_spec = tx_pkt->pkt.vlan_macip.f.vlan_tci <<
+			popts_spec = tx_pkt->vlan_macip.f.vlan_tci <<
 				E1000_TXD_VLAN_SHIFT;
 		}
 
@@ -566,7 +566,7 @@  eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/*
 			 * Set up Transmit Data Descriptor.
 			 */
-			slen = m_seg->pkt.data_len;
+			slen = m_seg->data_len;
 			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
 
 			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
@@ -576,7 +576,7 @@  eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->pkt.next;
+			m_seg = m_seg->next;
 		} while (m_seg != NULL);
 
 		/*
@@ -771,20 +771,20 @@  eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
 				rxq->crc_len);
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-		rte_packet_prefetch(rxm->pkt.data);
-		rxm->pkt.nb_segs = 1;
-		rxm->pkt.next = NULL;
-		rxm->pkt.pkt_len = pkt_len;
-		rxm->pkt.data_len = pkt_len;
-		rxm->pkt.in_port = rxq->port_id;
+		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch(rxm->data);
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->in_port = rxq->port_id;
 
 		rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
 		rxm->ol_flags = (uint16_t)(rxm->ol_flags |
 				rx_desc_error_to_pkt_flags(rxd.errors));
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+		rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -940,8 +940,8 @@  eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * Set data length & data buffer address of mbuf.
 		 */
 		data_len = rte_le_to_cpu_16(rxd.length);
-		rxm->pkt.data_len = data_len;
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->data_len = data_len;
+		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
 
 		/*
 		 * If this is the first buffer of the received packet,
@@ -953,12 +953,12 @@  eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		if (first_seg == NULL) {
 			first_seg = rxm;
-			first_seg->pkt.pkt_len = data_len;
-			first_seg->pkt.nb_segs = 1;
+			first_seg->pkt_len = data_len;
+			first_seg->nb_segs = 1;
 		} else {
-			first_seg->pkt.pkt_len += data_len;
-			first_seg->pkt.nb_segs++;
-			last_seg->pkt.next = rxm;
+			first_seg->pkt_len += data_len;
+			first_seg->nb_segs++;
+			last_seg->next = rxm;
 		}
 
 		/*
@@ -981,18 +981,18 @@  eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 *     mbuf, subtract the length of that CRC part from the
 		 *     data length of the previous mbuf.
 		 */
-		rxm->pkt.next = NULL;
+		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+			first_seg->pkt_len -= ETHER_CRC_LEN;
 			if (data_len <= ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
-				first_seg->pkt.nb_segs--;
-				last_seg->pkt.data_len = (uint16_t)
-					(last_seg->pkt.data_len -
+				first_seg->nb_segs--;
+				last_seg->data_len = (uint16_t)
+					(last_seg->data_len -
 					 (ETHER_CRC_LEN - data_len));
-				last_seg->pkt.next = NULL;
+				last_seg->next = NULL;
 			} else
-				rxm->pkt.data_len =
+				rxm->data_len =
 					(uint16_t) (data_len - ETHER_CRC_LEN);
 		}
 
@@ -1003,17 +1003,17 @@  eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 *      - IP checksum flag,
 		 *      - error flags.
 		 */
-		first_seg->pkt.in_port = rxq->port_id;
+		first_seg->in_port = rxq->port_id;
 
 		first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
 		first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
 					rx_desc_error_to_pkt_flags(rxd.errors));
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+		rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/* Prefetch data of first segment, if configured to do so. */
-		rte_packet_prefetch(first_seg->pkt.data);
+		rte_packet_prefetch(first_seg->data);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
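
[The CRC-stripping branch in the scattered receive path above (repeated in the igb, i40e and ixgbe drivers below) is the subtlest part of the conversion: the 4-byte Ethernet CRC may sit entirely in the last mbuf, or straddle the last two. A commented sketch of that logic, with first, last and rxm standing for the driver's first_seg, last_seg and current mbuf:

	first->pkt_len -= ETHER_CRC_LEN;          /* total never includes CRC */
	if (data_len <= ETHER_CRC_LEN) {
		/* the last mbuf holds nothing but (part of) the CRC: drop it
		 * and shave the remaining CRC bytes off the previous segment */
		rte_pktmbuf_free_seg(rxm);
		first->nb_segs--;
		last->data_len = (uint16_t)(last->data_len -
				(ETHER_CRC_LEN - data_len));
		last->next = NULL;
	} else
		rxm->data_len = (uint16_t)(data_len - ETHER_CRC_LEN);
]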
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index b0112be..99bb9d9 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -96,7 +96,7 @@  rte_rxmbuf_alloc(struct rte_mempool *mp)
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
 	(uint64_t) ((mb)->buf_physaddr +		   \
-			(uint64_t) ((char *)((mb)->pkt.data) -     \
+			(uint64_t) ((char *)((mb)->data) -     \
 				(char *)(mb)->buf_addr))
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
@@ -365,7 +365,7 @@  eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		tx_pkt = *tx_pkts++;
-		pkt_len = tx_pkt->pkt.pkt_len;
+		pkt_len = tx_pkt->pkt_len;
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
@@ -377,10 +377,10 @@  eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * for the packet, starting from the current position (tx_id)
 		 * in the ring.
 		 */
-		tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
+		tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
 
 		ol_flags = tx_pkt->ol_flags;
-		vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+		vlan_macip_lens = tx_pkt->vlan_macip.data;
 		tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
 
 		/* If a Context Descriptor needs to be built. */
@@ -527,7 +527,7 @@  eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/*
 			 * Set up transmit descriptor.
 			 */
-			slen = (uint16_t) m_seg->pkt.data_len;
+			slen = (uint16_t) m_seg->data_len;
 			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
 			txd->read.buffer_addr =
 				rte_cpu_to_le_64(buf_dma_addr);
@@ -538,7 +538,7 @@  eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->pkt.next;
+			m_seg = m_seg->next;
 		} while (m_seg != NULL);
 
 		/*
@@ -753,18 +753,18 @@  eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
 				      rxq->crc_len);
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-		rte_packet_prefetch(rxm->pkt.data);
-		rxm->pkt.nb_segs = 1;
-		rxm->pkt.next = NULL;
-		rxm->pkt.pkt_len = pkt_len;
-		rxm->pkt.data_len = pkt_len;
-		rxm->pkt.in_port = rxq->port_id;
-
-		rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch(rxm->data);
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->in_port = rxq->port_id;
+
+		rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->pkt.vlan_macip.f.vlan_tci =
+		rxm->vlan_macip.f.vlan_tci =
 			rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -929,8 +929,8 @@  eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * Set data length & data buffer address of mbuf.
 		 */
 		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
-		rxm->pkt.data_len = data_len;
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->data_len = data_len;
+		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
 
 		/*
 		 * If this is the first buffer of the received packet,
@@ -942,12 +942,12 @@  eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		if (first_seg == NULL) {
 			first_seg = rxm;
-			first_seg->pkt.pkt_len = data_len;
-			first_seg->pkt.nb_segs = 1;
+			first_seg->pkt_len = data_len;
+			first_seg->nb_segs = 1;
 		} else {
-			first_seg->pkt.pkt_len += data_len;
-			first_seg->pkt.nb_segs++;
-			last_seg->pkt.next = rxm;
+			first_seg->pkt_len += data_len;
+			first_seg->nb_segs++;
+			last_seg->next = rxm;
 		}
 
 		/*
@@ -970,18 +970,18 @@  eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 *     mbuf, subtract the length of that CRC part from the
 		 *     data length of the previous mbuf.
 		 */
-		rxm->pkt.next = NULL;
+		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+			first_seg->pkt_len -= ETHER_CRC_LEN;
 			if (data_len <= ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
-				first_seg->pkt.nb_segs--;
-				last_seg->pkt.data_len = (uint16_t)
-					(last_seg->pkt.data_len -
+				first_seg->nb_segs--;
+				last_seg->data_len = (uint16_t)
+					(last_seg->data_len -
 					 (ETHER_CRC_LEN - data_len));
-				last_seg->pkt.next = NULL;
+				last_seg->next = NULL;
 			} else
-				rxm->pkt.data_len =
+				rxm->data_len =
 					(uint16_t) (data_len - ETHER_CRC_LEN);
 		}
 
@@ -994,14 +994,14 @@  eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 *      - VLAN TCI, if any,
 		 *      - error flags.
 		 */
-		first_seg->pkt.in_port = rxq->port_id;
-		first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+		first_seg->in_port = rxq->port_id;
+		first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
 
 		/*
 		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
 		 * set in the pkt_flags field.
 		 */
-		first_seg->pkt.vlan_macip.f.vlan_tci =
+		first_seg->vlan_macip.f.vlan_tci =
 			rte_le_to_cpu_16(rxd.wb.upper.vlan);
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -1012,7 +1012,7 @@  eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = pkt_flags;
 
 		/* Prefetch data of first segment, if configured to do so. */
-		rte_packet_prefetch(first_seg->pkt.data);
+		rte_packet_prefetch(first_seg->data);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index e1b3e2b..08a42ac 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -79,7 +79,7 @@ 
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
 	((uint64_t)((mb)->buf_physaddr + \
-	(uint64_t)((char *)((mb)->pkt.data) - \
+	(uint64_t)((char *)((mb)->data) - \
 	(char *)(mb)->buf_addr)))
 
 static const struct rte_memzone *
@@ -611,9 +611,9 @@  i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 						I40E_RXD_QW1_STATUS_SHIFT;
 			pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
 				I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
-			mb->pkt.data_len = pkt_len;
-			mb->pkt.pkt_len = pkt_len;
-			mb->pkt.vlan_macip.f.vlan_tci = rx_status &
+			mb->data_len = pkt_len;
+			mb->pkt_len = pkt_len;
+			mb->vlan_macip.f.vlan_tci = rx_status &
 				(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
 			rte_le_to_cpu_16(\
 				rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
@@ -622,7 +622,7 @@  i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 			pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
 			mb->ol_flags = pkt_flags;
 			if (pkt_flags & PKT_RX_RSS_HASH)
-				mb->pkt.hash.rss = rte_le_to_cpu_32(\
+				mb->hash.rss = rte_le_to_cpu_32(\
 					rxdp->wb.qword0.hi_dword.rss);
 		}
 
@@ -684,10 +684,10 @@  i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
 	for (i = 0; i < rxq->rx_free_thresh; i++) {
 		mb = rxep[i].mbuf;
 		rte_mbuf_refcnt_set(mb, 1);
-		mb->pkt.next = NULL;
-		mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
-		mb->pkt.nb_segs = 1;
-		mb->pkt.in_port = rxq->port_id;
+		mb->next = NULL;
+		mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+		mb->nb_segs = 1;
+		mb->in_port = rxq->port_id;
 		dma_addr = rte_cpu_to_le_64(\
 			RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
 		rxdp[i].read.hdr_addr = dma_addr;
@@ -842,15 +842,15 @@  i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
 				I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
 
-		rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-		rte_prefetch0(rxm->pkt.data);
-		rxm->pkt.nb_segs = 1;
-		rxm->pkt.next = NULL;
-		rxm->pkt.pkt_len = rx_packet_len;
-		rxm->pkt.data_len = rx_packet_len;
-		rxm->pkt.in_port = rxq->port_id;
+		rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rte_prefetch0(rxm->data);
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = rx_packet_len;
+		rxm->data_len = rx_packet_len;
+		rxm->in_port = rxq->port_id;
 
-		rxm->pkt.vlan_macip.f.vlan_tci = rx_status &
+		rxm->vlan_macip.f.vlan_tci = rx_status &
 			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
 			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -858,7 +858,7 @@  i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
 		rxm->ol_flags = pkt_flags;
 		if (pkt_flags & PKT_RX_RSS_HASH)
-			rxm->pkt.hash.rss =
+			rxm->hash.rss =
 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
 
 		rx_pkts[nb_rx++] = rxm;
@@ -945,8 +945,8 @@  i40e_recv_scattered_pkts(void *rx_queue,
 		rxdp->read.pkt_addr = dma_addr;
 		rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
 					I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-		rxm->pkt.data_len = rx_packet_len;
-		rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->data_len = rx_packet_len;
+		rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
 
 		/**
 		 * If this is the first buffer of the received packet, set the
@@ -957,14 +957,14 @@  i40e_recv_scattered_pkts(void *rx_queue,
 		 */
 		if (!first_seg) {
 			first_seg = rxm;
-			first_seg->pkt.nb_segs = 1;
-			first_seg->pkt.pkt_len = rx_packet_len;
+			first_seg->nb_segs = 1;
+			first_seg->pkt_len = rx_packet_len;
 		} else {
-			first_seg->pkt.pkt_len =
-				(uint16_t)(first_seg->pkt.pkt_len +
+			first_seg->pkt_len =
+				(uint16_t)(first_seg->pkt_len +
 						rx_packet_len);
-			first_seg->pkt.nb_segs++;
-			last_seg->pkt.next = rxm;
+			first_seg->nb_segs++;
+			last_seg->next = rxm;
 		}
 
 		/**
@@ -987,23 +987,23 @@  i40e_recv_scattered_pkts(void *rx_queue,
 		 *  the length of that CRC part from the data length of the
 		 *  previous mbuf.
 		 */
-		rxm->pkt.next = NULL;
+		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+			first_seg->pkt_len -= ETHER_CRC_LEN;
 			if (rx_packet_len <= ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
-				first_seg->pkt.nb_segs--;
-				last_seg->pkt.data_len =
-					(uint16_t)(last_seg->pkt.data_len -
+				first_seg->nb_segs--;
+				last_seg->data_len =
+					(uint16_t)(last_seg->data_len -
 					(ETHER_CRC_LEN - rx_packet_len));
-				last_seg->pkt.next = NULL;
+				last_seg->next = NULL;
 			} else
-				rxm->pkt.data_len = (uint16_t)(rx_packet_len -
+				rxm->data_len = (uint16_t)(rx_packet_len -
 								ETHER_CRC_LEN);
 		}
 
-		first_seg->pkt.in_port = rxq->port_id;
-		first_seg->pkt.vlan_macip.f.vlan_tci = (rx_status &
+		first_seg->in_port = rxq->port_id;
+		first_seg->vlan_macip.f.vlan_tci = (rx_status &
 			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
 			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -1011,11 +1011,11 @@  i40e_recv_scattered_pkts(void *rx_queue,
 		pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
 		first_seg->ol_flags = pkt_flags;
 		if (pkt_flags & PKT_RX_RSS_HASH)
-			rxm->pkt.hash.rss =
+			rxm->hash.rss =
 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
 
 		/* Prefetch data of first segment, if configured to do so. */
-		rte_prefetch0(first_seg->pkt.data);
+		rte_prefetch0(first_seg->data);
 		rx_pkts[nb_rx++] = first_seg;
 		first_seg = NULL;
 	}
@@ -1105,8 +1105,8 @@  i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
 		ol_flags = tx_pkt->ol_flags;
-		l2_len = tx_pkt->pkt.vlan_macip.f.l2_len;
-		l3_len = tx_pkt->pkt.vlan_macip.f.l3_len;
+		l2_len = tx_pkt->vlan_macip.f.l2_len;
+		l3_len = tx_pkt->vlan_macip.f.l3_len;
 
 		/* Calculate the number of context descriptors needed. */
 		nb_ctx = i40e_calc_context_desc(ol_flags);
@@ -1116,7 +1116,7 @@  i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 * a packet equals to the number of the segments of that
 		 * packet plus 1 context descriptor if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + nb_ctx);
+		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
 		tx_last = (uint16_t)(tx_id + nb_used - 1);
 
 		/* Circular ring */
@@ -1142,7 +1142,7 @@  i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/* Descriptor based VLAN insertion */
 		if (ol_flags & PKT_TX_VLAN_PKT) {
-			tx_flags |= tx_pkt->pkt.vlan_macip.f.vlan_tci <<
+			tx_flags |= tx_pkt->vlan_macip.f.vlan_tci <<
 						I40E_TX_FLAG_L2TAG1_SHIFT;
 			tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
 			td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -1199,7 +1199,7 @@  i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe->mbuf = m_seg;
 
 			/* Setup TX Descriptor */
-			slen = m_seg->pkt.data_len;
+			slen = m_seg->data_len;
 			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
 			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
 			txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
@@ -1207,7 +1207,7 @@  i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->pkt.next;
+			m_seg = m_seg->next;
 		} while (m_seg != NULL);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
@@ -1295,7 +1295,7 @@  tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 		txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
 		txdp->cmd_type_offset_bsz =
 			i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
-					(*pkts)->pkt.data_len, 0);
+					(*pkts)->data_len, 0);
 	}
 }
 
@@ -1309,7 +1309,7 @@  tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 	txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
 	txdp->cmd_type_offset_bsz =
 		i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
-				(*pkts)->pkt.data_len, 0);
+				(*pkts)->data_len, 0);
 }
 
 /* Fill hardware descriptor ring with mbuf data */
@@ -2130,10 +2130,10 @@  i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
 		}
 
 		rte_mbuf_refcnt_set(mbuf, 1);
-		mbuf->pkt.next = NULL;
-		mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
-		mbuf->pkt.nb_segs = 1;
-		mbuf->pkt.in_port = rxq->port_id;
+		mbuf->next = NULL;
+		mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->in_port = rxq->port_id;
 
 		dma_addr =
 			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 40ea4f8..c95e117 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -178,7 +178,7 @@  tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 
 	for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
 		buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
-		pkt_len = (*pkts)->pkt.data_len;
+		pkt_len = (*pkts)->data_len;
 
 		/* write data to descriptor */
 		txdp->read.buffer_addr = buf_dma_addr;
@@ -197,7 +197,7 @@  tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 	uint32_t pkt_len;
 
 	buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
-	pkt_len = (*pkts)->pkt.data_len;
+	pkt_len = (*pkts)->data_len;
 
 	/* write data to descriptor */
 	txdp->read.buffer_addr = buf_dma_addr;
@@ -570,7 +570,7 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		new_ctx = 0;
 		tx_pkt = *tx_pkts++;
-		pkt_len = tx_pkt->pkt.pkt_len;
+		pkt_len = tx_pkt->pkt_len;
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
@@ -579,7 +579,7 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
-		vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+		vlan_macip_lens = tx_pkt->vlan_macip.data;
 
 		/* If hardware offload required */
 		tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
@@ -597,7 +597,7 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * This will always be the number of segments + the number of
 		 * Context descriptors required to transmit the packet
 		 */
-		nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
 
 		/*
 		 * The number of descriptors that must be allocated for a
@@ -757,7 +757,7 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/*
 			 * Set up Transmit Data Descriptor.
 			 */
-			slen = m_seg->pkt.data_len;
+			slen = m_seg->data_len;
 			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
 			txd->read.buffer_addr =
 				rte_cpu_to_le_64(buf_dma_addr);
@@ -768,7 +768,7 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->pkt.next;
+			m_seg = m_seg->next;
 		} while (m_seg != NULL);
 
 		/*
@@ -937,10 +937,10 @@  ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 			mb = rxep[j].mbuf;
 			pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
 							rxq->crc_len);
-			mb->pkt.data_len = pkt_len;
-			mb->pkt.pkt_len = pkt_len;
-			mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
-			mb->pkt.hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+			mb->data_len = pkt_len;
+			mb->pkt_len = pkt_len;
+			mb->vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
+			mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
 
 			/* convert descriptor fields to rte mbuf flags */
 			mb->ol_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
@@ -995,10 +995,10 @@  ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 		/* populate the static rte mbuf fields */
 		mb = rxep[i].mbuf;
 		rte_mbuf_refcnt_set(mb, 1);
-		mb->pkt.next = NULL;
-		mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
-		mb->pkt.nb_segs = 1;
-		mb->pkt.in_port = rxq->port_id;
+		mb->next = NULL;
+		mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+		mb->nb_segs = 1;
+		mb->in_port = rxq->port_id;
 
 		/* populate the descriptors */
 		dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
@@ -1247,17 +1247,17 @@  ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
 				      rxq->crc_len);
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-		rte_packet_prefetch(rxm->pkt.data);
-		rxm->pkt.nb_segs = 1;
-		rxm->pkt.next = NULL;
-		rxm->pkt.pkt_len = pkt_len;
-		rxm->pkt.data_len = pkt_len;
-		rxm->pkt.in_port = rxq->port_id;
+		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch(rxm->data);
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->in_port = rxq->port_id;
 
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->pkt.vlan_macip.f.vlan_tci =
+		rxm->vlan_macip.f.vlan_tci =
 			rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -1268,12 +1268,12 @@  ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->ol_flags = pkt_flags;
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+			rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
 		else if (pkt_flags & PKT_RX_FDIR) {
-			rxm->pkt.hash.fdir.hash =
+			rxm->hash.fdir.hash =
 				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
 					   & IXGBE_ATR_HASH_MASK);
-			rxm->pkt.hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+			rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
 		}
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -1430,8 +1430,8 @@  ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * Set data length & data buffer address of mbuf.
 		 */
 		data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
-		rxm->pkt.data_len = data_len;
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->data_len = data_len;
+		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
 
 		/*
 		 * If this is the first buffer of the received packet,
@@ -1443,13 +1443,13 @@  ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		if (first_seg == NULL) {
 			first_seg = rxm;
-			first_seg->pkt.pkt_len = data_len;
-			first_seg->pkt.nb_segs = 1;
+			first_seg->pkt_len = data_len;
+			first_seg->nb_segs = 1;
 		} else {
-			first_seg->pkt.pkt_len = (uint16_t)(first_seg->pkt.pkt_len
+			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
 					+ data_len);
-			first_seg->pkt.nb_segs++;
-			last_seg->pkt.next = rxm;
+			first_seg->nb_segs++;
+			last_seg->next = rxm;
 		}
 
 		/*
@@ -1472,18 +1472,18 @@  ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 *     mbuf, subtract the length of that CRC part from the
 		 *     data length of the previous mbuf.
 		 */
-		rxm->pkt.next = NULL;
+		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+			first_seg->pkt_len -= ETHER_CRC_LEN;
 			if (data_len <= ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
-				first_seg->pkt.nb_segs--;
-				last_seg->pkt.data_len = (uint16_t)
-					(last_seg->pkt.data_len -
+				first_seg->nb_segs--;
+				last_seg->data_len = (uint16_t)
+					(last_seg->data_len -
 					 (ETHER_CRC_LEN - data_len));
-				last_seg->pkt.next = NULL;
+				last_seg->next = NULL;
 			} else
-				rxm->pkt.data_len =
+				rxm->data_len =
 					(uint16_t) (data_len - ETHER_CRC_LEN);
 		}
 
@@ -1496,13 +1496,13 @@  ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 *      - VLAN TCI, if any,
 		 *      - error flags.
 		 */
-		first_seg->pkt.in_port = rxq->port_id;
+		first_seg->in_port = rxq->port_id;
 
 		/*
 		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
 		 * set in the pkt_flags field.
 		 */
-		first_seg->pkt.vlan_macip.f.vlan_tci =
+		first_seg->vlan_macip.f.vlan_tci =
 				rte_le_to_cpu_16(rxd.wb.upper.vlan);
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -1513,17 +1513,17 @@  ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = pkt_flags;
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+			first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
 		else if (pkt_flags & PKT_RX_FDIR) {
-			first_seg->pkt.hash.fdir.hash =
+			first_seg->hash.fdir.hash =
 				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
 					   & IXGBE_ATR_HASH_MASK);
-			first_seg->pkt.hash.fdir.id =
+			first_seg->hash.fdir.id =
 				rxd.wb.lower.hi_dword.csum_ip.ip_id;
 		}
 
 		/* Prefetch data of first segment, if configured to do so. */
-		rte_packet_prefetch(first_seg->pkt.data);
+		rte_packet_prefetch(first_seg->data);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -3212,10 +3212,10 @@  ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 		}
 
 		rte_mbuf_refcnt_set(mbuf, 1);
-		mbuf->pkt.next = NULL;
-		mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
-		mbuf->pkt.nb_segs = 1;
-		mbuf->pkt.in_port = rxq->port_id;
+		mbuf->next = NULL;
+		mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->in_port = rxq->port_id;
 
 		dma_addr =
 			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 64c0695..4c9cb74 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -47,7 +47,7 @@ 
 #endif
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+	(uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
 	(char *)(mb)->buf_addr))
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
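
[The first macro above is the same computation every driver performs: the physical address of the current data pointer, i.e. the buffer's physical base plus the offset of m->data within the buffer. Written out as a function (sketch):

	static inline uint64_t
	example_data_dma_addr(const struct rte_mbuf *mb)
	{
		/* offset of mb->data inside the buffer covers the headroom
		 * plus any prepend/adj adjustments made since allocation */
		return mb->buf_physaddr +
			(uint64_t)((const char *)mb->data -
				   (const char *)mb->buf_addr);
	}
]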
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
index 047acf0..bafb215 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
@@ -48,9 +48,7 @@  static inline void
 ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
 {
 	static const struct rte_mbuf mb_def = {
-		.pkt = {
-			.nb_segs = 1,
-		},
+		.nb_segs = 1,
 	};
 	int i;
 	uint16_t rx_id;
@@ -68,7 +66,7 @@  ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
 
 	rxdp = rxq->rx_ring + rxq->rxrearm_start;
 
-	def_low = _mm_load_si128((__m128i *)&(mb_def.pkt));
+	def_low = _mm_load_si128((__m128i *)&(mb_def.next));
 
 	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
 	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
@@ -99,8 +97,8 @@  ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
 		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
 
 		/* flush mbuf with pkt template */
-		_mm_store_si128((__m128i *)&mb0->pkt, vaddr0);
-		_mm_store_si128((__m128i *)&mb1->pkt, vaddr1);
+		_mm_store_si128((__m128i *)&mb0->next, vaddr0);
+		_mm_store_si128((__m128i *)&mb1->next, vaddr1);
 
 		/* update refcnt per pkt */
 		rte_mbuf_refcnt_set(mb0, 1);
@@ -299,9 +297,9 @@  ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
 
 		/* D.3 copy final 3,4 data to rx_pkts */
-		_mm_storeu_si128((__m128i *)&(rx_pkts[pos+3]->pkt.data_len),
+		_mm_storeu_si128((__m128i *)&(rx_pkts[pos+3]->data_len),
 				pkt_mb4);
-		_mm_storeu_si128((__m128i *)&(rx_pkts[pos+2]->pkt.data_len),
+		_mm_storeu_si128((__m128i *)&(rx_pkts[pos+2]->data_len),
 				pkt_mb3);
 
 		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
@@ -313,9 +311,9 @@  ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		staterr = _mm_packs_epi32(staterr, zero);
 
 		/* D.3 copy final 1,2 data to rx_pkts */
-		_mm_storeu_si128((__m128i *)&(rx_pkts[pos+1]->pkt.data_len),
+		_mm_storeu_si128((__m128i *)&(rx_pkts[pos+1]->data_len),
 				pkt_mb2);
-		_mm_storeu_si128((__m128i *)&(rx_pkts[pos]->pkt.data_len),
+		_mm_storeu_si128((__m128i *)&(rx_pkts[pos]->data_len),
 				pkt_mb1);
 
 		/* C.4 calc available number of desc */
@@ -342,7 +340,7 @@  vtx1(volatile union ixgbe_adv_tx_desc *txdp,
 	/* load buf_addr/buf_physaddr in t0 */
 	t0 = _mm_loadu_si128((__m128i *)&(pkt->buf_addr));
 	/* load data, ... pkt_len in t1 */
-	t1 = _mm_loadu_si128((__m128i *)&(pkt->pkt.data));
+	t1 = _mm_loadu_si128((__m128i *)&(pkt->data));
 
 	/* calc offset = (data - buf_addr) */
 	offset = _mm_sub_epi64(t1, t0);
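
The vector path is the one place where removing the pkt sub-struct is more than a rename: the 16-byte template load/store now starts at next, the first of the merged fields, so the code depends on those fields staying contiguous and 16-byte aligned. A compile-time guard along these lines could make that explicit (a sketch using C11 _Static_assert; the layout it checks is an assumption, and DPDK code would more likely use RTE_BUILD_BUG_ON):

	#include <stddef.h>	/* offsetof; assumes rte_mbuf.h is included */

	/* The SSE template store above targets &mb->next; _mm_store_si128
	 * needs 16-byte alignment, so guard the layout assumption. */
	_Static_assert(offsetof(struct rte_mbuf, next) % 16 == 0,
		       "vector rearm template must be 16-byte aligned");
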
diff --git a/lib/librte_pmd_pcap/rte_eth_pcap.c b/lib/librte_pmd_pcap/rte_eth_pcap.c
index eebe768..121de65 100644
--- a/lib/librte_pmd_pcap/rte_eth_pcap.c
+++ b/lib/librte_pmd_pcap/rte_eth_pcap.c
@@ -151,9 +151,9 @@  eth_pcap_rx(void *queue,
 
 		if (header.len <= buf_size) {
 			/* pcap packet will fit in the mbuf, go ahead and copy */
-			rte_memcpy(mbuf->pkt.data, packet, header.len);
-			mbuf->pkt.data_len = (uint16_t)header.len;
-			mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+			rte_memcpy(mbuf->data, packet, header.len);
+			mbuf->data_len = (uint16_t)header.len;
+			mbuf->pkt_len = mbuf->data_len;
 			bufs[num_rx] = mbuf;
 			num_rx++;
 		} else {
@@ -200,9 +200,9 @@  eth_pcap_tx_dumper(void *queue,
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		calculate_timestamp(&header.ts);
-		header.len = mbuf->pkt.data_len;
+		header.len = mbuf->data_len;
 		header.caplen = header.len;
-		pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->pkt.data);
+		pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->data);
 		rte_pktmbuf_free(mbuf);
 		num_tx++;
 	}
@@ -237,8 +237,8 @@  eth_pcap_tx(void *queue,
 
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
-		ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->pkt.data,
-				mbuf->pkt.data_len);
+		ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->data,
+				mbuf->data_len);
 		if (unlikely(ret != 0))
 			break;
 		num_tx++;
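
For the single-segment mbufs the pcap driver produces, the patch keeps the invariant that the total packet length equals the first (only) segment's length. A sketch of that invariant with the merged field names:

	#include <assert.h>

	/* Invariant maintained by eth_pcap_rx() above; a sketch. */
	static void
	check_single_seg(const struct rte_mbuf *m)
	{
		assert(m->nb_segs == 1 && m->next == NULL);
		assert(m->pkt_len == m->data_len); /* total == only segment */
	}
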
diff --git a/lib/librte_pmd_virtio/virtio_rxtx.c b/lib/librte_pmd_virtio/virtio_rxtx.c
index a63ef17..55beceb 100644
--- a/lib/librte_pmd_virtio/virtio_rxtx.c
+++ b/lib/librte_pmd_virtio/virtio_rxtx.c
@@ -118,7 +118,7 @@  virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
 		}
 
 		rte_prefetch0(cookie);
-		rte_packet_prefetch(cookie->pkt.data);
+		rte_packet_prefetch(cookie->data);
 		rx_pkts[i]  = cookie;
 		vq->vq_used_cons_idx++;
 		vq_ring_free_chain(vq, desc_idx);
@@ -187,7 +187,7 @@  virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 {
 	struct vq_desc_extra *dxp;
 	struct vring_desc *start_dp;
-	uint16_t seg_num = cookie->pkt.nb_segs;
+	uint16_t seg_num = cookie->nb_segs;
 	uint16_t needed = 1 + seg_num;
 	uint16_t head_idx, idx;
 	uint16_t head_size = txvq->hw->vtnet_hdr_size;
@@ -216,9 +216,9 @@  virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 	for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
 		idx = start_dp[idx].next;
 		start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
-		start_dp[idx].len   = cookie->pkt.data_len;
+		start_dp[idx].len   = cookie->data_len;
 		start_dp[idx].flags = VRING_DESC_F_NEXT;
-		cookie = cookie->pkt.next;
+		cookie = cookie->next;
 	}
 
 	start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
@@ -479,18 +479,18 @@  virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			continue;
 		}
 
-		rxm->pkt.in_port = rxvq->port_id;
-		rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->in_port = rxvq->port_id;
+		rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
 
-		rxm->pkt.nb_segs = 1;
-		rxm->pkt.next = NULL;
-		rxm->pkt.pkt_len = (uint32_t)(len[i] - hdr_size);
-		rxm->pkt.data_len = (uint16_t)(len[i] - hdr_size);
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+		rxm->data_len = (uint16_t)(len[i] - hdr_size);
 
-		VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);
+		VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
 
 		rx_pkts[nb_rx++] = rxm;
-		rxvq->bytes += rx_pkts[nb_rx - 1]->pkt.pkt_len;
+		rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len;
 	}
 
 	rxvq->packets += nb_rx;
@@ -584,13 +584,13 @@  virtio_recv_mergeable_pkts(void *rx_queue,
 		if (seg_num == 0)
 			seg_num = 1;
 
-		rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-		rxm->pkt.nb_segs = seg_num;
-		rxm->pkt.next = NULL;
-		rxm->pkt.pkt_len = (uint32_t)(len[0] - hdr_size);
-		rxm->pkt.data_len = (uint16_t)(len[0] - hdr_size);
+		rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->nb_segs = seg_num;
+		rxm->next = NULL;
+		rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
+		rxm->data_len = (uint16_t)(len[0] - hdr_size);
 
-		rxm->pkt.in_port = rxvq->port_id;
+		rxm->in_port = rxvq->port_id;
 		rx_pkts[nb_rx] = rxm;
 		prev = rxm;
 
@@ -622,27 +622,27 @@  virtio_recv_mergeable_pkts(void *rx_queue,
 			while (extra_idx < rcv_cnt) {
 				rxm = rcv_pkts[extra_idx];
 
-				rxm->pkt.data =
+				rxm->data =
 					(char *)rxm->buf_addr +
 					RTE_PKTMBUF_HEADROOM - hdr_size;
-				rxm->pkt.next = NULL;
-				rxm->pkt.pkt_len = (uint32_t)(len[extra_idx]);
-				rxm->pkt.data_len = (uint16_t)(len[extra_idx]);
+				rxm->next = NULL;
+				rxm->pkt_len = (uint32_t)(len[extra_idx]);
+				rxm->data_len = (uint16_t)(len[extra_idx]);
 
 				if (prev)
-					prev->pkt.next = rxm;
+					prev->next = rxm;
 
 				prev = rxm;
-				rx_pkts[nb_rx]->pkt.pkt_len += rxm->pkt.pkt_len;
+				rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
 				extra_idx++;
 			}
 			seg_res -= rcv_cnt;
 		}
 
 		VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
-			rx_pkts[nb_rx]->pkt.data_len);
+			rx_pkts[nb_rx]->data_len);
 
-		rxvq->bytes += rx_pkts[nb_rx]->pkt.pkt_len;
+		rxvq->bytes += rx_pkts[nb_rx]->pkt_len;
 		nb_rx++;
 	}
 
@@ -699,7 +699,7 @@  virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
 
 	while (nb_tx < nb_pkts) {
-		int need = tx_pkts[nb_tx]->pkt.nb_segs - txvq->vq_free_cnt;
+		int need = tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt;
 		int deq_cnt = RTE_MIN(need, (int)num);
 
 		num -= (deq_cnt > 0) ? deq_cnt : 0;
@@ -708,7 +708,7 @@  virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			deq_cnt--;
 		}
 
-		if (tx_pkts[nb_tx]->pkt.nb_segs <= txvq->vq_free_cnt) {
+		if (tx_pkts[nb_tx]->nb_segs <= txvq->vq_free_cnt) {
 			txm = tx_pkts[nb_tx];
 			/* Enqueue Packet buffers */
 			error = virtqueue_enqueue_xmit(txvq, txm);
@@ -722,7 +722,7 @@  virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				break;
 			}
 			nb_tx++;
-			txvq->bytes += txm->pkt.pkt_len;
+			txvq->bytes += txm->pkt_len;
 		} else {
 			PMD_TX_LOG(ERR, "No free tx descriptors to transmit");
 			break;
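
virtqueue_enqueue_xmit() above walks the segment chain through the now top-level next pointer. The same walk, reduced to a sketch that sums the per-segment lengths (which, for a well-formed chain, equals the head's pkt_len):

	/* Chain walk with the merged fields; a sketch. */
	static uint32_t
	chain_bytes(const struct rte_mbuf *m)
	{
		uint32_t bytes = 0;

		for (; m != NULL; m = m->next)	/* formerly m->pkt.next */
			bytes += m->data_len;	/* formerly m->pkt.data_len */
		return bytes;			/* equals head's pkt_len */
	}
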
diff --git a/lib/librte_pmd_virtio/virtqueue.h b/lib/librte_pmd_virtio/virtqueue.h
index 87db35f..d777feb 100644
--- a/lib/librte_pmd_virtio/virtqueue.h
+++ b/lib/librte_pmd_virtio/virtqueue.h
@@ -59,7 +59,7 @@ 
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+	(uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
 	(char *)(mb)->buf_addr))
 
 #define VTNET_SQ_RQ_QUEUE_IDX 0
diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
index 9a9918d..04fc1e3 100644
--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
@@ -79,7 +79,7 @@ 
 
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+	(uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
 	(char *)(mb)->buf_addr))
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
@@ -289,7 +289,7 @@  vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 			txm = tx_pkts[nb_tx];
 			/* Scatter packets are not supported yet; free them if seen */
-			if (txm->pkt.nb_segs != 1) {
+			if (txm->nb_segs != 1) {
 				PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
 				rte_pktmbuf_free(tx_pkts[nb_tx]);
 				txq->stats.drop_total++;
@@ -299,7 +299,7 @@  vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			}
 
 			/* Need to subtract the Ethernet header length */
-			if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
+			if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
 				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
 				rte_pktmbuf_free(tx_pkts[nb_tx]);
 				txq->stats.drop_total++;
@@ -314,7 +314,7 @@  vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
 			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
 			txd->addr = tbi->bufPA;
-			txd->len = txm->pkt.data_len;
+			txd->len = txm->data_len;
 
 			/* Mark the last descriptor as End of Packet. */
 			txd->cq = 1;
@@ -551,21 +551,21 @@  vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 					       rte_pktmbuf_mtod(rxm, void *));
 #endif
 				/* Copy vlan tag in packet buffer */
-				rxm->pkt.vlan_macip.f.vlan_tci =
+				rxm->vlan_macip.f.vlan_tci =
 					rte_le_to_cpu_16((uint16_t)rcd->tci);
 
 			} else
 				rxm->ol_flags = 0;
 
 			/* Initialize newly received packet buffer */
-			rxm->pkt.in_port = rxq->port_id;
-			rxm->pkt.nb_segs = 1;
-			rxm->pkt.next = NULL;
-			rxm->pkt.pkt_len = (uint16_t)rcd->len;
-			rxm->pkt.data_len = (uint16_t)rcd->len;
-			rxm->pkt.in_port = rxq->port_id;
-			rxm->pkt.vlan_macip.f.vlan_tci = 0;
-			rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+			rxm->in_port = rxq->port_id;
+			rxm->nb_segs = 1;
+			rxm->next = NULL;
+			rxm->pkt_len = (uint16_t)rcd->len;
+			rxm->data_len = (uint16_t)rcd->len;
+			rxm->in_port = rxq->port_id;
+			rxm->vlan_macip.f.vlan_tci = 0;
+			rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
 
 			rx_pkts[nb_rx++] = rxm;
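
The vmxnet3 TX path rejects multi-segment and oversized packets before handing buffers to the device. Those two pre-checks, pulled out as a sketch (the function name and signature are illustrative only, not the driver's API):

	/* Illustrative TX admission check mirroring vmxnet3_xmit_pkts()
	 * above; a sketch. */
	static int
	vmxnet3_tx_admissible(const struct rte_mbuf *m, uint32_t cur_mtu)
	{
		if (m->nb_segs != 1)
			return 0;	/* scatter packets not supported yet */
		if (m->data_len > cur_mtu + ETHER_HDR_LEN)
			return 0;	/* larger than MTU plus Ethernet header */
		return 1;
	}
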
 
diff --git a/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c b/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c
index ba82319..c118652 100644
--- a/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c
+++ b/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c
@@ -109,12 +109,12 @@  eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	for (i = 0; i < num ; i ++) {
 		rxm = rx_pkts[i];
 		PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
-		rxm->pkt.next = NULL;
-		rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-		rxm->pkt.data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
-		rxm->pkt.nb_segs = 1;
-		rxm->pkt.in_port = pi->port_id;
-		rxm->pkt.pkt_len  = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
+		rxm->next = NULL;
+		rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
+		rxm->nb_segs = 1;
+		rxm->in_port = pi->port_id;
+		rxm->pkt_len  = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
 	}
 	/* allocate new mbuf for the used descriptor */
 	while (likely(!virtqueue_full(rxvq))) {
diff --git a/lib/librte_pmd_xenvirt/virtqueue.h b/lib/librte_pmd_xenvirt/virtqueue.h
index 81cd938..d8717fe 100644
--- a/lib/librte_pmd_xenvirt/virtqueue.h
+++ b/lib/librte_pmd_xenvirt/virtqueue.h
@@ -54,7 +54,7 @@ 
  * rather than gpa<->hva in virtio spec.
  */
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	((uint64_t)((mb)->pkt.data))
+	((uint64_t)((mb)->data))
 
 enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
 
@@ -238,7 +238,7 @@  virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 	start_dp[idx].addr  = (uintptr_t)NULL;
 	idx = start_dp[idx].next;
 	start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
-	start_dp[idx].len   = cookie->pkt.data_len;
+	start_dp[idx].len   = cookie->data_len;
 	start_dp[idx].flags = 0;
 	idx = start_dp[idx].next;
 	txvq->vq_desc_head_idx = idx;
diff --git a/lib/librte_port/rte_port_frag.c b/lib/librte_port/rte_port_frag.c
index ce5026f..9f1bd3c 100644
--- a/lib/librte_port/rte_port_frag.c
+++ b/lib/librte_port/rte_port_frag.c
@@ -159,7 +159,7 @@  rte_port_ring_reader_ipv4_frag_rx(void *port,
 		p->n_pkts--;
 
 		/* If not jumbo, pass current packet to output */
-		if (pkt->pkt.pkt_len <= IPV4_MTU_DEFAULT) {
+		if (pkt->pkt_len <= IPV4_MTU_DEFAULT) {
 			pkts[n_pkts_out++] = pkt;
 
 			n_pkts_to_provide = n_pkts - n_pkts_out;
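
The frag port's decision is a single comparison on the merged pkt_len field: packets at or under IPV4_MTU_DEFAULT pass through unchanged, larger ones get fragmented. As a sketch:

	/* Pass-or-fragment decision from the hunk above; a sketch.
	 * IPV4_MTU_DEFAULT comes from the surrounding file. */
	static inline int
	needs_fragmentation(const struct rte_mbuf *pkt)
	{
		return pkt->pkt_len > IPV4_MTU_DEFAULT;
	}
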
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 968c2b3..ba60277 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -1015,7 +1015,7 @@  rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex
 {
 	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
 	uint32_t tc_index = (qindex >> 2) & 0x3;
-	uint32_t pkt_len = pkt->pkt.pkt_len;
+	uint32_t pkt_len = pkt->pkt_len;
 
 	s->stats.n_pkts_tc[tc_index] += 1;
 	s->stats.n_bytes_tc[tc_index] += pkt_len;
@@ -1026,7 +1026,7 @@  rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_
 {
 	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
 	uint32_t tc_index = (qindex >> 2) & 0x3;
-	uint32_t pkt_len = pkt->pkt.pkt_len;
+	uint32_t pkt_len = pkt->pkt_len;
 
 	s->stats.n_pkts_tc_dropped[tc_index] += 1;
 	s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
@@ -1036,7 +1036,7 @@  static inline void
 rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
 {
 	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
-	uint32_t pkt_len = pkt->pkt.pkt_len;
+	uint32_t pkt_len = pkt->pkt_len;
 
 	qe->stats.n_pkts += 1;
 	qe->stats.n_bytes += pkt_len;
@@ -1046,7 +1046,7 @@  static inline void
 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
 {
 	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
-	uint32_t pkt_len = pkt->pkt.pkt_len;
+	uint32_t pkt_len = pkt->pkt_len;
 
 	qe->stats.n_pkts_dropped += 1;
 	qe->stats.n_bytes_dropped += pkt_len;
@@ -1563,7 +1563,7 @@  grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
 	struct rte_sched_pipe *pipe = grinder->pipe;
 	struct rte_mbuf *pkt = grinder->pkt;
 	uint32_t tc_index = grinder->tc_index;
-	uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
 	uint32_t subport_tb_credits = subport->tb_credits;
 	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
 	uint32_t pipe_tb_credits = pipe->tb_credits;
@@ -1599,7 +1599,7 @@  grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
 	struct rte_sched_pipe *pipe = grinder->pipe;
 	struct rte_mbuf *pkt = grinder->pkt;
 	uint32_t tc_index = grinder->tc_index;
-	uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
 	uint32_t subport_tb_credits = subport->tb_credits;
 	uint32_t subport_tc_credits = subport->tc_credits[tc_index];
 	uint32_t pipe_tb_credits = pipe->tb_credits;
@@ -1640,7 +1640,7 @@  grinder_schedule(struct rte_sched_port *port, uint32_t pos)
 	struct rte_sched_grinder *grinder = port->grinder + pos;
 	struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
 	struct rte_mbuf *pkt = grinder->pkt;
-	uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
 
 #if RTE_SCHED_TS_CREDITS_CHECK
 	if (!grinder_credits_check(port, pos)) {
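
Throughout the scheduler, the cost charged against token-bucket and traffic-class credits is pkt_len plus the configured frame overhead. With the default overhead of 24 bytes (7-byte preamble + 1-byte SFD + 4-byte FCS + 12-byte IFG, per the rte_sched.h comment below), a 64-byte frame therefore consumes 64 + 24 = 88 bytes of credit. As a sketch:

	/* Credit cost used by the grinder_*() functions above; a sketch.
	 * Example: 64-byte frame + 24-byte default overhead = 88 bytes. */
	static inline uint32_t
	sched_pkt_cost(const struct rte_mbuf *pkt, uint32_t frame_overhead)
	{
		return pkt->pkt_len + frame_overhead;
	}
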
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 3f27755..e6bba22 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -106,7 +106,7 @@  extern "C" {
    2. Start of Frame Delimiter (SFD):       1 byte;
    3. Frame Check Sequence (FCS):           4 bytes;
    4. Inter Frame Gap (IFG):               12 bytes.
-The FCS is considered overhead only if not included in the packet length (field pkt.pkt_len
+The FCS is considered overhead only if not included in the packet length (field pkt_len
 of struct rte_mbuf). */
 #ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
 #define RTE_SCHED_FRAME_OVERHEAD_DEFAULT      24
@@ -196,7 +196,7 @@  struct rte_sched_port_params {
 };
 
 /** Path through the scheduler hierarchy used by the scheduler enqueue operation to
-identify the destination queue for the current packet. Stored in the field pkt.hash.sched
+identify the destination queue for the current packet. Stored in the field hash.sched
 of struct rte_mbuf of each packet, typically written by the classification stage and read by
 scheduler enqueue.*/
 struct rte_sched_port_hierarchy {
@@ -352,7 +352,7 @@  static inline void
 rte_sched_port_pkt_write(struct rte_mbuf *pkt,
 	uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color)
 {
-	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
 
 	sched->color = (uint32_t) color;
 	sched->subport = subport;
@@ -381,7 +381,7 @@  rte_sched_port_pkt_write(struct rte_mbuf *pkt,
 static inline void
 rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
 {
-	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
 
 	*subport = sched->subport;
 	*pipe = sched->pipe;
@@ -392,7 +392,7 @@  rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint3
 static inline enum rte_meter_color
 rte_sched_port_pkt_read_color(struct rte_mbuf *pkt)
 {
-	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
 
 	return (enum rte_meter_color) sched->color;
 }
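
The three helpers above round-trip the scheduler path through the now top-level hash.sched field: the classification stage writes it, scheduler enqueue reads it back. A usage sketch (the path values are arbitrary):

	/* Usage sketch for the helpers above; values are arbitrary. */
	static void
	classify_pkt(struct rte_mbuf *m)
	{
		uint32_t subport, pipe, tc, queue;

		rte_sched_port_pkt_write(m, 0 /* subport */, 3 /* pipe */,
			1 /* traffic class */, 2 /* queue */, e_RTE_METER_GREEN);

		rte_sched_port_pkt_read_tree_path(m, &subport, &pipe, &tc, &queue);
		/* subport == 0, pipe == 3, tc == 1, queue == 2 */
	}
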