@@ -6376,7 +6376,6 @@ dump_struct_sizes(void)
{
#define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
DUMP_SIZE(struct rte_mbuf);
- DUMP_SIZE(struct rte_pktmbuf);
DUMP_SIZE(struct rte_mempool);
DUMP_SIZE(struct rte_ring);
#undef DUMP_SIZE
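This patch is mechanical throughout: the old struct rte_pktmbuf sub-struct (reached as mb->pkt) is folded directly into struct rte_mbuf, so every mb->pkt.field access becomes mb->field and the separate type disappears from the size dump above. A minimal before/after sketch, assuming the post-patch layout (the example_ helper is hypothetical):

#include <rte_mbuf.h>

/* Hypothetical helper: the same read, spelled old and new. */
static inline uint32_t
example_total_len(const struct rte_mbuf *m)
{
	/* before this patch: return m->pkt.pkt_len; */
	return m->pkt_len;
}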
@@ -263,7 +263,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
pkt_ol_flags = mb->ol_flags;
ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (eth_type == ETHER_TYPE_VLAN) {
/* Only allow single VLAN label here */
@@ -432,8 +432,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
/* Combine the packet header write. VLAN is not considered here */
- mb->pkt.vlan_macip.f.l2_len = l2_len;
- mb->pkt.vlan_macip.f.l3_len = l3_len;
+ mb->vlan_macip.f.l2_len = l2_len;
+ mb->vlan_macip.f.l3_len = l3_len;
mb->ol_flags = ol_flags;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
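For context, this checksum-forwarding path depends on l2_len/l3_len telling the hardware where each header starts. A hedged TX-side sketch under the new field names (hypothetical example_ helper; flags and types as used elsewhere in this patch):

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* Hypothetical helper: request IPv4 header checksum offload for an
 * untagged Ethernet/IPv4 packet, using the flattened field names. */
static inline void
example_request_ip_cksum(struct rte_mbuf *m)
{
	m->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
	m->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
	m->ol_flags |= PKT_TX_IP_CKSUM;
}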
@@ -171,11 +171,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
if (!pkt)
break;
- pkt->pkt.data_len = pkt_size;
- pkt->pkt.next = NULL;
+ pkt->data_len = pkt_size;
+ pkt->next = NULL;
/* Initialize Ethernet header. */
- eth_hdr = (struct ether_hdr *)pkt->pkt.data;
+ eth_hdr = (struct ether_hdr *)pkt->data;
ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
@@ -205,12 +205,12 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_size -
sizeof(*eth_hdr) -
sizeof(*ip_hdr));
- pkt->pkt.nb_segs = 1;
- pkt->pkt.pkt_len = pkt_size;
+ pkt->nb_segs = 1;
+ pkt->pkt_len = pkt_size;
pkt->ol_flags = ol_flags;
- pkt->pkt.vlan_macip.f.vlan_tci = vlan_tci;
- pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_macip.f.vlan_tci = vlan_tci;
+ pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
next_flow = (next_flow + 1) % cfg_n_flows;
@@ -330,12 +330,12 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
nb_replies = 0;
for (i = 0; i < nb_rx; i++) {
pkt = pkts_burst[i];
- eth_h = (struct ether_hdr *) pkt->pkt.data;
+ eth_h = (struct ether_hdr *) pkt->data;
eth_type = RTE_BE_TO_CPU_16(eth_h->ether_type);
l2_len = sizeof(struct ether_hdr);
if (verbose_level > 0) {
printf("\nPort %d pkt-len=%u nb-segs=%u\n",
- fs->rx_port, pkt->pkt.pkt_len, pkt->pkt.nb_segs);
+ fs->rx_port, pkt->pkt_len, pkt->nb_segs);
ether_addr_dump(" ETH: src=", ð_h->s_addr);
ether_addr_dump(" dst=", ð_h->d_addr);
}
@@ -546,7 +546,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
* Check that the received packet is a PTP packet that was detected
* by the hardware.
*/
- eth_hdr = (struct ether_hdr *)mb->pkt.data;
+ eth_hdr = (struct ether_hdr *)mb->data;
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
if (eth_type == ETHER_TYPE_1588) {
@@ -557,7 +557,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
printf("Port %u Received non PTP packet type=0x%4x "
"len=%u\n",
(unsigned) fs->rx_port, eth_type,
- (unsigned) mb->pkt.pkt_len);
+ (unsigned) mb->pkt_len);
}
rte_pktmbuf_free(mb);
return;
@@ -574,7 +574,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
* Check that the received PTP packet is a PTP V2 packet of type
* PTP_SYNC_MESSAGE.
*/
- ptp_hdr = (struct ptpv2_msg *) ((char *) mb->pkt.data +
+ ptp_hdr = (struct ptpv2_msg *) ((char *) mb->data +
sizeof(struct ether_hdr));
if (ptp_hdr->version != 0x02) {
printf("Port %u Received PTP V2 Ethernet frame with wrong PTP"
@@ -119,7 +119,7 @@ pkt_burst_mac_retry_forward(struct fwd_stream *fs)
fs->rx_packets += nb_rx;
for (i = 0; i < nb_rx; i++) {
mb = pkts_burst[i];
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
&eth_hdr->d_addr);
ether_addr_copy(&ports[fs->tx_port].eth_addr,
@@ -110,15 +110,15 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
txp = &ports[fs->tx_port];
for (i = 0; i < nb_rx; i++) {
mb = pkts_burst[i];
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
&eth_hdr->d_addr);
ether_addr_copy(&ports[fs->tx_port].eth_addr,
&eth_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->pkt.vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
@@ -110,7 +110,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
txp = &ports[fs->tx_port];
for (i = 0; i < nb_rx; i++) {
mb = pkts_burst[i];
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
/* Swap dest and src mac addresses. */
ether_addr_copy(&eth_hdr->d_addr, &addr);
@@ -118,9 +118,9 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
ether_addr_copy(&addr, &eth_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->pkt.vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
@@ -149,24 +149,24 @@ pkt_burst_receive(struct fwd_stream *fs)
rte_pktmbuf_free(mb);
continue;
}
- eth_hdr = (struct ether_hdr *) mb->pkt.data;
+ eth_hdr = (struct ether_hdr *) mb->data;
eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
ol_flags = mb->ol_flags;
print_ether_addr(" src=", ð_hdr->s_addr);
print_ether_addr(" - dst=", ð_hdr->d_addr);
printf(" - type=0x%04x - length=%u - nb_segs=%d",
- eth_type, (unsigned) mb->pkt.pkt_len,
- (int)mb->pkt.nb_segs);
+ eth_type, (unsigned) mb->pkt_len,
+ (int)mb->nb_segs);
if (ol_flags & PKT_RX_RSS_HASH) {
- printf(" - RSS hash=0x%x", (unsigned) mb->pkt.hash.rss);
+ printf(" - RSS hash=0x%x", (unsigned) mb->hash.rss);
printf(" - RSS queue=0x%x",(unsigned) fs->rx_queue);
}
else if (ol_flags & PKT_RX_FDIR)
printf(" - FDIR hash=0x%x - FDIR id=0x%x ",
- mb->pkt.hash.fdir.hash, mb->pkt.hash.fdir.id);
+ mb->hash.fdir.hash, mb->hash.fdir.id);
if (ol_flags & PKT_RX_VLAN_PKT)
printf(" - VLAN tci=0x%x",
- mb->pkt.vlan_macip.f.vlan_tci);
+ mb->vlan_macip.f.vlan_tci);
printf("\n");
if (ol_flags != 0) {
int rxf;
@@ -404,10 +404,10 @@ testpmd_mbuf_ctor(struct rte_mempool *mp,
mb_ctor_arg->seg_buf_offset);
mb->buf_len = mb_ctor_arg->seg_buf_size;
mb->ol_flags = 0;
- mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
- mb->pkt.nb_segs = 1;
- mb->pkt.vlan_macip.data = 0;
- mb->pkt.hash.rss = 0;
+ mb->data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->vlan_macip.data = 0;
+ mb->hash.rss = 0;
}
static void
@@ -60,7 +60,7 @@ int main(int argc, char **argv);
* The maximum number of segments per packet is used when creating
* scattered transmit packets composed of a list of mbufs.
*/
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
+#define RTE_MAX_SEGS_PER_PKT 255 /**< nb_segs is an 8-bit unsigned char. */
#define MAX_PKT_BURST 512
#define DEF_PKT_BURST 32
@@ -106,18 +106,18 @@ copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
unsigned copy_len;
seg = pkt;
- while (offset >= seg->pkt.data_len) {
- offset -= seg->pkt.data_len;
- seg = seg->pkt.next;
+ while (offset >= seg->data_len) {
+ offset -= seg->data_len;
+ seg = seg->next;
}
- copy_len = seg->pkt.data_len - offset;
- seg_buf = ((char *) seg->pkt.data + offset);
+ copy_len = seg->data_len - offset;
+ seg_buf = ((char *) seg->data + offset);
while (len > copy_len) {
rte_memcpy(seg_buf, buf, (size_t) copy_len);
len -= copy_len;
buf = ((char*) buf + copy_len);
- seg = seg->pkt.next;
- seg_buf = seg->pkt.data;
+ seg = seg->next;
+ seg_buf = seg->data;
}
rte_memcpy(seg_buf, buf, (size_t) len);
}
@@ -125,8 +125,8 @@ copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
static inline void
copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
- if (offset + len <= pkt->pkt.data_len) {
- rte_memcpy(((char *) pkt->pkt.data + offset), buf, (size_t) len);
+ if (offset + len <= pkt->data_len) {
+ rte_memcpy(((char *) pkt->data + offset), buf, (size_t) len);
return;
}
copy_buf_to_pkt_segs(buf, len, pkt, offset);
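copy_buf_to_pkt() keeps the common case cheap: a write that fits in the first segment is a single rte_memcpy(), and only a cross-segment write falls through to copy_buf_to_pkt_segs(). A hedged usage sketch (caller and header pointer are hypothetical):

/* Hypothetical caller: write a prebuilt Ethernet header at offset 0. */
static void
example_write_eth_header(struct rte_mbuf *pkt, struct ether_hdr *eh)
{
	copy_buf_to_pkt(eh, sizeof(*eh), pkt, 0);
}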
@@ -225,19 +225,19 @@ pkt_burst_transmit(struct fwd_stream *fs)
return;
break;
}
- pkt->pkt.data_len = tx_pkt_seg_lengths[0];
+ pkt->data_len = tx_pkt_seg_lengths[0];
pkt_seg = pkt;
for (i = 1; i < tx_pkt_nb_segs; i++) {
- pkt_seg->pkt.next = tx_mbuf_alloc(mbp);
- if (pkt_seg->pkt.next == NULL) {
- pkt->pkt.nb_segs = i;
+ pkt_seg->next = tx_mbuf_alloc(mbp);
+ if (pkt_seg->next == NULL) {
+ pkt->nb_segs = i;
rte_pktmbuf_free(pkt);
goto nomore_mbuf;
}
- pkt_seg = pkt_seg->pkt.next;
- pkt_seg->pkt.data_len = tx_pkt_seg_lengths[i];
+ pkt_seg = pkt_seg->next;
+ pkt_seg->data_len = tx_pkt_seg_lengths[i];
}
- pkt_seg->pkt.next = NULL; /* Last segment of packet. */
+ pkt_seg->next = NULL; /* Last segment of packet. */
/*
* Initialize Ethernet header.
@@ -260,12 +260,12 @@ pkt_burst_transmit(struct fwd_stream *fs)
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
*/
- pkt->pkt.nb_segs = tx_pkt_nb_segs;
- pkt->pkt.pkt_len = tx_pkt_length;
+ pkt->nb_segs = tx_pkt_nb_segs;
+ pkt->pkt_len = tx_pkt_length;
pkt->ol_flags = ol_flags;
- pkt->pkt.vlan_macip.f.vlan_tci = vlan_tci;
- pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_macip.f.vlan_tci = vlan_tci;
+ pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -275,7 +275,6 @@ dump_struct_sizes(void)
{
#define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
DUMP_SIZE(struct rte_mbuf);
- DUMP_SIZE(struct rte_pktmbuf);
DUMP_SIZE(struct rte_mempool);
DUMP_SIZE(struct rte_ring);
#undef DUMP_SIZE
@@ -54,18 +54,18 @@ copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
unsigned copy_len;
seg = pkt;
- while (offset >= seg->pkt.data_len) {
- offset -= seg->pkt.data_len;
- seg = seg->pkt.next;
+ while (offset >= seg->data_len) {
+ offset -= seg->data_len;
+ seg = seg->next;
}
- copy_len = seg->pkt.data_len - offset;
- seg_buf = ((char *) seg->pkt.data + offset);
+ copy_len = seg->data_len - offset;
+ seg_buf = ((char *) seg->data + offset);
while (len > copy_len) {
rte_memcpy(seg_buf, buf, (size_t) copy_len);
len -= copy_len;
buf = ((char *) buf + copy_len);
- seg = seg->pkt.next;
- seg_buf = seg->pkt.data;
+ seg = seg->next;
+ seg_buf = seg->data;
}
rte_memcpy(seg_buf, buf, (size_t) len);
}
@@ -73,8 +73,8 @@ copy_buf_to_pkt_segs(void *buf, unsigned len, struct rte_mbuf *pkt,
static inline void
copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
{
- if (offset + len <= pkt->pkt.data_len) {
- rte_memcpy(((char *) pkt->pkt.data + offset), buf, (size_t) len);
+ if (offset + len <= pkt->data_len) {
+ rte_memcpy(((char *) pkt->data + offset), buf, (size_t) len);
return;
}
copy_buf_to_pkt_segs(buf, len, pkt, offset);
@@ -220,19 +220,19 @@ nomore_mbuf:
break;
}
- pkt->pkt.data_len = tx_pkt_seg_lengths[0];
+ pkt->data_len = tx_pkt_seg_lengths[0];
pkt_seg = pkt;
for (i = 1; i < tx_pkt_nb_segs; i++) {
- pkt_seg->pkt.next = rte_pktmbuf_alloc(mp);
- if (pkt_seg->pkt.next == NULL) {
- pkt->pkt.nb_segs = i;
+ pkt_seg->next = rte_pktmbuf_alloc(mp);
+ if (pkt_seg->next == NULL) {
+ pkt->nb_segs = i;
rte_pktmbuf_free(pkt);
goto nomore_mbuf;
}
- pkt_seg = pkt_seg->pkt.next;
- pkt_seg->pkt.data_len = tx_pkt_seg_lengths[i];
+ pkt_seg = pkt_seg->next;
+ pkt_seg->data_len = tx_pkt_seg_lengths[i];
}
- pkt_seg->pkt.next = NULL; /* Last segment of packet. */
+ pkt_seg->next = NULL; /* Last segment of packet. */
/*
* Copy headers in first packet segment(s).
@@ -258,21 +258,21 @@ nomore_mbuf:
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
*/
- pkt->pkt.nb_segs = tx_pkt_nb_segs;
- pkt->pkt.pkt_len = tx_pkt_length;
- pkt->pkt.vlan_macip.f.l2_len = eth_hdr_size;
+ pkt->nb_segs = tx_pkt_nb_segs;
+ pkt->pkt_len = tx_pkt_length;
+ pkt->vlan_macip.f.l2_len = eth_hdr_size;
if (ipv4) {
- pkt->pkt.vlan_macip.f.vlan_tci = ETHER_TYPE_IPv4;
- pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv4;
+ pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;
else
pkt->ol_flags = PKT_RX_IPV4_HDR;
} else {
- pkt->pkt.vlan_macip.f.vlan_tci = ETHER_TYPE_IPv6;
- pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);
+ pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv6;
+ pkt->vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;
@@ -121,7 +121,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = 0;
+ bufs[i]->hash.rss = 0;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
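These tests lean on how the distributor derives its flow tag, visible in the library hunk further down as new_tag = (next_mb->hash.rss | 1): equal hash.rss values pin every packet to one worker, while distinct values spread the load. A minimal sketch of that steering (hypothetical helper, assuming the rte_distributor API used in these tests):

#include <rte_distributor.h>
#include <rte_mbuf.h>

/* Hypothetical helper: force all packets onto a single worker. */
static void
example_steer_to_one_worker(struct rte_distributor *d,
		struct rte_mbuf **bufs, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++)
		bufs[i]->hash.rss = 0;	/* equal hash => equal tag */
	rte_distributor_process(d, bufs, n);
	rte_distributor_flush(d);
}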
@@ -143,7 +143,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
if (rte_lcore_count() >= 3) {
clear_packet_count();
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = (i & 1) << 8;
+ bufs[i]->hash.rss = (i & 1) << 8;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
@@ -168,7 +168,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
* so load gets distributed */
clear_packet_count();
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = i;
+ bufs[i]->hash.rss = i;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
@@ -200,7 +200,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
return -1;
}
for (i = 0; i < BIG_BATCH; i++)
- many_bufs[i]->pkt.hash.rss = i << 2;
+ many_bufs[i]->hash.rss = i << 2;
for (i = 0; i < BIG_BATCH/BURST; i++) {
rte_distributor_process(d, &many_bufs[i*BURST], BURST);
@@ -281,7 +281,7 @@ sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)
while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
rte_distributor_process(d, NULL, 0);
for (j = 0; j < BURST; j++) {
- bufs[j]->pkt.hash.rss = (i+j) << 1;
+ bufs[j]->hash.rss = (i+j) << 1;
bufs[j]->refcnt = 1;
}
@@ -360,7 +360,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = 0;
+ bufs[i]->hash.rss = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
@@ -373,7 +373,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
return -1;
}
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = 0;
+ bufs[i]->hash.rss = 0;
/* get worker zero to quit */
zero_quit = 1;
@@ -417,7 +417,7 @@ test_flush_with_worker_shutdown(struct rte_distributor *d,
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = 0;
+ bufs[i]->hash.rss = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
@@ -489,7 +489,7 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)
zero_quit = 0;
quit = 1;
for (i = 0; i < num_workers; i++)
- bufs[i]->pkt.hash.rss = i << 1;
+ bufs[i]->hash.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
@@ -160,7 +160,7 @@ perf_test(struct rte_distributor *d, struct rte_mempool *p)
}
/* ensure we have a different hash value for each pkt */
for (i = 0; i < BURST; i++)
- bufs[i]->pkt.hash.rss = i;
+ bufs[i]->hash.rss = i;
start = rte_rdtsc();
for (i = 0; i < (1<<ITER_POWER); i++)
@@ -199,7 +199,7 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)
quit = 1;
for (i = 0; i < num_workers; i++)
- bufs[i]->pkt.hash.rss = i << 1;
+ bufs[i]->hash.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
@@ -344,8 +344,8 @@ testclone_testupdate_testdetach(void)
GOTO_FAIL("cannot clone data\n");
rte_pktmbuf_free(clone);
- mc->pkt.next = rte_pktmbuf_alloc(pktmbuf_pool);
- if(mc->pkt.next == NULL)
+ mc->next = rte_pktmbuf_alloc(pktmbuf_pool);
+ if(mc->next == NULL)
GOTO_FAIL("Next Pkt Null\n");
clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
@@ -432,7 +432,7 @@ test_pktmbuf_pool_ptr(void)
printf("rte_pktmbuf_alloc() failed (%u)\n", i);
ret = -1;
}
- m[i]->pkt.data = RTE_PTR_ADD(m[i]->pkt.data, 64);
+ m[i]->data = RTE_PTR_ADD(m[i]->data, 64);
}
/* free them */
@@ -451,8 +451,8 @@ test_pktmbuf_pool_ptr(void)
printf("rte_pktmbuf_alloc() failed (%u)\n", i);
ret = -1;
}
- if (m[i]->pkt.data != RTE_PTR_ADD(m[i]->buf_addr, RTE_PKTMBUF_HEADROOM)) {
- printf ("pkt.data pointer not set properly\n");
+ if (m[i]->data != RTE_PTR_ADD(m[i]->buf_addr, RTE_PKTMBUF_HEADROOM)) {
+ printf ("data pointer not set properly\n");
ret = -1;
}
}
@@ -493,7 +493,7 @@ test_pktmbuf_free_segment(void)
mb = m[i];
while(mb != NULL) {
mt = mb;
- mb = mb->pkt.next;
+ mb = mb->next;
rte_pktmbuf_free_seg(mt);
}
}
@@ -147,8 +147,8 @@ prepare_pkt(struct rte_mbuf *mbuf)
rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
/* 64 byte packet */
- mbuf->pkt.pkt_len = 60;
- mbuf->pkt.data_len = 60;
+ mbuf->pkt_len = 60;
+ mbuf->data_len = 60;
}
@@ -515,7 +515,7 @@ test_pipeline_single_filter(int expected_count)
struct rte_mbuf *mbuf;
mbuf = rte_pktmbuf_alloc(pool);
- memset(mbuf->pkt.data, 0x00,
+ memset(mbuf->data, 0x00,
sizeof(struct ipv4_5tuple));
five_tuple.proto = j;
@@ -524,7 +524,7 @@ test_pipeline_single_filter(int expected_count)
five_tuple.port_src = rte_bswap16(100 + j);
five_tuple.port_dst = rte_bswap16(200 + j);
- memcpy(mbuf->pkt.data, &five_tuple,
+ memcpy(mbuf->data, &five_tuple,
sizeof(struct ipv4_5tuple));
RTE_LOG(INFO, PIPELINE, "%s: Enqueue onto ring %d\n",
__func__, i);
@@ -551,7 +551,7 @@ test_pipeline_single_filter(int expected_count)
printf("Got %d object(s) from ring %d!\n", ret, i);
for (j = 0; j < ret; j++) {
mbuf = (struct rte_mbuf *)objs[j];
- rte_hexdump(stdout, "mbuf", mbuf->pkt.data, 64);
+ rte_hexdump(stdout, "mbuf", mbuf->data, 64);
rte_pktmbuf_free(mbuf);
}
tx_count += ret;
@@ -504,8 +504,8 @@ test_pipeline_single_filter(int test_type, int expected_count)
printf("Got %d object(s) from ring %d!\n", ret, i);
for (j = 0; j < ret; j++) {
mbuf = (struct rte_mbuf *)objs[j];
- rte_hexdump(stdout, "Object:", mbuf->pkt.data,
- mbuf->pkt.data_len);
+ rte_hexdump(stdout, "Object:", mbuf->data,
+ mbuf->data_len);
rte_pktmbuf_free(mbuf);
}
tx_count += ret;
@@ -183,7 +183,7 @@ struct glob_keys g_crypto_hash_keys = {
*
*/
#define PACKET_DATA_START_PHYS(p) \
- ((p)->buf_physaddr + ((char *)p->pkt.data - (char *)p->buf_addr))
+ ((p)->buf_physaddr + ((char *)p->data - (char *)p->buf_addr))
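The macro translates the virtual payload offset into physical space: the buffer's physical base plus however far data points into the buffer. The same computation as a hedged inline function (hypothetical name):

/* Hypothetical equivalent of PACKET_DATA_START_PHYS(p). */
static inline uint64_t
example_data_start_phys(const struct rte_mbuf *m)
{
	return m->buf_physaddr +
		(uint64_t)((const char *)m->data - (const char *)m->buf_addr);
}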
/*
* A fixed offset to where the crypto is to be performed, which is the first
@@ -773,7 +773,7 @@ enum crypto_result
crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
{
CpaCySymDpOpData *opData =
- (CpaCySymDpOpData *) ((char *) (rte_buff->pkt.data)
+ (CpaCySymDpOpData *) ((char *) (rte_buff->data)
+ CRYPTO_OFFSET_TO_OPDATA);
uint32_t lcore_id;
@@ -785,7 +785,7 @@ crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
bzero(opData, sizeof(CpaCySymDpOpData));
opData->srcBuffer = opData->dstBuffer = PACKET_DATA_START_PHYS(rte_buff);
- opData->srcBufferLen = opData->dstBufferLen = rte_buff->pkt.data_len;
+ opData->srcBufferLen = opData->dstBufferLen = rte_buff->data_len;
opData->sessionCtx = qaCoreConf[lcore_id].encryptSessionHandleTbl[c][h];
opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
+ CRYPTO_OFFSET_TO_OPDATA;
@@ -805,7 +805,7 @@ crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
opData->ivLenInBytes = IV_LENGTH_8_BYTES;
opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
- opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+ opData->messageLenToCipherInBytes = rte_buff->data_len
- CRYPTO_START_OFFSET;
/*
* Work around for padding, message length has to be a multiple of
@@ -818,7 +818,7 @@ crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
if (NO_HASH != h) {
opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
- opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+ opData->messageLenToHashInBytes = rte_buff->data_len
- HASH_START_OFFSET;
/*
* Work around for padding, message length has to be a multiple of block
@@ -831,7 +831,7 @@ crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
* Assumption: Ok ignore the passed digest pointer and place HMAC at end
* of packet.
*/
- opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len;
+ opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
}
if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
@@ -848,7 +848,7 @@ enum crypto_result
crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
{
- CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->pkt.data)
+ CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->data)
+ CRYPTO_OFFSET_TO_OPDATA);
uint32_t lcore_id;
@@ -860,7 +860,7 @@ crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
bzero(opData, sizeof(CpaCySymDpOpData));
opData->dstBuffer = opData->srcBuffer = PACKET_DATA_START_PHYS(rte_buff);
- opData->dstBufferLen = opData->srcBufferLen = rte_buff->pkt.data_len;
+ opData->dstBufferLen = opData->srcBufferLen = rte_buff->data_len;
opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
+ CRYPTO_OFFSET_TO_OPDATA;
opData->sessionCtx = qaCoreConf[lcore_id].decryptSessionHandleTbl[c][h];
@@ -880,7 +880,7 @@ crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
opData->ivLenInBytes = IV_LENGTH_8_BYTES;
opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
- opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+ opData->messageLenToCipherInBytes = rte_buff->data_len
- CRYPTO_START_OFFSET;
/*
@@ -892,7 +892,7 @@ crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
}
if (NO_HASH != h) {
opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
- opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+ opData->messageLenToHashInBytes = rte_buff->data_len
- HASH_START_OFFSET;
/*
* Work around for padding, message length has to be a multiple of block
@@ -900,7 +900,7 @@ crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
*/
opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes
% HASH_BLOCK_DEFAULT_SIZE;
- opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len;
+ opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
}
if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
@@ -384,7 +384,7 @@ main_loop(__attribute__((unused)) void *dummy)
}
}
- port = dst_ports[pkt->pkt.in_port];
+ port = dst_ports[pkt->in_port];
/* Transmit the packet */
nic_tx_send_packet(pkt, (uint8_t)port);
@@ -302,16 +302,16 @@ main_loop(__attribute__((unused)) void *arg)
if (m == NULL)
continue;
- ret = read(tap_fd, m->pkt.data, MAX_PACKET_SZ);
+ ret = read(tap_fd, m->data, MAX_PACKET_SZ);
lcore_stats[lcore_id].rx++;
if (unlikely(ret < 0)) {
FATAL_ERROR("Reading from %s interface failed",
tap_name);
}
- m->pkt.nb_segs = 1;
- m->pkt.next = NULL;
- m->pkt.pkt_len = (uint16_t)ret;
- m->pkt.data_len = (uint16_t)ret;
+ m->nb_segs = 1;
+ m->next = NULL;
+ m->pkt_len = (uint16_t)ret;
+ m->data_len = (uint16_t)ret;
ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1);
if (unlikely(ret < 1)) {
rte_pktmbuf_free(m);
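This read()-into-mbuf pattern matters wherever an mbuf is filled by hand rather than by a PMD: on a single-segment mbuf both pkt_len and data_len must be set, and next/nb_segs reset. A hedged helper capturing it (hypothetical name):

/* Hypothetical helper: finalize a single-segment mbuf filled from an
 * external source such as a tap fd. */
static inline void
example_finish_single_seg(struct rte_mbuf *m, uint16_t len)
{
	m->nb_segs = 1;
	m->next = NULL;
	m->pkt_len = len;
	m->data_len = len;
}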
@@ -412,8 +412,8 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
dr = &qconf->death_row;
/* prepare mbuf: setup l2_len/l3_len. */
- m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr);
+ m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
+ m->vlan_macip.f.l3_len = sizeof(*ip_hdr);
/* process this fragment. */
mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
@@ -455,8 +455,8 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
dr = &qconf->death_row;
/* prepare mbuf: setup l2_len/l3_len. */
- m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
+ m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
+ m->vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr, frag_hdr);
if (mo == NULL)
@@ -329,17 +329,17 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
}
/* prepend new header */
- hdr->pkt.next = pkt;
+ hdr->next = pkt;
/* update header's fields */
- hdr->pkt.pkt_len = (uint16_t)(hdr->pkt.data_len + pkt->pkt.pkt_len);
- hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
+ hdr->pkt_len = (uint16_t)(hdr->data_len + pkt->pkt_len);
+ hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);
/* copy metadata from source packet*/
- hdr->pkt.in_port = pkt->pkt.in_port;
- hdr->pkt.vlan_macip = pkt->pkt.vlan_macip;
- hdr->pkt.hash = pkt->pkt.hash;
+ hdr->in_port = pkt->in_port;
+ hdr->vlan_macip = pkt->vlan_macip;
+ hdr->hash = pkt->hash;
hdr->ol_flags = pkt->ol_flags;
@@ -412,7 +412,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
/* Should we use rte_pktmbuf_clone() or not. */
use_clone = (port_num <= MCAST_CLONE_PORTS &&
- m->pkt.nb_segs <= MCAST_CLONE_SEGS);
+ m->nb_segs <= MCAST_CLONE_SEGS);
/* Mark all packet's segments as referenced port_num times */
if (use_clone == 0)
@@ -709,7 +709,7 @@ prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
unsigned char *) + sizeof(struct ether_hdr));
/* Check to make sure the packet is valid (RFC1812) */
- if (is_valid_ipv4_pkt(ipv4_hdr, pkt->pkt.pkt_len) >= 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr, pkt->pkt_len) >= 0) {
/* Update time to live and header checksum */
--(ipv4_hdr->time_to_live);
@@ -687,7 +687,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
- if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
rte_pktmbuf_free(m);
return;
}
@@ -489,7 +489,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
- if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
rte_pktmbuf_free(m);
return;
}
@@ -809,19 +809,19 @@ simple_ipv4_fwd_4pkts(struct rte_mbuf* m[4], uint8_t portid, struct lcore_conf *
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
uint8_t valid_mask = MASK_ALL_PKTS;
- if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) {
rte_pktmbuf_free(m[0]);
valid_mask &= EXECLUDE_1ST_PKT;
}
- if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) {
rte_pktmbuf_free(m[1]);
valid_mask &= EXECLUDE_2ND_PKT;
}
- if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) {
rte_pktmbuf_free(m[2]);
valid_mask &= EXECLUDE_3RD_PKT;
}
- if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) {
rte_pktmbuf_free(m[3]);
valid_mask &= EXECLUDE_4TH_PKT;
}
@@ -1009,7 +1009,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
#ifdef DO_RFC_1812_CHECKS
/* Check to make sure the packet is valid (RFC1812) */
- if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+ if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
rte_pktmbuf_free(m);
return;
}
@@ -540,7 +540,7 @@ app_lcore_worker(
ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
- port = pkt->pkt.in_port;
+ port = pkt->in_port;
}
pos = lp->mbuf_out[port].n_mbufs;
@@ -211,7 +211,7 @@ enqueue_packet(struct rte_mbuf *buf, uint8_t port)
static void
handle_packet(struct rte_mbuf *buf)
{
- const uint8_t in_port = buf->pkt.in_port;
+ const uint8_t in_port = buf->in_port;
const uint8_t out_port = output_ports[in_port];
enqueue_packet(buf, out_port);
@@ -104,8 +104,8 @@ static void send_pause_frame(uint8_t port_id, uint16_t duration)
pause_frame->opcode = rte_cpu_to_be_16(0x0001);
pause_frame->param = rte_cpu_to_be_16(duration);
- mbuf->pkt.pkt_len = 60;
- mbuf->pkt.data_len = 60;
+ mbuf->pkt_len = 60;
+ mbuf->data_len = 60;
rte_eth_tx_burst(port_id, 0, &mbuf, 1);
}
@@ -1027,7 +1027,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
/* Copy mbuf data to buffer */
- rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->pkt.data, rte_pktmbuf_data_len(buff));
+ rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->data, rte_pktmbuf_data_len(buff));
res_cur_idx++;
packet_success++;
@@ -1089,7 +1089,7 @@ link_vmdq(struct virtio_net *dev, struct rte_mbuf *m)
int i, ret;
/* Learn MAC address of guest device from packet */
- pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ pkt_hdr = (struct ether_hdr *)m->data;
dev_ll = ll_root_used;
@@ -1176,7 +1176,7 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
struct ether_hdr *pkt_hdr;
uint64_t ret = 0;
- pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ pkt_hdr = (struct ether_hdr *)m->data;
/*get the used devices list*/
dev_ll = ll_root_used;
@@ -1235,7 +1235,7 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
unsigned len, ret, offset = 0;
const uint16_t lcore_id = rte_lcore_id();
struct virtio_net_data_ll *dev_ll = ll_root_used;
- struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->data;
/*check if destination is local VM*/
if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(dev, m) == 0))
@@ -1288,22 +1288,22 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
return;
}
- mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN + offset;
- mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+ mbuf->data_len = m->data_len + VLAN_HLEN + offset;
+ mbuf->pkt_len = mbuf->data_len;
/* Copy ethernet header to mbuf. */
- rte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);
+ rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);
/* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
- vlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;
+ vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_TCI = htons(vlan_tag);
/* Copy the remaining packet contents to the mbuf. */
- rte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),
- (const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), (m->pkt.data_len - ETH_HLEN));
+ rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
+ (const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - ETH_HLEN));
tx_q->m_table[len] = mbuf;
len++;
if (enable_stats) {
@@ -1393,8 +1393,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
vq->used->ring[used_idx].len = 0;
/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
- m.pkt.data_len = desc->len;
- m.pkt.data = (void*)(uintptr_t)buff_addr;
+ m.data_len = desc->len;
+ m.data = (void*)(uintptr_t)buff_addr;
PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);
@@ -1713,9 +1713,9 @@ attach_rxmbuf_zcp(struct virtio_net *dev)
}
mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM);
- mbuf->pkt.data = (void *)(uintptr_t)(buff_addr);
+ mbuf->data = (void *)(uintptr_t)(buff_addr);
mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM;
- mbuf->pkt.data_len = desc->len;
+ mbuf->data_len = desc->len;
MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
LOG_DEBUG(VHOST_DATA,
@@ -1750,9 +1750,9 @@ static inline void pktmbuf_detach_zcp(struct rte_mbuf *m)
buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->pkt.data = (char *) m->buf_addr + buf_ofs;
+ m->data = (char *) m->buf_addr + buf_ofs;
- m->pkt.data_len = 0;
+ m->data_len = 0;
}
/*
@@ -1984,7 +1984,7 @@ virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
unsigned len, ret, offset = 0;
struct vpool *vpool;
struct virtio_net_data_ll *dev_ll = ll_root_used;
- struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->data;
uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
/*Add packet to the port tx queue*/
@@ -2055,24 +2055,24 @@ virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
}
}
- mbuf->pkt.nb_segs = m->pkt.nb_segs;
- mbuf->pkt.next = m->pkt.next;
- mbuf->pkt.data_len = m->pkt.data_len + offset;
- mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+ mbuf->nb_segs = m->nb_segs;
+ mbuf->next = m->next;
+ mbuf->data_len = m->data_len + offset;
+ mbuf->pkt_len = mbuf->data_len;
if (unlikely(need_copy)) {
/* Copy the packet contents to the mbuf. */
- rte_memcpy((void *)((uint8_t *)mbuf->pkt.data),
- (const void *) ((uint8_t *)m->pkt.data),
- m->pkt.data_len);
+ rte_memcpy((void *)((uint8_t *)mbuf->data),
+ (const void *) ((uint8_t *)m->data),
+ m->data_len);
} else {
- mbuf->pkt.data = m->pkt.data;
+ mbuf->data = m->data;
mbuf->buf_physaddr = m->buf_physaddr;
mbuf->buf_addr = m->buf_addr;
}
mbuf->ol_flags = PKT_TX_VLAN_PKT;
- mbuf->pkt.vlan_macip.f.vlan_tci = vlan_tag;
- mbuf->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mbuf->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ mbuf->vlan_macip.f.vlan_tci = vlan_tag;
+ mbuf->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ mbuf->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
tx_q->m_table[len] = mbuf;
@@ -2081,8 +2081,8 @@ virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n",
dev->device_fh,
- mbuf->pkt.nb_segs,
- (mbuf->pkt.next == NULL) ? "null" : "non-null");
+ mbuf->nb_segs,
+ (mbuf->next == NULL) ? "null" : "non-null");
if (enable_stats) {
dev_statistics[dev->device_fh].tx_total++;
@@ -2196,11 +2196,11 @@ virtio_dev_tx_zcp(struct virtio_net *dev)
* Setup dummy mbuf. This is copied to a real mbuf if
* transmitted out the physical port.
*/
- m.pkt.data_len = desc->len;
- m.pkt.nb_segs = 1;
- m.pkt.next = NULL;
- m.pkt.data = (void *)(uintptr_t)buff_addr;
- m.buf_addr = m.pkt.data;
+ m.data_len = desc->len;
+ m.nb_segs = 1;
+ m.next = NULL;
+ m.data = (void *)(uintptr_t)buff_addr;
+ m.buf_addr = m.data;
m.buf_physaddr = phys_addr;
/*
@@ -677,7 +677,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
/* Copy mbuf data to buffer */
- rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->pkt.data, rte_pktmbuf_data_len(buff));
+ rte_memcpy((void *)(uintptr_t)buff_addr, (const void*)buff->data, rte_pktmbuf_data_len(buff));
res_cur_idx++;
packet_success++;
@@ -808,7 +808,7 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
struct ether_hdr *pkt_hdr;
uint64_t ret = 0;
- pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ pkt_hdr = (struct ether_hdr *)m->data;
/*get the used devices list*/
dev_ll = ll_root_used;
@@ -879,22 +879,22 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
if(!mbuf)
return;
- mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN;
- mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+ mbuf->data_len = m->data_len + VLAN_HLEN;
+ mbuf->pkt_len = mbuf->data_len;
/* Copy ethernet header to mbuf. */
- rte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);
+ rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);
/* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
- vlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;
+ vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_TCI = htons(vlan_tag);
/* Copy the remaining packet contents to the mbuf. */
- rte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),
- (const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), (m->pkt.data_len - ETH_HLEN));
+ rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
+ (const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - ETH_HLEN));
tx_q->m_table[len] = mbuf;
len++;
if (enable_stats) {
@@ -980,9 +980,9 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
rte_prefetch0((void*)(uintptr_t)buff_addr);
/* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
- m.pkt.data_len = desc->len;
- m.pkt.data = (void*)(uintptr_t)buff_addr;
- m.pkt.nb_segs = 1;
+ m.data_len = desc->len;
+ m.data = (void*)(uintptr_t)buff_addr;
+ m.nb_segs = 1;
virtio_tx_route(dev, &m, mbuf_pool, 0);
@@ -282,7 +282,7 @@ rte_distributor_process(struct rte_distributor *d,
next_mb = mbufs[next_idx++];
next_value = (((int64_t)(uintptr_t)next_mb)
<< RTE_DISTRIB_FLAG_BITS);
- new_tag = (next_mb->pkt.hash.rss | 1);
+ new_tag = (next_mb->hash.rss | 1);
uint32_t match = 0;
unsigned i;
@@ -173,20 +173,20 @@ ip_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
struct rte_mbuf *ms;
/* adjust start of the last fragment data. */
- rte_pktmbuf_adj(mp, (uint16_t)(mp->pkt.vlan_macip.f.l2_len +
- mp->pkt.vlan_macip.f.l3_len));
+ rte_pktmbuf_adj(mp, (uint16_t)(mp->vlan_macip.f.l2_len +
+ mp->vlan_macip.f.l3_len));
/* chain two fragments. */
ms = rte_pktmbuf_lastseg(mn);
- ms->pkt.next = mp;
+ ms->next = mp;
/* accumulate number of segments and total length. */
- mn->pkt.nb_segs = (uint8_t)(mn->pkt.nb_segs + mp->pkt.nb_segs);
- mn->pkt.pkt_len += mp->pkt.pkt_len;
+ mn->nb_segs = (uint8_t)(mn->nb_segs + mp->nb_segs);
+ mn->pkt_len += mp->pkt_len;
/* reset pkt_len and nb_segs for chained fragment. */
- mp->pkt.pkt_len = mp->pkt.data_len;
- mp->pkt.nb_segs = 1;
+ mp->pkt_len = mp->data_len;
+ mp->nb_segs = 1;
}
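ip_frag_chain() preserves the usual mbuf invariant: only the head of a chain carries whole-packet pkt_len/nb_segs, so the appended fragment is demoted to a plain one-segment tail. A hedged checker for that invariant (hypothetical name):

/* Hypothetical sanity helper: head totals must match the chain. */
static inline int
example_chain_consistent(const struct rte_mbuf *head)
{
	uint32_t len = 0;
	uint8_t nseg = 0;
	const struct rte_mbuf *m;

	for (m = head; m != NULL; m = m->next) {
		len += m->data_len;
		nseg++;
	}
	return len == head->pkt_len && nseg == head->nb_segs;
}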
@@ -109,7 +109,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
/* Fragment size should be a multiple of 8. */
IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
- in_hdr = (struct ipv4_hdr *) pkt_in->pkt.data;
+ in_hdr = (struct ipv4_hdr *) pkt_in->data;
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
/* If Don't Fragment flag is set */
@@ -118,7 +118,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely(frag_size * nb_pkts_out <
- (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr))))
+ (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
return -EINVAL;
in_seg = pkt_in;
@@ -140,8 +140,8 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
}
/* Reserve space for the IP header that will be built later */
- out_pkt->pkt.data_len = sizeof(struct ipv4_hdr);
- out_pkt->pkt.pkt_len = sizeof(struct ipv4_hdr);
+ out_pkt->data_len = sizeof(struct ipv4_hdr);
+ out_pkt->pkt_len = sizeof(struct ipv4_hdr);
out_seg_prev = out_pkt;
more_out_segs = 1;
@@ -156,29 +156,29 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
__free_fragments(pkts_out, out_pkt_pos);
return -ENOMEM;
}
- out_seg_prev->pkt.next = out_seg;
+ out_seg_prev->next = out_seg;
out_seg_prev = out_seg;
/* Prepare indirect buffer */
rte_pktmbuf_attach(out_seg, in_seg);
- len = mtu_size - out_pkt->pkt.pkt_len;
- if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
- len = in_seg->pkt.data_len - in_seg_data_pos;
+ len = mtu_size - out_pkt->pkt_len;
+ if (len > (in_seg->data_len - in_seg_data_pos)) {
+ len = in_seg->data_len - in_seg_data_pos;
}
- out_seg->pkt.data = (char*) in_seg->pkt.data + (uint16_t)in_seg_data_pos;
- out_seg->pkt.data_len = (uint16_t)len;
- out_pkt->pkt.pkt_len = (uint16_t)(len +
- out_pkt->pkt.pkt_len);
- out_pkt->pkt.nb_segs += 1;
+ out_seg->data = (char*) in_seg->data + (uint16_t)in_seg_data_pos;
+ out_seg->data_len = (uint16_t)len;
+ out_pkt->pkt_len = (uint16_t)(len +
+ out_pkt->pkt_len);
+ out_pkt->nb_segs += 1;
in_seg_data_pos += len;
/* Current output packet (i.e. fragment) done ? */
- if (unlikely(out_pkt->pkt.pkt_len >= mtu_size))
+ if (unlikely(out_pkt->pkt_len >= mtu_size))
more_out_segs = 0;
/* Current input segment done ? */
- if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
- in_seg = in_seg->pkt.next;
+ if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+ in_seg = in_seg->next;
in_seg_data_pos = 0;
if (unlikely(in_seg == NULL))
@@ -188,17 +188,17 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
/* Build the IP header */
- out_hdr = (struct ipv4_hdr*) out_pkt->pkt.data;
+ out_hdr = (struct ipv4_hdr*) out_pkt->data;
__fill_ipv4hdr_frag(out_hdr, in_hdr,
- (uint16_t)out_pkt->pkt.pkt_len,
+ (uint16_t)out_pkt->pkt_len,
flag_offset, fragment_offset, more_in_segs);
fragment_offset = (uint16_t)(fragment_offset +
- out_pkt->pkt.pkt_len - sizeof(struct ipv4_hdr));
+ out_pkt->pkt_len - sizeof(struct ipv4_hdr));
out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
- out_pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ out_pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
/* Write the fragment to the output list */
pkts_out[out_pkt_pos] = out_pkt;
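Note that the loop never copies payload: rte_pktmbuf_attach() turns each out_seg into an indirect mbuf aliasing the input buffer, and out_seg->data/data_len select the right slice, so only the rebuilt IPv4 header is written per fragment. A hedged accounting helper (hypothetical name):

/* Hypothetical helper: payload carried by the aliased tail segments. */
static inline uint32_t
example_frag_payload_len(const struct rte_mbuf *frag)
{
	uint32_t len = 0;
	const struct rte_mbuf *seg;

	for (seg = frag->next; seg != NULL; seg = seg->next)
		len += seg->data_len;
	return len;	/* == frag->pkt_len - sizeof(struct ipv4_hdr) */
}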
@@ -87,10 +87,10 @@ ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
/* update ipv4 header for the reassembled packet */
ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
- m->pkt.vlan_macip.f.l2_len);
+ m->vlan_macip.f.l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
- m->pkt.vlan_macip.f.l3_len));
+ m->vlan_macip.f.l3_len));
ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
ip_hdr->hdr_checksum = 0;
@@ -137,7 +137,7 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
ip_ofs *= IPV4_HDR_OFFSET_UNITS;
ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
- mb->pkt.vlan_macip.f.l3_len);
+ mb->vlan_macip.f.l3_len);
IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"mbuf: %p, tms: %" PRIu64
@@ -122,10 +122,10 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
- (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv6_hdr))))
+ (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv6_hdr))))
return (-EINVAL);
- in_hdr = (struct ipv6_hdr *) pkt_in->pkt.data;
+ in_hdr = (struct ipv6_hdr *) pkt_in->data;
in_seg = pkt_in;
in_seg_data_pos = sizeof(struct ipv6_hdr);
@@ -146,8 +146,8 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
}
/* Reserve space for the IP header that will be built later */
- out_pkt->pkt.data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
- out_pkt->pkt.pkt_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+ out_pkt->data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
+ out_pkt->pkt_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
out_seg_prev = out_pkt;
more_out_segs = 1;
@@ -162,30 +162,30 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
__free_fragments(pkts_out, out_pkt_pos);
return (-ENOMEM);
}
- out_seg_prev->pkt.next = out_seg;
+ out_seg_prev->next = out_seg;
out_seg_prev = out_seg;
/* Prepare indirect buffer */
rte_pktmbuf_attach(out_seg, in_seg);
- len = mtu_size - out_pkt->pkt.pkt_len;
- if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
- len = in_seg->pkt.data_len - in_seg_data_pos;
+ len = mtu_size - out_pkt->pkt_len;
+ if (len > (in_seg->data_len - in_seg_data_pos)) {
+ len = in_seg->data_len - in_seg_data_pos;
}
- out_seg->pkt.data = (char *) in_seg->pkt.data + (uint16_t) in_seg_data_pos;
- out_seg->pkt.data_len = (uint16_t)len;
- out_pkt->pkt.pkt_len = (uint16_t)(len +
- out_pkt->pkt.pkt_len);
- out_pkt->pkt.nb_segs += 1;
+ out_seg->data = (char *) in_seg->data + (uint16_t) in_seg_data_pos;
+ out_seg->data_len = (uint16_t)len;
+ out_pkt->pkt_len = (uint16_t)(len +
+ out_pkt->pkt_len);
+ out_pkt->nb_segs += 1;
in_seg_data_pos += len;
/* Current output packet (i.e. fragment) done ? */
- if (unlikely(out_pkt->pkt.pkt_len >= mtu_size)) {
+ if (unlikely(out_pkt->pkt_len >= mtu_size)) {
more_out_segs = 0;
}
/* Current input segment done ? */
- if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
- in_seg = in_seg->pkt.next;
+ if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+ in_seg = in_seg->next;
in_seg_data_pos = 0;
if (unlikely(in_seg == NULL)) {
@@ -196,14 +196,14 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
/* Build the IP header */
- out_hdr = (struct ipv6_hdr *) out_pkt->pkt.data;
+ out_hdr = (struct ipv6_hdr *) out_pkt->data;
__fill_ipv6hdr_frag(out_hdr, in_hdr,
- (uint16_t) out_pkt->pkt.pkt_len - sizeof(struct ipv6_hdr),
+ (uint16_t) out_pkt->pkt_len - sizeof(struct ipv6_hdr),
fragment_offset, more_in_segs);
fragment_offset = (uint16_t)(fragment_offset +
- out_pkt->pkt.pkt_len - sizeof(struct ipv6_hdr)
+ out_pkt->pkt_len - sizeof(struct ipv6_hdr)
- sizeof(struct ipv6_extension_fragment));
/* Write the fragment to the output list */
@@ -109,7 +109,7 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
/* update ipv6 header for the reassembled datagram */
ip_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(m, uint8_t *) +
- m->pkt.vlan_macip.f.l2_len);
+ m->vlan_macip.f.l2_len);
ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);
@@ -120,7 +120,7 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
* other headers, so we assume there are no other headers and thus update
* the main IPv6 header instead.
*/
- move_len = m->pkt.vlan_macip.f.l2_len + m->pkt.vlan_macip.f.l3_len -
+ move_len = m->vlan_macip.f.l2_len + m->vlan_macip.f.l3_len -
sizeof(*frag_hdr);
frag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);
ip_hdr->proto = frag_hdr->next_header;
@@ -104,12 +104,12 @@ rte_pktmbuf_init(struct rte_mempool *mp,
m->buf_len = (uint16_t)buf_len;
/* keep some headroom between start of buffer and data */
- m->pkt.data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
+ m->data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
/* init some constant fields */
m->pool = mp;
- m->pkt.nb_segs = 1;
- m->pkt.in_port = 0xff;
+ m->nb_segs = 1;
+ m->in_port = 0xff;
}
/* do some sanity checks on a mbuf: panic if it fails */
@@ -140,10 +140,10 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
if (is_header == 0)
return;
- nb_segs = m->pkt.nb_segs;
+ nb_segs = m->nb_segs;
m_seg = m;
while (m_seg && nb_segs != 0) {
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
nb_segs --;
}
if (nb_segs != 0)
@@ -162,22 +162,22 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
fprintf(f, "dump mbuf at 0x%p, phys=%"PRIx64", buf_len=%u\n",
m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx16", nb_segs=%u, "
- "in_port=%u\n", m->pkt.pkt_len, m->ol_flags,
- (unsigned)m->pkt.nb_segs, (unsigned)m->pkt.in_port);
- nb_segs = m->pkt.nb_segs;
+ "in_port=%u\n", m->pkt_len, m->ol_flags,
+ (unsigned)m->nb_segs, (unsigned)m->in_port);
+ nb_segs = m->nb_segs;
while (m && nb_segs != 0) {
__rte_mbuf_sanity_check(m, 0);
fprintf(f, " segment at 0x%p, data=0x%p, data_len=%u\n",
- m, m->pkt.data, (unsigned)m->pkt.data_len);
+ m, m->data, (unsigned)m->data_len);
len = dump_len;
- if (len > m->pkt.data_len)
- len = m->pkt.data_len;
+ if (len > m->data_len)
+ len = m->data_len;
if (len != 0)
- rte_hexdump(f, NULL, m->pkt.data, len);
+ rte_hexdump(f, NULL, m->data, len);
dump_len -= len;
- m = m->pkt.next;
+ m = m->next;
nb_segs --;
}
}
@@ -133,32 +133,6 @@ union rte_vlan_macip {
#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
/**
- * A packet message buffer.
- */
-struct rte_pktmbuf {
- /* valid for any segment */
- struct rte_mbuf *next; /**< Next segment of scattered packet. */
- void* data; /**< Start address of data in segment buffer. */
- uint16_t data_len; /**< Amount of data in segment buffer. */
-
- /* these fields are valid for first segment only */
- uint8_t nb_segs; /**< Number of segments. */
- uint8_t in_port; /**< Input port. */
- uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
-
- /* offload features */
- union rte_vlan_macip vlan_macip;
- union {
- uint32_t rss; /**< RSS hash result if RSS enabled */
- struct {
- uint16_t hash;
- uint16_t id;
- } fdir; /**< Filter identifier if FDIR enabled */
- uint32_t sched; /**< Hierarchical scheduler */
- } hash; /**< hash information */
-};
-
-/**
* The generic rte_mbuf, containing a packet mbuf.
*/
struct rte_mbuf {
@@ -185,7 +159,26 @@ struct rte_mbuf {
uint16_t reserved; /**< Unused field. Required for padding. */
uint16_t ol_flags; /**< Offload features. */
- struct rte_pktmbuf pkt;
+ /* valid for any segment */
+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
+ void* data; /**< Start address of data in segment buffer. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+
+ /* these fields are valid for first segment only */
+ uint8_t nb_segs; /**< Number of segments. */
+ uint8_t in_port; /**< Input port. */
+ uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+
+ /* offload features, valid for first segment only */
+ union rte_vlan_macip vlan_macip;
+ union {
+ uint32_t rss; /**< RSS hash result if RSS enabled */
+ struct {
+ uint16_t hash;
+ uint16_t id;
+ } fdir; /**< Filter identifier if FDIR enabled */
+ uint32_t sched; /**< Hierarchical scheduler */
+ } hash; /**< hash information */
union {
uint8_t metadata[0];
@@ -477,18 +470,18 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
uint32_t buf_ofs;
- m->pkt.next = NULL;
- m->pkt.pkt_len = 0;
- m->pkt.vlan_macip.data = 0;
- m->pkt.nb_segs = 1;
- m->pkt.in_port = 0xff;
+ m->next = NULL;
+ m->pkt_len = 0;
+ m->vlan_macip.data = 0;
+ m->nb_segs = 1;
+ m->in_port = 0xff;
m->ol_flags = 0;
buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->pkt.data = (char*) m->buf_addr + buf_ofs;
+ m->data = (char*) m->buf_addr + buf_ofs;
- m->pkt.data_len = 0;
+ m->data_len = 0;
__rte_mbuf_sanity_check(m, 1);
}
@@ -542,11 +535,16 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
mi->buf_addr = md->buf_addr;
mi->buf_len = md->buf_len;
- mi->pkt = md->pkt;
+ mi->next = md->next;
+ mi->data = md->data;
+ mi->data_len = md->data_len;
+ mi->in_port = md->in_port;
+ mi->vlan_macip = md->vlan_macip;
+ mi->hash = md->hash;
- mi->pkt.next = NULL;
- mi->pkt.pkt_len = mi->pkt.data_len;
- mi->pkt.nb_segs = 1;
+ mi->next = NULL;
+ mi->pkt_len = mi->data_len;
+ mi->nb_segs = 1;
mi->ol_flags = md->ol_flags;
__rte_mbuf_sanity_check(mi, 1);
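With no pkt sub-struct left, the former one-line mi->pkt = md->pkt assignment becomes explicit per-field copies, the one cost of the flattening. Callers are unaffected; a hedged usage sketch (assumes an RTE_MBUF_REFCNT build, where attach/clone exist):

/* Hypothetical helper: build an indirect mbuf aliasing md's buffer. */
static struct rte_mbuf *
example_make_indirect(struct rte_mbuf *md, struct rte_mempool *mp)
{
	struct rte_mbuf *mi = rte_pktmbuf_alloc(mp);

	if (mi == NULL)
		return NULL;
	rte_pktmbuf_attach(mi, md);	/* shares md's buffer */
	return mi;
}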
@@ -576,9 +574,9 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->pkt.data = (char*) m->buf_addr + buf_ofs;
+ m->data = (char*) m->buf_addr + buf_ofs;
- m->pkt.data_len = 0;
+ m->data_len = 0;
}
#endif /* RTE_MBUF_REFCNT */
@@ -645,7 +643,7 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
__rte_mbuf_sanity_check(m, 1);
while (m != NULL) {
- m_next = m->pkt.next;
+ m_next = m->next;
rte_pktmbuf_free_seg(m);
m = m_next;
}
@@ -681,21 +679,21 @@ static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
return (NULL);
mi = mc;
- prev = &mi->pkt.next;
- pktlen = md->pkt.pkt_len;
+ prev = &mi->next;
+ pktlen = md->pkt_len;
nseg = 0;
do {
nseg++;
rte_pktmbuf_attach(mi, md);
*prev = mi;
- prev = &mi->pkt.next;
- } while ((md = md->pkt.next) != NULL &&
+ prev = &mi->next;
+ } while ((md = md->next) != NULL &&
(mi = rte_pktmbuf_alloc(mp)) != NULL);
*prev = NULL;
- mc->pkt.nb_segs = nseg;
- mc->pkt.pkt_len = pktlen;
+ mc->nb_segs = nseg;
+ mc->pkt_len = pktlen;
/* Allocation of new indirect segment failed */
if (unlikely (mi == NULL)) {
@@ -724,7 +722,7 @@ static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
do {
rte_mbuf_refcnt_update(m, v);
- } while ((m = m->pkt.next) != NULL);
+ } while ((m = m->next) != NULL);
}
#endif /* RTE_MBUF_REFCNT */
@@ -740,7 +738,7 @@ static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 1);
- return (uint16_t) ((char*) m->pkt.data - (char*) m->buf_addr);
+ return (uint16_t) ((char*) m->data - (char*) m->buf_addr);
}
/**
@@ -755,7 +753,7 @@ static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 1);
return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
- m->pkt.data_len);
+ m->data_len);
}
/**
@@ -771,8 +769,8 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
struct rte_mbuf *m2 = (struct rte_mbuf *)m;
__rte_mbuf_sanity_check(m, 1);
- while (m2->pkt.next != NULL)
- m2 = m2->pkt.next;
+ while (m2->next != NULL)
+ m2 = m2->next;
return m2;
}
@@ -788,7 +786,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
* @param t
* The type to cast the result into.
*/
-#define rte_pktmbuf_mtod(m, t) ((t)((m)->pkt.data))
+#define rte_pktmbuf_mtod(m, t) ((t)((m)->data))
/**
* A macro that returns the length of the packet.
@@ -798,7 +796,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
* @param m
* The packet mbuf.
*/
-#define rte_pktmbuf_pkt_len(m) ((m)->pkt.pkt_len)
+#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
/**
* A macro that returns the length of the segment.
@@ -808,7 +806,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
* @param m
* The packet mbuf.
*/
-#define rte_pktmbuf_data_len(m) ((m)->pkt.data_len)
+#define rte_pktmbuf_data_len(m) ((m)->data_len)
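The accessor macros absorb the rename, so code that already went through rte_pktmbuf_mtod() and friends compiles unchanged; only direct ->pkt. accesses needed this patch. A minimal usage sketch (hypothetical helper):

#include <rte_byteorder.h>
#include <rte_ether.h>

/* Hypothetical helper: read the EtherType through the macro. */
static inline uint16_t
example_ether_type(struct rte_mbuf *m)
{
	struct ether_hdr *eh = rte_pktmbuf_mtod(m, struct ether_hdr *);

	return rte_be_to_cpu_16(eh->ether_type);
}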
/**
* Prepend len bytes to an mbuf data area.
@@ -833,11 +831,11 @@ static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
if (unlikely(len > rte_pktmbuf_headroom(m)))
return NULL;
- m->pkt.data = (char*) m->pkt.data - len;
- m->pkt.data_len = (uint16_t)(m->pkt.data_len + len);
- m->pkt.pkt_len = (m->pkt.pkt_len + len);
+ m->data = (char*) m->data - len;
+ m->data_len = (uint16_t)(m->data_len + len);
+ m->pkt_len = (m->pkt_len + len);
- return (char*) m->pkt.data;
+ return (char*) m->data;
}
/**
@@ -866,9 +864,9 @@ static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
return NULL;
- tail = (char*) m_last->pkt.data + m_last->pkt.data_len;
- m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len + len);
- m->pkt.pkt_len = (m->pkt.pkt_len + len);
+ tail = (char*) m_last->data + m_last->data_len;
+ m_last->data_len = (uint16_t)(m_last->data_len + len);
+ m->pkt_len = (m->pkt_len + len);
return (char*) tail;
}
@@ -890,13 +888,13 @@ static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
__rte_mbuf_sanity_check(m, 1);
- if (unlikely(len > m->pkt.data_len))
+ if (unlikely(len > m->data_len))
return NULL;
- m->pkt.data_len = (uint16_t)(m->pkt.data_len - len);
- m->pkt.data = ((char*) m->pkt.data + len);
- m->pkt.pkt_len = (m->pkt.pkt_len - len);
- return (char*) m->pkt.data;
+ m->data_len = (uint16_t)(m->data_len - len);
+ m->data = ((char*) m->data + len);
+ m->pkt_len = (m->pkt_len - len);
+ return (char*) m->data;
}
/**
@@ -920,11 +918,11 @@ static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
__rte_mbuf_sanity_check(m, 1);
m_last = rte_pktmbuf_lastseg(m);
- if (unlikely(len > m_last->pkt.data_len))
+ if (unlikely(len > m_last->data_len))
return -1;
- m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len - len);
- m->pkt.pkt_len = (m->pkt.pkt_len - len);
+ m_last->data_len = (uint16_t)(m_last->data_len - len);
+ m->pkt_len = (m->pkt_len - len);
return 0;
}
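prepend/adj move the head of the data while append/trim move the tail, each returning NULL or -1 when head- or tailroom runs out. A hedged sketch of stripping and restoring four bytes at the front (hypothetical helper; 4 is the 802.1Q tag size):

/* Hypothetical helper: drop 4 bytes from the front, then put them back. */
static inline int
example_strip_and_restore(struct rte_mbuf *m)
{
	if (rte_pktmbuf_adj(m, 4) == NULL)	/* packet shorter than 4B */
		return -1;
	if (rte_pktmbuf_prepend(m, 4) == NULL)	/* adj just freed 4B of room */
		return -1;
	return 0;
}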
@@ -940,7 +938,7 @@ static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 1);
- return !!(m->pkt.nb_segs == 1);
+ return !!(m->nb_segs == 1);
}
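With the union gone, segment chaining is expressed directly through next and nb_segs. A hedged chain walk that checks the first-segment invariants (illustrative only):

    #include <rte_mbuf.h>

    static int
    mbuf_chain_is_consistent(const struct rte_mbuf *m)
    {
        unsigned int count = 0;
        uint32_t total = 0;
        const struct rte_mbuf *seg;

        for (seg = m; seg != NULL; seg = seg->next) {
            count++;
            total += seg->data_len;
        }
        /* nb_segs and pkt_len are only maintained on the first segment. */
        return count == m->nb_segs && total == m->pkt_len;
    }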
/**
@@ -198,14 +198,14 @@ xmit_slave_hash(const struct rte_mbuf *buf, uint8_t slave_count, uint8_t policy)
switch (policy) {
case BALANCE_XMIT_POLICY_LAYER2:
- eth_hdr = (struct ether_hdr *)buf->pkt.data;
+ eth_hdr = (struct ether_hdr *)buf->data;
hash = ether_hash(eth_hdr);
hash ^= hash >> 8;
return hash % slave_count;
case BALANCE_XMIT_POLICY_LAYER23:
- eth_hdr = (struct ether_hdr *)buf->pkt.data;
+ eth_hdr = (struct ether_hdr *)buf->data;
if (buf->ol_flags & PKT_RX_VLAN_PKT)
eth_offset = sizeof(struct ether_hdr) + sizeof(struct vlan_hdr);
@@ -91,7 +91,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
(uint64_t) ((mb)->buf_physaddr + \
- (uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr))
+ (uint64_t) ((char *)((mb)->data) - (char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
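Every PMD touched by this patch carries its own copy of this macro, and each copy changes the same way: the DMA address is the physical base of the buffer plus the offset of the data pointer inside it. A function-form restatement, for illustration only:

    #include <rte_mbuf.h>

    static inline uint64_t
    mbuf_data_dma_addr(const struct rte_mbuf *mb)
    {
        /* physical buffer base + offset of the current data pointer */
        return mb->buf_physaddr +
               (uint64_t)((const char *)mb->data - (const char *)mb->buf_addr);
    }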
@@ -421,7 +421,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
PKT_TX_L4_MASK));
if (tx_ol_req) {
- hdrlen = tx_pkt->pkt.vlan_macip;
+ hdrlen = tx_pkt->vlan_macip;
/* Decide whether to build a new context or reuse the existing one. */
ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
@@ -434,7 +434,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* This will always be the number of segments + the number of
* Context descriptors required to transmit the packet
*/
- nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
/*
* The number of descriptors that must be allocated for a
@@ -454,7 +454,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
" tx_first=%u tx_last=%u\n",
(unsigned) txq->port_id,
(unsigned) txq->queue_id,
- (unsigned) tx_pkt->pkt.pkt_len,
+ (unsigned) tx_pkt->pkt_len,
(unsigned) tx_id,
(unsigned) tx_last);
@@ -516,7 +516,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Set VLAN Tag offload fields. */
if (ol_flags & PKT_TX_VLAN_PKT) {
cmd_type_len |= E1000_TXD_CMD_VLE;
- popts_spec = tx_pkt->pkt.vlan_macip.f.vlan_tci <<
+ popts_spec = tx_pkt->vlan_macip.f.vlan_tci <<
E1000_TXD_VLAN_SHIFT;
}
@@ -566,7 +566,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*
* Set up Transmit Data Descriptor.
*/
- slen = m_seg->pkt.data_len;
+ slen = m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
@@ -576,7 +576,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
} while (m_seg != NULL);
/*
@@ -771,20 +771,20 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
rxq->crc_len);
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_packet_prefetch(rxm->pkt.data);
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = pkt_len;
- rxm->pkt.data_len = pkt_len;
- rxm->pkt.in_port = rxq->port_id;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch(rxm->data);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->in_port = rxq->port_id;
rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
rxm->ol_flags = (uint16_t)(rxm->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
/*
* Store the mbuf address into the next entry of the array
@@ -940,8 +940,8 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* Set data length & data buffer address of mbuf.
*/
data_len = rte_le_to_cpu_16(rxd.length);
- rxm->pkt.data_len = data_len;
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = data_len;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
@@ -953,12 +953,12 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
if (first_seg == NULL) {
first_seg = rxm;
- first_seg->pkt.pkt_len = data_len;
- first_seg->pkt.nb_segs = 1;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
} else {
- first_seg->pkt.pkt_len += data_len;
- first_seg->pkt.nb_segs++;
- last_seg->pkt.next = rxm;
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
}
/*
@@ -981,18 +981,18 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* mbuf, subtract the length of that CRC part from the
* data length of the previous mbuf.
*/
- rxm->pkt.next = NULL;
+ rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+ first_seg->pkt_len -= ETHER_CRC_LEN;
if (data_len <= ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
- first_seg->pkt.nb_segs--;
- last_seg->pkt.data_len = (uint16_t)
- (last_seg->pkt.data_len -
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
(ETHER_CRC_LEN - data_len));
- last_seg->pkt.next = NULL;
+ last_seg->next = NULL;
} else
- rxm->pkt.data_len =
+ rxm->data_len =
(uint16_t) (data_len - ETHER_CRC_LEN);
}
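The CRC handling above has a corner case worth spelling out: when the hardware leaves the 4-byte FCS in place and the final descriptor holds 4 bytes or fewer, part of the CRC spills into the previous segment, which must then be trimmed as well. A hedged restatement as a standalone helper (the name and parameters are ours):

    #include <rte_ether.h>
    #include <rte_mbuf.h>

    /* 'last_len' is the byte count of the final descriptor; 'prev' is the
     * segment before 'last' in the chain. */
    static void
    strip_crc(struct rte_mbuf *first, struct rte_mbuf *prev,
              struct rte_mbuf *last, uint16_t last_len)
    {
        first->pkt_len -= ETHER_CRC_LEN;
        if (last_len <= ETHER_CRC_LEN) {
            /* The last segment holds nothing but (part of) the CRC:
             * drop it and trim the spill-over from the previous one. */
            rte_pktmbuf_free_seg(last);
            first->nb_segs--;
            prev->data_len = (uint16_t)
                (prev->data_len - (ETHER_CRC_LEN - last_len));
            prev->next = NULL;
        } else
            last->data_len = (uint16_t)(last_len - ETHER_CRC_LEN);
    }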
@@ -1003,17 +1003,17 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* - IP checksum flag,
* - error flags.
*/
- first_seg->pkt.in_port = rxq->port_id;
+ first_seg->in_port = rxq->port_id;
first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch(first_seg->pkt.data);
+ rte_packet_prefetch(first_seg->data);
/*
* Store the mbuf address into the next entry of the array
@@ -96,7 +96,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
(uint64_t) ((mb)->buf_physaddr + \
- (uint64_t) ((char *)((mb)->pkt.data) - \
+ (uint64_t) ((char *)((mb)->data) - \
(char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
@@ -365,7 +365,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
tx_pkt = *tx_pkts++;
- pkt_len = tx_pkt->pkt.pkt_len;
+ pkt_len = tx_pkt->pkt_len;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -377,10 +377,10 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* for the packet, starting from the current position (tx_id)
* in the ring.
*/
- tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
+ tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+ vlan_macip_lens = tx_pkt->vlan_macip.data;
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
/* Check whether a context descriptor needs to be built. */
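The em and igb TX paths both read the three offload fields in one 32-bit access through vlan_macip.data, while the test-pmd hunks earlier in this patch set them one by one through the .f bit-fields; the two views alias the same word. A hedged illustration:

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>

    static uint32_t
    pack_offload_fields(void)
    {
        union rte_vlan_macip vm;

        vm.f.l2_len   = sizeof(struct ether_hdr);  /* 14 */
        vm.f.l3_len   = sizeof(struct ipv4_hdr);   /* 20 */
        vm.f.vlan_tci = 100;                       /* arbitrary */

        /* One word carrying all three fields, as the PMDs consume it. */
        return vm.data;
    }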
@@ -527,7 +527,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*
* Set up transmit descriptor.
*/
- slen = (uint16_t) m_seg->pkt.data_len;
+ slen = (uint16_t) m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
@@ -538,7 +538,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
} while (m_seg != NULL);
/*
@@ -753,18 +753,18 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
rxq->crc_len);
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_packet_prefetch(rxm->pkt.data);
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = pkt_len;
- rxm->pkt.data_len = pkt_len;
- rxm->pkt.in_port = rxq->port_id;
-
- rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch(rxm->data);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->in_port = rxq->port_id;
+
+ rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_macip.f.vlan_tci =
+ rxm->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -929,8 +929,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* Set data length & data buffer address of mbuf.
*/
data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
- rxm->pkt.data_len = data_len;
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = data_len;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
@@ -942,12 +942,12 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
if (first_seg == NULL) {
first_seg = rxm;
- first_seg->pkt.pkt_len = data_len;
- first_seg->pkt.nb_segs = 1;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
} else {
- first_seg->pkt.pkt_len += data_len;
- first_seg->pkt.nb_segs++;
- last_seg->pkt.next = rxm;
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
}
/*
@@ -970,18 +970,18 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* mbuf, subtract the length of that CRC part from the
* data length of the previous mbuf.
*/
- rxm->pkt.next = NULL;
+ rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+ first_seg->pkt_len -= ETHER_CRC_LEN;
if (data_len <= ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
- first_seg->pkt.nb_segs--;
- last_seg->pkt.data_len = (uint16_t)
- (last_seg->pkt.data_len -
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
(ETHER_CRC_LEN - data_len));
- last_seg->pkt.next = NULL;
+ last_seg->next = NULL;
} else
- rxm->pkt.data_len =
+ rxm->data_len =
(uint16_t) (data_len - ETHER_CRC_LEN);
}
@@ -994,14 +994,14 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* - VLAN TCI, if any,
* - error flags.
*/
- first_seg->pkt.in_port = rxq->port_id;
- first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+ first_seg->in_port = rxq->port_id;
+ first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
/*
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->pkt.vlan_macip.f.vlan_tci =
+ first_seg->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -1012,7 +1012,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->ol_flags = pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch(first_seg->pkt.data);
+ rte_packet_prefetch(first_seg->data);
/*
* Store the mbuf address into the next entry of the array
@@ -79,7 +79,7 @@
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
((uint64_t)((mb)->buf_physaddr + \
- (uint64_t)((char *)((mb)->pkt.data) - \
+ (uint64_t)((char *)((mb)->data) - \
(char *)(mb)->buf_addr)))
static const struct rte_memzone *
@@ -614,9 +614,9 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
I40E_RXD_QW1_STATUS_SHIFT;
pkt_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
- mb->pkt.data_len = pkt_len;
- mb->pkt.pkt_len = pkt_len;
- mb->pkt.vlan_macip.f.vlan_tci = rx_status &
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_macip.f.vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(\
rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
@@ -625,7 +625,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
mb->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
- mb->pkt.hash.rss = rte_le_to_cpu_32(\
+ mb->hash.rss = rte_le_to_cpu_32(\
rxdp->wb.qword0.hi_dword.rss);
}
@@ -687,10 +687,10 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
for (i = 0; i < rxq->rx_free_thresh; i++) {
mb = rxep[i].mbuf;
rte_mbuf_refcnt_set(mb, 1);
- mb->pkt.next = NULL;
- mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
- mb->pkt.nb_segs = 1;
- mb->pkt.in_port = rxq->port_id;
+ mb->next = NULL;
+ mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->in_port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
rxdp[i].read.hdr_addr = dma_addr;
@@ -845,15 +845,15 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_packet_len = ((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_prefetch0(rxm->pkt.data);
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = rx_packet_len;
- rxm->pkt.data_len = rx_packet_len;
- rxm->pkt.in_port = rxq->port_id;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(rxm->data);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = rx_packet_len;
+ rxm->data_len = rx_packet_len;
+ rxm->in_port = rxq->port_id;
- rxm->pkt.vlan_macip.f.vlan_tci = rx_status &
+ rxm->vlan_macip.f.vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -861,7 +861,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
rxm->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
- rxm->pkt.hash.rss =
+ rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
rx_pkts[nb_rx++] = rxm;
@@ -948,8 +948,8 @@ i40e_recv_scattered_pkts(void *rx_queue,
rxdp->read.pkt_addr = dma_addr;
rx_packet_len = (qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rxm->pkt.data_len = rx_packet_len;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = rx_packet_len;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
/**
* If this is the first buffer of the received packet, set the
@@ -960,14 +960,14 @@ i40e_recv_scattered_pkts(void *rx_queue,
*/
if (!first_seg) {
first_seg = rxm;
- first_seg->pkt.nb_segs = 1;
- first_seg->pkt.pkt_len = rx_packet_len;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
} else {
- first_seg->pkt.pkt_len =
- (uint16_t)(first_seg->pkt.pkt_len +
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
rx_packet_len);
- first_seg->pkt.nb_segs++;
- last_seg->pkt.next = rxm;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
}
/**
@@ -990,23 +990,23 @@ i40e_recv_scattered_pkts(void *rx_queue,
* the length of that CRC part from the data length of the
* previous mbuf.
*/
- rxm->pkt.next = NULL;
+ rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+ first_seg->pkt_len -= ETHER_CRC_LEN;
if (rx_packet_len <= ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
- first_seg->pkt.nb_segs--;
- last_seg->pkt.data_len =
- (uint16_t)(last_seg->pkt.data_len -
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
(ETHER_CRC_LEN - rx_packet_len));
- last_seg->pkt.next = NULL;
+ last_seg->next = NULL;
} else
- rxm->pkt.data_len = (uint16_t)(rx_packet_len -
+ rxm->data_len = (uint16_t)(rx_packet_len -
ETHER_CRC_LEN);
}
- first_seg->pkt.in_port = rxq->port_id;
- first_seg->pkt.vlan_macip.f.vlan_tci = (rx_status &
+ first_seg->in_port = rxq->port_id;
+ first_seg->vlan_macip.f.vlan_tci = (rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -1014,11 +1014,11 @@ i40e_recv_scattered_pkts(void *rx_queue,
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
first_seg->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
- rxm->pkt.hash.rss =
+ rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
/* Prefetch data of first segment, if configured to do so. */
- rte_prefetch0(first_seg->pkt.data);
+ rte_prefetch0(first_seg->data);
rx_pkts[nb_rx++] = first_seg;
first_seg = NULL;
}
@@ -1108,8 +1108,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
ol_flags = tx_pkt->ol_flags;
- l2_len = tx_pkt->pkt.vlan_macip.f.l2_len;
- l3_len = tx_pkt->pkt.vlan_macip.f.l3_len;
+ l2_len = tx_pkt->vlan_macip.f.l2_len;
+ l3_len = tx_pkt->vlan_macip.f.l3_len;
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
@@ -1119,7 +1119,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* a packet equals to the number of the segments of that
* packet plus 1 context descriptor if needed.
*/
- nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + nb_ctx);
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
tx_last = (uint16_t)(tx_id + nb_used - 1);
/* Circular ring */
@@ -1145,7 +1145,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Descriptor based VLAN insertion */
if (ol_flags & PKT_TX_VLAN_PKT) {
- tx_flags |= tx_pkt->pkt.vlan_macip.f.vlan_tci <<
+ tx_flags |= tx_pkt->vlan_macip.f.vlan_tci <<
I40E_TX_FLAG_L2TAG1_SHIFT;
tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -1202,7 +1202,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txe->mbuf = m_seg;
/* Setup TX Descriptor */
- slen = m_seg->pkt.data_len;
+ slen = m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
@@ -1210,7 +1210,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
} while (m_seg != NULL);
/* The last packet data descriptor needs End Of Packet (EOP) */
@@ -1298,7 +1298,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
- (*pkts)->pkt.data_len, 0);
+ (*pkts)->data_len, 0);
}
}
@@ -1312,7 +1312,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
- (*pkts)->pkt.data_len, 0);
+ (*pkts)->data_len, 0);
}
/* Fill hardware descriptor ring with mbuf data */
@@ -2019,10 +2019,10 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
}
rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->pkt.next = NULL;
- mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
- mbuf->pkt.nb_segs = 1;
- mbuf->pkt.in_port = rxq->port_id;
+ mbuf->next = NULL;
+ mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->in_port = rxq->port_id;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
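The same four-line RX initialization (next = NULL, data rewound to the headroom offset, nb_segs = 1, in_port set) recurs in every driver in this patch; a follow-up cleanup could hoist it into a shared helper. A hedged sketch of such a helper, ours rather than the tree's:

    #include <rte_mbuf.h>

    static inline void
    rx_mbuf_reset(struct rte_mbuf *m, uint8_t port_id)
    {
        m->next = NULL;
        m->data = (char *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
        m->nb_segs = 1;
        m->in_port = port_id;
    }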
@@ -178,7 +178,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
- pkt_len = (*pkts)->pkt.data_len;
+ pkt_len = (*pkts)->data_len;
/* write data to descriptor */
txdp->read.buffer_addr = buf_dma_addr;
@@ -197,7 +197,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
uint32_t pkt_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
- pkt_len = (*pkts)->pkt.data_len;
+ pkt_len = (*pkts)->data_len;
/* write data to descriptor */
txdp->read.buffer_addr = buf_dma_addr;
@@ -570,7 +570,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
new_ctx = 0;
tx_pkt = *tx_pkts++;
- pkt_len = tx_pkt->pkt.pkt_len;
+ pkt_len = tx_pkt->pkt_len;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -579,7 +579,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+ vlan_macip_lens = tx_pkt->vlan_macip.data;
/* If hardware offload required */
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
@@ -597,7 +597,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* This will always be the number of segments + the number of
* Context descriptors required to transmit the packet
*/
- nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
/*
* The number of descriptors that must be allocated for a
@@ -757,7 +757,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*
* Set up Transmit Data Descriptor.
*/
- slen = m_seg->pkt.data_len;
+ slen = m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
@@ -768,7 +768,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
- m_seg = m_seg->pkt.next;
+ m_seg = m_seg->next;
} while (m_seg != NULL);
/*
@@ -937,10 +937,10 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
mb = rxep[j].mbuf;
pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
rxq->crc_len);
- mb->pkt.data_len = pkt_len;
- mb->pkt.pkt_len = pkt_len;
- mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
- mb->pkt.hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
+ mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
/* convert descriptor fields to rte mbuf flags */
mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
@@ -995,10 +995,10 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
/* populate the static rte mbuf fields */
mb = rxep[i].mbuf;
rte_mbuf_refcnt_set(mb, 1);
- mb->pkt.next = NULL;
- mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
- mb->pkt.nb_segs = 1;
- mb->pkt.in_port = rxq->port_id;
+ mb->next = NULL;
+ mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->in_port = rxq->port_id;
/* populate the descriptors */
dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
@@ -1247,17 +1247,17 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
rxq->crc_len);
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rte_packet_prefetch(rxm->pkt.data);
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = pkt_len;
- rxm->pkt.data_len = pkt_len;
- rxm->pkt.in_port = rxq->port_id;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch(rxm->data);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->in_port = rxq->port_id;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->pkt.vlan_macip.f.vlan_tci =
+ rxm->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -1268,12 +1268,12 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->ol_flags = pkt_flags;
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
else if (pkt_flags & PKT_RX_FDIR) {
- rxm->pkt.hash.fdir.hash =
+ rxm->hash.fdir.hash =
(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
& IXGBE_ATR_HASH_MASK);
- rxm->pkt.hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
}
/*
* Store the mbuf address into the next entry of the array
@@ -1430,8 +1430,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* Set data length & data buffer address of mbuf.
*/
data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
- rxm->pkt.data_len = data_len;
- rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = data_len;
+ rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
/*
* If this is the first buffer of the received packet,
@@ -1443,13 +1443,13 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
if (first_seg == NULL) {
first_seg = rxm;
- first_seg->pkt.pkt_len = data_len;
- first_seg->pkt.nb_segs = 1;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
} else {
- first_seg->pkt.pkt_len = (uint16_t)(first_seg->pkt.pkt_len + data_len);
+ first_seg->pkt_len = (uint16_t)(first_seg->pkt_len + data_len);
- first_seg->pkt.nb_segs++;
- last_seg->pkt.next = rxm;
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
}
/*
@@ -1472,18 +1472,18 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* mbuf, subtract the length of that CRC part from the
* data length of the previous mbuf.
*/
- rxm->pkt.next = NULL;
+ rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+ first_seg->pkt_len -= ETHER_CRC_LEN;
if (data_len <= ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
- first_seg->pkt.nb_segs--;
- last_seg->pkt.data_len = (uint16_t)
- (last_seg->pkt.data_len -
+ first_seg->nb_segs--;
+ last_seg->data_len = (uint16_t)
+ (last_seg->data_len -
(ETHER_CRC_LEN - data_len));
- last_seg->pkt.next = NULL;
+ last_seg->next = NULL;
} else
- rxm->pkt.data_len =
+ rxm->data_len =
(uint16_t) (data_len - ETHER_CRC_LEN);
}
@@ -1496,13 +1496,13 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* - VLAN TCI, if any,
* - error flags.
*/
- first_seg->pkt.in_port = rxq->port_id;
+ first_seg->in_port = rxq->port_id;
/*
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->pkt.vlan_macip.f.vlan_tci =
+ first_seg->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -1513,17 +1513,17 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->ol_flags = pkt_flags;
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+ first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
else if (pkt_flags & PKT_RX_FDIR) {
- first_seg->pkt.hash.fdir.hash =
+ first_seg->hash.fdir.hash =
(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
& IXGBE_ATR_HASH_MASK);
- first_seg->pkt.hash.fdir.id =
+ first_seg->hash.fdir.id =
rxd.wb.lower.hi_dword.csum_ip.ip_id;
}
/* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch(first_seg->pkt.data);
+ rte_packet_prefetch(first_seg->data);
/*
* Store the mbuf address into the next entry of the array
@@ -3212,10 +3212,10 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
}
rte_mbuf_refcnt_set(mbuf, 1);
- mbuf->pkt.next = NULL;
- mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
- mbuf->pkt.nb_segs = 1;
- mbuf->pkt.in_port = rxq->port_id;
+ mbuf->next = NULL;
+ mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->in_port = rxq->port_id;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
@@ -47,7 +47,7 @@
#endif
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+ (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
(char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
@@ -47,23 +47,21 @@
static struct rte_mbuf mb_def = {
.ol_flags = 0,
- .pkt = {
- .data_len = 0,
- .pkt_len = 0,
-
- .vlan_macip = {
- .data = 0,
- },
- .hash = {
- .rss = 0,
- },
-
- .nb_segs = 1,
- .in_port = 0,
-
- .next = NULL,
- .data = NULL,
+ .data_len = 0,
+ .pkt_len = 0,
+
+ .vlan_macip = {
+ .data = 0,
+ },
+ .hash = {
+ .rss = 0,
},
+
+ .nb_segs = 1,
+ .in_port = 0,
+
+ .next = NULL,
+ .data = NULL,
};
static inline void
@@ -85,7 +83,7 @@ ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
rxdp = rxq->rx_ring + rxq->rxrearm_start;
- def_low = _mm_load_si128((__m128i *)&(mb_def.pkt));
+ def_low = _mm_load_si128((__m128i *)&(mb_def.next));
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
@@ -116,8 +114,8 @@ ixgbe_rxq_rearm(struct igb_rx_queue *rxq)
_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
/* flush mbuf with pkt template */
- _mm_store_si128((__m128i *)&mb0->pkt, vaddr0);
- _mm_store_si128((__m128i *)&mb1->pkt, vaddr1);
+ _mm_store_si128((__m128i *)&mb0->next, vaddr0);
+ _mm_store_si128((__m128i *)&mb1->next, vaddr1);
/* update refcnt per pkt */
rte_mbuf_refcnt_set(mb0, 1);
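The vector PMD depends on the rearm-template fields starting at next being loadable and storable as one 16-byte block; with the pkt sub-struct gone, that block simply begins at the next field, which is all the &mb_def.next / &mb0->next changes express. A hedged compile-time guard for that layout assumption (ours, not in the patch):

    #include <stddef.h>
    #include <rte_common.h>
    #include <rte_mbuf.h>

    static inline void
    mbuf_vec_layout_check(void)
    {
        /* The rearm path loads/stores 16 bytes starting at 'next', so
         * that region must lie entirely inside struct rte_mbuf. */
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) + 16 >
                         sizeof(struct rte_mbuf));
    }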
@@ -316,9 +314,9 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
/* D.3 copy final 3,4 data to rx_pkts */
- _mm_storeu_si128((__m128i *)&(rx_pkts[pos+3]->pkt.data_len),
+ _mm_storeu_si128((__m128i *)&(rx_pkts[pos+3]->data_len),
pkt_mb4);
- _mm_storeu_si128((__m128i *)&(rx_pkts[pos+2]->pkt.data_len),
+ _mm_storeu_si128((__m128i *)&(rx_pkts[pos+2]->data_len),
pkt_mb3);
/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
@@ -330,9 +328,9 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
staterr = _mm_packs_epi32(staterr, zero);
/* D.3 copy final 1,2 data to rx_pkts */
- _mm_storeu_si128((__m128i *)&(rx_pkts[pos+1]->pkt.data_len),
+ _mm_storeu_si128((__m128i *)&(rx_pkts[pos+1]->data_len),
pkt_mb2);
- _mm_storeu_si128((__m128i *)&(rx_pkts[pos]->pkt.data_len),
+ _mm_storeu_si128((__m128i *)&(rx_pkts[pos]->data_len),
pkt_mb1);
/* C.4 calc available number of desc */
@@ -359,7 +357,7 @@ vtx1(volatile union ixgbe_adv_tx_desc *txdp,
/* load buf_addr/buf_physaddr in t0 */
t0 = _mm_loadu_si128((__m128i *)&(pkt->buf_addr));
/* load data, ... pkt_len in t1 */
- t1 = _mm_loadu_si128((__m128i *)&(pkt->pkt.data));
+ t1 = _mm_loadu_si128((__m128i *)&(pkt->data));
/* calc offset = (data - buf_adr) */
offset = _mm_sub_epi64(t1, t0);
@@ -151,9 +151,9 @@ eth_pcap_rx(void *queue,
if (header.len <= buf_size) {
/* pcap packet will fit in the mbuf, go ahead and copy */
- rte_memcpy(mbuf->pkt.data, packet, header.len);
- mbuf->pkt.data_len = (uint16_t)header.len;
- mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+ rte_memcpy(mbuf->data, packet, header.len);
+ mbuf->data_len = (uint16_t)header.len;
+ mbuf->pkt_len = mbuf->data_len;
bufs[i] = mbuf;
num_rx++;
} else {
@@ -200,9 +200,9 @@ eth_pcap_tx_dumper(void *queue,
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
calculate_timestamp(&header.ts);
- header.len = mbuf->pkt.data_len;
+ header.len = mbuf->data_len;
header.caplen = header.len;
- pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->pkt.data);
+ pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->data);
rte_pktmbuf_free(mbuf);
num_tx++;
}
@@ -237,8 +237,8 @@ eth_pcap_tx(void *queue,
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
- ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->pkt.data,
- mbuf->pkt.data_len);
+ ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->data,
+ mbuf->data_len);
if (unlikely(ret != 0))
break;
num_tx++;
@@ -118,7 +118,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
}
rte_prefetch0(cookie);
- rte_packet_prefetch(cookie->pkt.data);
+ rte_packet_prefetch(cookie->data);
rx_pkts[i] = cookie;
vq->vq_used_cons_idx++;
vq_ring_free_chain(vq, desc_idx);
@@ -209,7 +209,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
start_dp[idx].flags = VRING_DESC_F_NEXT;
idx = start_dp[idx].next;
start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
- start_dp[idx].len = cookie->pkt.data_len;
+ start_dp[idx].len = cookie->data_len;
start_dp[idx].flags = 0;
idx = start_dp[idx].next;
txvq->vq_desc_head_idx = idx;
@@ -469,16 +469,16 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
continue;
}
- rxm->pkt.in_port = rxvq->port_id;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
- rxm->pkt.data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
+ rxm->in_port = rxvq->port_id;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
+ rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
- VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);
+ VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
rx_pkts[nb_rx++] = rxm;
rxvq->bytes += len[i] - sizeof(struct virtio_net_hdr);
@@ -555,7 +555,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
break;
}
nb_tx++;
- txvq->bytes += txm->pkt.data_len;
+ txvq->bytes += txm->data_len;
} else {
PMD_TX_LOG(ERR, "No free tx descriptors to transmit");
break;
@@ -59,7 +59,7 @@
#define VIRTQUEUE_MAX_NAME_SZ 32
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+ (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
(char *)(mb)->buf_addr))
#define VTNET_SQ_RQ_QUEUE_IDX 0
@@ -79,7 +79,7 @@
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+ (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
(char *)(mb)->buf_addr))
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
@@ -288,7 +288,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
txm = tx_pkts[nb_tx];
/* Scatter packets are not supported yet; free them when encountered */
- if (txm->pkt.nb_segs != 1) {
+ if (txm->nb_segs != 1) {
PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!");
rte_pktmbuf_free(tx_pkts[nb_tx]);
txq->stats.drop_total++;
@@ -298,7 +298,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
/* data_len includes the Ethernet header, which does not count against the MTU */
- if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
+ if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
rte_pktmbuf_free(tx_pkts[nb_tx]);
txq->stats.drop_total++;
@@ -313,7 +313,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
txd->addr = tbi->bufPA;
- txd->len = txm->pkt.data_len;
+ txd->len = txm->data_len;
/* Mark the last descriptor as End of Packet. */
txd->cq = 1;
@@ -550,21 +550,21 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_pktmbuf_mtod(rxm, void *));
#endif
/* Copy vlan tag in packet buffer */
- rxm->pkt.vlan_macip.f.vlan_tci =
+ rxm->vlan_macip.f.vlan_tci =
rte_le_to_cpu_16((uint16_t)rcd->tci);
} else
rxm->ol_flags = 0;
/* Initialize newly received packet buffer */
- rxm->pkt.in_port = rxq->port_id;
- rxm->pkt.nb_segs = 1;
- rxm->pkt.next = NULL;
- rxm->pkt.pkt_len = (uint16_t)rcd->len;
- rxm->pkt.data_len = (uint16_t)rcd->len;
- rxm->pkt.in_port = rxq->port_id;
- rxm->pkt.vlan_macip.f.vlan_tci = 0;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->in_port = rxq->port_id;
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint16_t)rcd->len;
+ rxm->data_len = (uint16_t)rcd->len;
+ rxm->in_port = rxq->port_id;
+ rxm->vlan_macip.f.vlan_tci = 0;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
rx_pkts[nb_rx++] = rxm;
@@ -109,12 +109,12 @@ eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
for (i = 0; i < num ; i ++) {
rxm = rx_pkts[i];
PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
- rxm->pkt.next = NULL;
- rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
- rxm->pkt.data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
- rxm->pkt.nb_segs = 1;
- rxm->pkt.in_port = pi->port_id;
- rxm->pkt.pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
+ rxm->next = NULL;
+ rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
+ rxm->nb_segs = 1;
+ rxm->in_port = pi->port_id;
+ rxm->pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
}
/* allocate new mbuf for the used descriptor */
while (likely(!virtqueue_full(rxvq))) {
@@ -54,7 +54,7 @@
* rather than gpa<->hva in the virtio spec.
*/
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->pkt.data))
+ ((uint64_t)((mb)->data))
enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
@@ -238,7 +238,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
start_dp[idx].addr = (uintptr_t)NULL;
idx = start_dp[idx].next;
start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
- start_dp[idx].len = cookie->pkt.data_len;
+ start_dp[idx].len = cookie->data_len;
start_dp[idx].flags = 0;
idx = start_dp[idx].next;
txvq->vq_desc_head_idx = idx;
@@ -1015,7 +1015,7 @@ rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex
{
struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
uint32_t tc_index = (qindex >> 2) & 0x3;
- uint32_t pkt_len = pkt->pkt.pkt_len;
+ uint32_t pkt_len = pkt->pkt_len;
s->stats.n_pkts_tc[tc_index] += 1;
s->stats.n_bytes_tc[tc_index] += pkt_len;
@@ -1026,7 +1026,7 @@ rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_
{
struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
uint32_t tc_index = (qindex >> 2) & 0x3;
- uint32_t pkt_len = pkt->pkt.pkt_len;
+ uint32_t pkt_len = pkt->pkt_len;
s->stats.n_pkts_tc_dropped[tc_index] += 1;
s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
@@ -1036,7 +1036,7 @@ static inline void
rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
- uint32_t pkt_len = pkt->pkt.pkt_len;
+ uint32_t pkt_len = pkt->pkt_len;
qe->stats.n_pkts += 1;
qe->stats.n_bytes += pkt_len;
@@ -1046,7 +1046,7 @@ static inline void
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
{
struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
- uint32_t pkt_len = pkt->pkt.pkt_len;
+ uint32_t pkt_len = pkt->pkt_len;
qe->stats.n_pkts_dropped += 1;
qe->stats.n_bytes_dropped += pkt_len;
@@ -1563,7 +1563,7 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
struct rte_sched_pipe *pipe = grinder->pipe;
struct rte_mbuf *pkt = grinder->pkt;
uint32_t tc_index = grinder->tc_index;
- uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+ uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
uint32_t subport_tb_credits = subport->tb_credits;
uint32_t subport_tc_credits = subport->tc_credits[tc_index];
uint32_t pipe_tb_credits = pipe->tb_credits;
@@ -1599,7 +1599,7 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
struct rte_sched_pipe *pipe = grinder->pipe;
struct rte_mbuf *pkt = grinder->pkt;
uint32_t tc_index = grinder->tc_index;
- uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+ uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
uint32_t subport_tb_credits = subport->tb_credits;
uint32_t subport_tc_credits = subport->tc_credits[tc_index];
uint32_t pipe_tb_credits = pipe->tb_credits;
@@ -1640,7 +1640,7 @@ grinder_schedule(struct rte_sched_port *port, uint32_t pos)
struct rte_sched_grinder *grinder = port->grinder + pos;
struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
struct rte_mbuf *pkt = grinder->pkt;
- uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+ uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
#if RTE_SCHED_TS_CREDITS_CHECK
if (!grinder_credits_check(port, pos)) {
@@ -106,7 +106,7 @@ extern "C" {
2. Start of Frame Delimiter (SFD): 1 byte;
3. Frame Check Sequence (FCS): 4 bytes;
4. Inter Frame Gap (IFG): 12 bytes.
-The FCS is considered overhead only if not included in the packet length (field pkt.pkt_len
+The FCS is considered overhead only if not included in the packet length (field pkt_len
of struct rte_mbuf). */
#ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
#define RTE_SCHED_FRAME_OVERHEAD_DEFAULT 24
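For reference, the default of 24 bytes is simply the sum of the components listed above together with the 7-byte preamble (item 1, immediately above this hunk):

    7 (preamble) + 1 (SFD) + 4 (FCS) + 12 (IFG) = 24 bytes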
@@ -196,7 +196,7 @@ struct rte_sched_port_params {
};
/** Path through the scheduler hierarchy used by the scheduler enqueue operation to
-identify the destination queue for the current packet. Stored in the field pkt.hash.sched
+identify the destination queue for the current packet. Stored in the field hash.sched
of struct rte_mbuf of each packet, typically written by the classification stage and read by
scheduler enqueue.*/
struct rte_sched_port_hierarchy {
@@ -352,7 +352,7 @@ static inline void
rte_sched_port_pkt_write(struct rte_mbuf *pkt,
uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+ struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
sched->color = (uint32_t) color;
sched->subport = subport;
@@ -381,7 +381,7 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
static inline void
rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+ struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
*subport = sched->subport;
*pipe = sched->pipe;
@@ -392,7 +392,7 @@ rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint3
static inline enum rte_meter_color
rte_sched_port_pkt_read_color(struct rte_mbuf *pkt)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+ struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
return (enum rte_meter_color) sched->color;
}
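A hedged round trip over the two accessors above (values arbitrary): the classification stage writes the path into pkt->hash.sched and the enqueue stage reads it back.

    #include <rte_mbuf.h>
    #include <rte_sched.h>

    static void
    classify_and_check(struct rte_mbuf *pkt)
    {
        uint32_t subport, pipe, tc, queue;

        rte_sched_port_pkt_write(pkt, 0, 5, 2, 1, e_RTE_METER_GREEN);
        rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &tc, &queue);
        /* Now subport == 0, pipe == 5, tc == 2 and queue == 1. */
    }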