@@ -432,8 +432,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
/* Combine the packet header write. VLAN is not considered here */
- mb->vlan_macip.f.l2_len = l2_len;
- mb->vlan_macip.f.l3_len = l3_len;
+ mb->tx_ol.l2_len = l2_len;
+ mb->tx_ol.l3_len = l3_len;
mb->ol_flags = ol_flags;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
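For orientation, a minimal sketch (not part of the patch) of how a TX path primes an mbuf under the new layout; tx_ol is the union introduced in rte_mbuf.h below, and PKT_TX_IP_CKSUM is the existing offload flag:

#include <rte_mbuf.h>

static inline void
prime_ip_cksum(struct rte_mbuf *m, uint8_t l2_len, uint16_t l3_len)
{
	/* header lengths move from vlan_macip.f.* into the tx_ol union */
	m->tx_ol.l2_len = l2_len;	/* e.g. sizeof(struct ether_hdr) */
	m->tx_ol.l3_len = l3_len;	/* e.g. sizeof(struct ipv4_hdr) */
	m->ol_flags |= PKT_TX_IP_CKSUM;	/* NIC fills the IPv4 checksum */
}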
@@ -208,9 +208,9 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
pkt->nb_segs = 1;
pkt->pkt_len = pkt_size;
pkt->ol_flags = ol_flags;
- pkt->vlan_macip.f.vlan_tci = vlan_tci;
- pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_tci0 = vlan_tci;
+ pkt->tx_ol.l2_len = sizeof(struct ether_hdr);
+ pkt->tx_ol.l3_len = sizeof(struct ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
next_flow = (next_flow + 1) % cfg_n_flows;
@@ -116,9 +116,9 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
ether_addr_copy(&ports[fs->tx_port].eth_addr,
ð_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->tx_ol.l2_len = sizeof(struct ether_hdr);
+ mb->tx_ol.l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_tci0 = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
@@ -118,9 +118,9 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
ether_addr_copy(&addr, ð_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->tx_ol.l2_len = sizeof(struct ether_hdr);
+ mb->tx_ol.l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_tci0 = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
@@ -158,15 +158,14 @@ pkt_burst_receive(struct fwd_stream *fs)
eth_type, (unsigned) mb->pkt_len,
(int)mb->nb_segs);
if (ol_flags & PKT_RX_RSS_HASH) {
- printf(" - RSS hash=0x%x", (unsigned) mb->hash.rss);
+ printf(" - RSS hash=0x%x", (unsigned) mb->filters.rss);
printf(" - RSS queue=0x%x",(unsigned) fs->rx_queue);
}
else if (ol_flags & PKT_RX_FDIR)
printf(" - FDIR hash=0x%x - FDIR id=0x%x ",
- mb->hash.fdir.hash, mb->hash.fdir.id);
+ mb->filters.fdir.hash, mb->filters.fdir.id);
if (ol_flags & PKT_RX_VLAN_PKT)
- printf(" - VLAN tci=0x%x",
- mb->vlan_macip.f.vlan_tci);
+ printf(" - VLAN tci=0x%x", mb->vlan_tci0);
printf("\n");
if (ol_flags != 0) {
int rxf;
@@ -406,8 +406,9 @@ testpmd_mbuf_ctor(struct rte_mempool *mp,
mb->ol_flags = 0;
mb->data_off = RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
- mb->vlan_macip.data = 0;
- mb->hash.rss = 0;
+ mb->vlan_tci0 = 0;
+ mb->tx_ol.u64 = 0;
+ mb->filters.rss = 0;
}
static void
@@ -264,9 +264,9 @@ pkt_burst_transmit(struct fwd_stream *fs)
pkt->nb_segs = tx_pkt_nb_segs;
pkt->pkt_len = tx_pkt_length;
pkt->ol_flags = ol_flags;
- pkt->vlan_macip.f.vlan_tci = vlan_tci;
- pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_tci0 = vlan_tci;
+ pkt->tx_ol.l2_len = sizeof(struct ether_hdr);
+ pkt->tx_ol.l3_len = sizeof(struct ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -261,19 +261,19 @@ nomore_mbuf:
*/
pkt->nb_segs = tx_pkt_nb_segs;
pkt->pkt_len = tx_pkt_length;
- pkt->vlan_macip.f.l2_len = eth_hdr_size;
+ pkt->tx_ol.l2_len = eth_hdr_size;
if (ipv4) {
- pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv4;
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_tci0 = ETHER_TYPE_IPv4;
+ pkt->tx_ol.l3_len = sizeof(struct ipv4_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;
else
pkt->ol_flags = PKT_RX_IPV4_HDR;
} else {
- pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv6;
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);
+ pkt->vlan_tci0 = ETHER_TYPE_IPv6;
+ pkt->tx_ol.l3_len = sizeof(struct ipv6_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;
@@ -121,7 +121,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = 0;
+ bufs[i]->filters.rss = 0;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
@@ -143,7 +143,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
if (rte_lcore_count() >= 3) {
clear_packet_count();
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = (i & 1) << 8;
+ bufs[i]->filters.rss = (i & 1) << 8;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
@@ -168,7 +168,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
* so load gets distributed */
clear_packet_count();
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = i;
+ bufs[i]->filters.rss = i;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
@@ -200,7 +200,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
return -1;
}
for (i = 0; i < BIG_BATCH; i++)
- many_bufs[i]->hash.rss = i << 2;
+ many_bufs[i]->filters.rss = i << 2;
for (i = 0; i < BIG_BATCH/BURST; i++) {
rte_distributor_process(d, &many_bufs[i*BURST], BURST);
@@ -281,7 +281,7 @@ sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)
while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
rte_distributor_process(d, NULL, 0);
for (j = 0; j < BURST; j++) {
- bufs[j]->hash.rss = (i+j) << 1;
+ bufs[j]->filters.rss = (i+j) << 1;
bufs[j]->refcnt = 1;
}
@@ -360,7 +360,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = 0;
+ bufs[i]->filters.rss = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
@@ -373,7 +373,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
return -1;
}
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = 0;
+ bufs[i]->filters.rss = 0;
/* get worker zero to quit */
zero_quit = 1;
@@ -417,7 +417,7 @@ test_flush_with_worker_shutdown(struct rte_distributor *d,
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = 0;
+ bufs[i]->filters.rss = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
@@ -489,7 +489,7 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)
zero_quit = 0;
quit = 1;
for (i = 0; i < num_workers; i++)
- bufs[i]->hash.rss = i << 1;
+ bufs[i]->filters.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
@@ -160,7 +160,7 @@ perf_test(struct rte_distributor *d, struct rte_mempool *p)
}
/* ensure we have different hash value for each pkt */
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = i;
+ bufs[i]->filters.rss = i;
start = rte_rdtsc();
for (i = 0; i < (1<<ITER_POWER); i++)
@@ -199,7 +199,7 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)
quit = 1;
for (i = 0; i < num_workers; i++)
- bufs[i]->hash.rss = i << 1;
+ bufs[i]->filters.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
@@ -282,7 +282,7 @@ rte_distributor_process(struct rte_distributor *d,
next_mb = mbufs[next_idx++];
next_value = (((int64_t)(uintptr_t)next_mb)
<< RTE_DISTRIB_FLAG_BITS);
- new_tag = (next_mb->hash.rss | 1);
+ new_tag = (next_mb->filters.rss | 1);
uint32_t match = 0;
unsigned i;
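Usage sketch (illustrative, not from the patch): the distributor keys flows on the renamed filters.rss field, so callers tag each mbuf before handing a burst to rte_distributor_process(); the "| 1" above keeps in-flight tags non-zero. compute_flow_tag() is a hypothetical application hash:

	unsigned int i;

	for (i = 0; i < nb_rx; i++)
		bufs[i]->filters.rss = compute_flow_tag(bufs[i]); /* hypothetical */
	rte_distributor_process(d, bufs, nb_rx);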
@@ -173,8 +173,7 @@ ip_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
struct rte_mbuf *ms;
/* adjust start of the last fragment data. */
- rte_pktmbuf_adj(mp, (uint16_t)(mp->vlan_macip.f.l2_len +
- mp->vlan_macip.f.l3_len));
+ rte_pktmbuf_adj(mp, (uint16_t)(mp->tx_ol.l2_len + mp->tx_ol.l3_len));
/* chain two fragments. */
ms = rte_pktmbuf_lastseg(mn);
@@ -198,7 +198,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
out_pkt->pkt_len - sizeof(struct ipv4_hdr));
out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
- out_pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ out_pkt->tx_ol.l3_len = sizeof(struct ipv4_hdr);
/* Write the fragment to the output list */
pkts_out[out_pkt_pos] = out_pkt;
@@ -87,10 +87,10 @@ ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
/* update ipv4 header for the reassembled packet */
ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
- m->vlan_macip.f.l2_len);
+ m->tx_ol.l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
- m->vlan_macip.f.l3_len));
+ m->tx_ol.l3_len));
ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
ip_hdr->hdr_checksum = 0;
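A caller-side sketch, assuming the renamed fields: the reassembly code reads tx_ol.l2_len and tx_ol.l3_len from the incoming fragment, so the application must fill them before the call (IPV4_HDR_IHL_MASK and IPV4_IHL_MULTIPLIER are the existing rte_ip.h constants; mb, ip_hdr, tbl, dr and tms follow the library's example code):

	struct rte_mbuf *mo;

	mb->tx_ol.l2_len = sizeof(struct ether_hdr);
	mb->tx_ol.l3_len = (ip_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
			IPV4_IHL_MULTIPLIER;
	mo = rte_ipv4_frag_reassemble_packet(tbl, dr, mb, tms, ip_hdr);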
@@ -137,7 +137,7 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
ip_ofs *= IPV4_HDR_OFFSET_UNITS;
ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
- mb->vlan_macip.f.l3_len);
+ mb->tx_ol.l3_len);
IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"mbuf: %p, tms: %" PRIu64
@@ -109,7 +109,7 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
/* update ipv6 header for the reassembled datagram */
ip_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(m, uint8_t *) +
- m->vlan_macip.f.l2_len);
+ m->tx_ol.l2_len);
ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);
@@ -120,8 +120,7 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
* other headers, so we assume there are no other headers and thus update
* the main IPv6 header instead.
*/
- move_len = m->vlan_macip.f.l2_len + m->vlan_macip.f.l3_len -
- sizeof(*frag_hdr);
+ move_len = m->tx_ol.l2_len + m->tx_ol.l3_len - sizeof(*frag_hdr);
frag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);
ip_hdr->proto = frag_hdr->next_header;
@@ -112,41 +112,38 @@ extern "C" {
#define PKT_TX_OFFLOAD_MASK (PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
/** Offload features */
-union rte_vlan_macip {
- uint32_t data;
+union rte_tx_offloads {
+ uint64_t u64;
struct {
+ uint8_t l4_len; /**< L4 Header Length */
+ uint8_t inner_l3_len; /**< Inner L3 (tunnelling) header length */
uint16_t l3_len:9; /**< L3 (IP) Header Length. */
uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
- uint16_t vlan_tci;
- /**< VLAN Tag Control Identifier (CPU order). */
- } f;
-};
-/*
- * Compare mask for vlan_macip_len.data,
- * should be in sync with rte_vlan_macip.f layout.
- * */
-#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
-#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
-#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
-/**< MAC+IP length. */
-#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+ uint16_t mss; /**< Maximum segment size */
+ uint16_t reserved;
+ };
+};
+/* define a set of marker types that can be used to refer to set points in the
+ * mbuf */
+typedef void *MARKER[0]; /**< generic marker for a point in a structure */
+typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
+ * with a single assignment */
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
struct rte_mbuf {
/** dummy field marking start of first cache line */
- void *cache_line0[0];
+ MARKER cache_line0;
void *buf_addr; /**< Virtual address of segment buffer. */
phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */
/* next 8 bytes are initialised on RX descriptor rearm */
- uint64_t rearm_data[0]; /**< dummy element so we can get uin64_t ptrs
- * to this part of the mbuf without alias error
- */
+ MARKER64 rearm_data; /**< dummy element marking start of RX rearm data */
+
uint16_t buf_len; /**< Length of segment buffer. */
uint16_t data_off;
/**
@@ -170,27 +167,42 @@ struct rte_mbuf {
/* remaining bytes are set on RX when pulling packet from descriptor */
uint64_t ol_flags; /**< Offload features. */
- __m128i rx_descriptor_fields1[0]; /**< dummy field used as marker for
- * writes in a vector driver */
+ /** dummy field used as marker for writes in a vector driver. */
+ MARKER rx_descriptor_fields1;
+
uint16_t packet_type; /**< Type of packet, e.g. protocols used */
uint16_t data_len; /**< Amount of data in segment buffer. */
uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
- union rte_vlan_macip vlan_macip;
uint16_t vlan_tci0; /**< first VLAN Tag Control Identifier */
+ uint16_t vlan_tci1; /**< second VLAN TCI */
union {
uint32_t rss; /**< RSS hash result if RSS enabled */
struct {
uint16_t hash;
uint16_t id;
} fdir; /**< Filter identifier if FDIR enabled */
- uint32_t sched; /**< Hierarchical scheduler */
- } hash; /**< hash information */
+ struct {
+ uint32_t lo;
+ union {
+ uint32_t hi;
+ uint32_t filter_id;
+ };
+ } fdir_i40e; /**< i40e enhanced flow director */
+ uint32_t sched; /**< Hierarchical scheduler */
+ } filters; /**< NIC filter information */
+ uint32_t sequence; /**< Packet sequence number */
/* second cache line, fields only used in slow path or on TX */
/** dummy field marking start of second cache line */
- void *cache_line1[0] __rte_cache_aligned;
+ MARKER cache_line1 __rte_cache_aligned;
+
struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
struct rte_mbuf *next; /**< Next segment of scattered packet. */
+ void *userdata; /**< Pointer available for application use */
+
+ union rte_tx_offloads tx_ol; /**< Fields to enable TX offloads */
+
union {
uint8_t metadata[0];
uint16_t metadata16[0];
@@ -238,7 +250,6 @@ struct rte_mbuf {
*/
#define RTE_MBUF_DIRECT(mb) (RTE_MBUF_FROM_BADDR((mb)->buf_addr) == (mb))
-
/**
* Private data in case of pktmbuf pool.
*
@@ -475,17 +486,20 @@ void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
* The given mbuf must have only one segment.
*
* @param m
- * The packet mbuf to be resetted.
+ * The packet mbuf to be reset.
*/
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
m->next = NULL;
m->pkt_len = 0;
- m->vlan_macip.data = 0;
+ m->vlan_tci0 = m->vlan_tci1 = 0;
+ m->sequence = 0;
m->nb_segs = 1;
m->port = 0xff;
+ m->packet_type = 0;
m->ol_flags = 0;
+ m->tx_ol.u64 = 0;
m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
RTE_PKTMBUF_HEADROOM : m->buf_len;
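A sketch of what the new markers buy (assumed usage, not code from the patch): because rearm_data is a MARKER64, a driver can reset the whole 8-byte rearm region with a single store instead of per-field writes, and tx_ol.u64 likewise clears every TX offload length at once:

static inline void
fast_rearm(struct rte_mbuf *m, uint64_t rearm_template)
{
	/* one store covers buf_len, data_off and the rest of the
	 * 8-byte region named by the rearm_data marker */
	*(uint64_t *)&m->rearm_data = rearm_template;
	m->tx_ol.u64 = 0;	/* l2/l3/l4 lengths, inner_l3_len, mss */
}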
@@ -538,22 +552,20 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
RTE_MBUF_DIRECT(mi) &&
rte_mbuf_refcnt_read(mi) == 1);
+ /* save any needed values */
+ struct rte_mempool *pool = mi->pool;
+
+ /* increment reference count of direct mbuf */
rte_mbuf_refcnt_update(md, 1);
- mi->buf_physaddr = md->buf_physaddr;
- mi->buf_addr = md->buf_addr;
- mi->buf_len = md->buf_len;
- mi->next = md->next;
- mi->data_off = md->data_off;
- mi->data_len = md->data_len;
- mi->port = md->port;
- mi->vlan_macip = md->vlan_macip;
- mi->hash = md->hash;
+ /* assign all fields over, then reset some values */
+ *mi = *md;
+ mi->pool = pool;
mi->next = NULL;
mi->pkt_len = mi->data_len;
mi->nb_segs = 1;
- mi->ol_flags = md->ol_flags;
+ rte_mbuf_refcnt_set(mi, 1);
__rte_mbuf_sanity_check(mi, 1);
__rte_mbuf_sanity_check(md, 0);
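The rewrite trades the old field-by-field copies for one struct assignment plus fix-ups: any field later added to rte_mbuf is cloned automatically, at the cost of restoring pool, next, pkt_len, nb_segs and the reference count afterwards. A usage sketch, where indirect_pool is an assumed mempool the application created for indirect mbufs:

	struct rte_mbuf *mi = rte_pktmbuf_alloc(indirect_pool);

	if (mi != NULL)
		rte_pktmbuf_attach(mi, md);	/* mi now shares md's data buffer */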
@@ -116,6 +116,27 @@ struct e1000_vfta {
uint32_t vfta[IGB_VFTA_SIZE];
};
+/** Offload features for context descriptors - previously in mbuf */
+union rte_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare mask for vlan_macip_len.data,
+ * should be in sync with rte_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/**< MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
/*
* VF data which is used by PF host only
*/
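Both e1000 TX paths below rebuild the legacy 32-bit compare word from the new per-field mbuf data; a shared helper along these lines (hypothetical, not part of the patch) would keep that layout knowledge in one place:

static inline uint32_t
tx_pkt_vlan_macip_data(const struct rte_mbuf *m)
{
	union rte_vlan_macip vm = { .f = {
		.l3_len   = m->tx_ol.l3_len,
		.l2_len   = m->tx_ol.l2_len,
		.vlan_tci = m->vlan_tci0,
	} };

	return vm.data;
}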
@@ -420,7 +420,11 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
PKT_TX_L4_MASK));
if (tx_ol_req) {
- hdrlen = tx_pkt->vlan_macip;
+ hdrlen = (union rte_vlan_macip){ .f = {
+ .l3_len = tx_pkt->tx_ol.l3_len,
+ .l2_len = tx_pkt->tx_ol.l2_len,
+ .vlan_tci = tx_pkt->vlan_tci0,
+ } };
/* If a new context needs to be built, or the existing ctx reused. */
ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
@@ -515,8 +519,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Set VLAN Tag offload fields. */
if (ol_flags & PKT_TX_VLAN_PKT) {
cmd_type_len |= E1000_TXD_CMD_VLE;
- popts_spec = tx_pkt->vlan_macip.f.vlan_tci <<
- E1000_TXD_VLAN_SHIFT;
+ popts_spec = tx_pkt->vlan_tci0 << E1000_TXD_VLAN_SHIFT;
}
if (tx_ol_req) {
@@ -783,7 +786,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
/*
* Store the mbuf address into the next entry of the array
@@ -1009,7 +1012,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
@@ -353,7 +353,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t tx_ol_req;
uint32_t new_ctx = 0;
uint32_t ctx = 0;
- uint32_t vlan_macip_lens;
+ union rte_vlan_macip vlan_macip_lens;
txq = tx_queue;
sw_ring = txq->sw_ring;
@@ -378,13 +378,15 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->vlan_macip.data;
+ vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci0;
+ vlan_macip_lens.f.l3_len = tx_pkt->tx_ol.l3_len;
+ vlan_macip_lens.f.l2_len = tx_pkt->tx_ol.l2_len;
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
/* If a context descriptor needs to be built. */
if (tx_ol_req) {
ctx = what_advctx_update(txq, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
/* Only allocate context descriptor if required*/
new_ctx = (ctx == IGB_CTX_NUM);
ctx = txq->ctx_curr;
@@ -500,7 +502,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -759,11 +761,10 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->data_len = pkt_len;
rxm->port = rxq->port_id;
- rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->filters.rss = rxd.wb.lower.hi_dword.rss;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
@@ -993,14 +994,13 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* - error flags.
*/
first_seg->port = rxq->port_id;
- first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+ first_seg->filters.rss = rxd.wb.lower.hi_dword.rss;
/*
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ first_seg->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
@@ -614,16 +614,16 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_macip.f.vlan_tci = rx_status &
+ mb->vlan_tci0 = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
- rte_le_to_cpu_16(\
- rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
+ rte_le_to_cpu_16(\
+ rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
mb->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
- mb->hash.rss = rte_le_to_cpu_32(\
+ mb->filters.rss = rte_le_to_cpu_32(\
rxdp->wb.qword0.hi_dword.rss);
}
@@ -851,7 +851,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->data_len = rx_packet_len;
rxm->port = rxq->port_id;
- rxm->vlan_macip.f.vlan_tci = rx_status &
+ rxm->vlan_tci0 = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -859,7 +859,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
rxm->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
- rxm->hash.rss =
+ rxm->filters.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
rx_pkts[nb_rx++] = rxm;
@@ -1004,7 +1004,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
}
first_seg->port = rxq->port_id;
- first_seg->vlan_macip.f.vlan_tci = (rx_status &
+ first_seg->vlan_tci0 = (rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -1012,7 +1012,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
first_seg->ol_flags = pkt_flags;
if (pkt_flags & PKT_RX_RSS_HASH)
- rxm->hash.rss =
+ rxm->filters.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
/* Prefetch data of first segment, if configured to do so. */
@@ -1107,8 +1107,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
ol_flags = tx_pkt->ol_flags;
- l2_len = tx_pkt->vlan_macip.f.l2_len;
- l3_len = tx_pkt->vlan_macip.f.l3_len;
+ l2_len = tx_pkt->tx_ol.l2_len;
+ l3_len = tx_pkt->tx_ol.l3_len;
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
@@ -1144,7 +1144,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Descriptor based VLAN insertion */
if (ol_flags & PKT_TX_VLAN_PKT) {
- tx_flags |= tx_pkt->vlan_macip.f.vlan_tci <<
+ tx_flags |= tx_pkt->vlan_tci0 <<
I40E_TX_FLAG_L2TAG1_SHIFT;
tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -98,6 +98,27 @@
#define IXGBE_5TUPLE_MAX_PRI 7
#define IXGBE_5TUPLE_MIN_PRI 1
+/** Offload features for context descriptors - previously in mbuf */
+union rte_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare mask for vlan_macip_len.data,
+ * should be in sync with rte_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/**< MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
/*
* Information about the fdir mode.
*/
@@ -599,7 +599,11 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->vlan_macip.data;
+ vlan_macip_lens = (union rte_vlan_macip){ .f = {
+ .l3_len = tx_pkt->tx_ol.l3_len,
+ .l2_len = tx_pkt->tx_ol.l2_len,
+ .vlan_tci = tx_pkt->vlan_tci0,
+ } }.data;
/* If hardware offload required */
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
@@ -959,8 +963,8 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
rxq->crc_len);
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
- mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ mb->vlan_tci0 = rxdp[j].wb.upper.vlan;
+ mb->filters.rss = rxdp[j].wb.lower.hi_dword.rss;
/* convert descriptor fields to rte mbuf flags */
mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
@@ -1277,8 +1281,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
@@ -1288,12 +1291,12 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->ol_flags = pkt_flags;
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->filters.rss = rxd.wb.lower.hi_dword.rss;
else if (pkt_flags & PKT_RX_FDIR) {
- rxm->hash.fdir.hash =
+ rxm->filters.fdir.hash =
(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
& IXGBE_ATR_HASH_MASK);
- rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rxm->filters.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
}
/*
* Store the mbuf address into the next entry of the array
@@ -1522,8 +1525,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ first_seg->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
@@ -1533,12 +1535,12 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->ol_flags = pkt_flags;
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+ first_seg->filters.rss = rxd.wb.lower.hi_dword.rss;
else if (pkt_flags & PKT_RX_FDIR) {
- first_seg->hash.fdir.hash =
+ first_seg->filters.fdir.hash =
(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
& IXGBE_ATR_HASH_MASK);
- first_seg->hash.fdir.id =
+ first_seg->filters.fdir.id =
rxd.wb.lower.hi_dword.csum_ip.ip_id;
}
@@ -287,9 +287,9 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
/* D.3 copy final 3,4 data to rx_pkts */
- _mm_storeu_si128(rx_pkts[pos+3]->rx_descriptor_fields1,
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
pkt_mb4);
- _mm_storeu_si128(rx_pkts[pos+2]->rx_descriptor_fields1,
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
pkt_mb3);
/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
@@ -301,9 +301,9 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
staterr = _mm_packs_epi32(staterr, zero);
/* D.3 copy final 1,2 data to rx_pkts */
- _mm_storeu_si128(rx_pkts[pos+1]->rx_descriptor_fields1,
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
pkt_mb2);
- _mm_storeu_si128(rx_pkts[pos]->rx_descriptor_fields1,
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
/* C.4 calc available number of desc */
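The added casts are needed because rx_descriptor_fields1 is now a MARKER (an array of void *): the member decays to void ** rather than the __m128i * that _mm_storeu_si128() expects, so the store goes through the marker's address and a void * cast, e.g. (pattern only, using the names from the hunk above):

	_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, pkt_mb1);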
@@ -549,7 +549,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_pktmbuf_mtod(rxm, void *));
#endif
/* Copy vlan tag in packet buffer */
- rxm->vlan_macip.f.vlan_tci =
+ rxm->vlan_tci0 =
rte_le_to_cpu_16((uint16_t)rcd->tci);
} else
@@ -562,7 +562,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->pkt_len = (uint16_t)rcd->len;
rxm->data_len = (uint16_t)rcd->len;
rxm->port = rxq->port_id;
- rxm->vlan_macip.f.vlan_tci = 0;
+ rxm->vlan_tci0 = 0;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rx_pkts[nb_rx++] = rxm;
@@ -195,10 +195,12 @@ struct rte_sched_port_params {
#endif
};
-/** Path through the scheduler hierarchy used by the scheduler enqueue operation to
-identify the destination queue for the current packet. Stored in the field hash.sched
-of struct rte_mbuf of each packet, typically written by the classification stage and read by
-scheduler enqueue.*/
+/**
+ * Path through the scheduler hierarchy used by the scheduler enqueue operation
+ * to identify the destination queue for the current packet. Stored in the field
+ * filters.sched of struct rte_mbuf of each packet, typically written by the
+ * classification stage and read by scheduler enqueue.
+ */
struct rte_sched_port_hierarchy {
uint32_t queue:2; /**< Queue ID (0 .. 3) */
uint32_t traffic_class:2; /**< Traffic class ID (0 .. 3)*/
@@ -352,7 +354,8 @@ static inline void
rte_sched_port_pkt_write(struct rte_mbuf *pkt,
uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue, enum rte_meter_color color)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+ struct rte_sched_port_hierarchy *sched =
+ (struct rte_sched_port_hierarchy *) &pkt->filters.sched;
sched->color = (uint32_t) color;
sched->subport = subport;
@@ -381,7 +384,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
static inline void
rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+ struct rte_sched_port_hierarchy *sched =
+ (struct rte_sched_port_hierarchy *) &pkt->filters.sched;
*subport = sched->subport;
*pipe = sched->pipe;
@@ -392,7 +396,8 @@ rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint3
static inline enum rte_meter_color
rte_sched_port_pkt_read_color(struct rte_mbuf *pkt)
{
- struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
+ struct rte_sched_port_hierarchy *sched =
+ (struct rte_sched_port_hierarchy *) &pkt->filters.sched;
return (enum rte_meter_color) sched->color;
}
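An end-to-end usage sketch with the renamed storage (the subport/pipe/tc/queue values are placeholders): classification writes the tree path into pkt->filters.sched, and the scheduler reads it back on enqueue:

	uint32_t subport, pipe, traffic_class, queue;

	rte_sched_port_pkt_write(pkt, 0, 0, 0, 0, e_RTE_METER_GREEN);
	rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe,
			&traffic_class, &queue);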