@@ -384,7 +384,7 @@ main_loop(__attribute__((unused)) void *dummy)
}
}
- port = dst_ports[pkt->in_port];
+ port = dst_ports[pkt->port];
/* Transmit the packet */
nic_tx_send_packet(pkt, (uint8_t)port);
@@ -337,7 +337,7 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);
/* copy metadata from source packet*/
- hdr->in_port = pkt->in_port;
+ hdr->port = pkt->port;
hdr->vlan_macip = pkt->vlan_macip;
hdr->hash = pkt->hash;
@@ -540,7 +540,7 @@ app_lcore_worker(
ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
- port = pkt->in_port;
+ port = pkt->port;
}
pos = lp->mbuf_out[port].n_mbufs;
@@ -211,7 +211,7 @@ enqueue_packet(struct rte_mbuf *buf, uint8_t port)
static void
handle_packet(struct rte_mbuf *buf)
{
- const uint8_t in_port = buf->in_port;
+ const uint8_t in_port = buf->port;
const uint8_t out_port = output_ports[in_port];
enqueue_packet(buf, out_port);
@@ -109,7 +109,7 @@ rte_pktmbuf_init(struct rte_mempool *mp,
/* init some constant fields */
m->pool = mp;
m->nb_segs = 1;
- m->in_port = 0xff;
+ m->port = 0xff;
}
/* do some sanity checks on a mbuf: panic if it fails */
@@ -163,7 +163,7 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx16", nb_segs=%u, "
"in_port=%u\n", m->pkt_len, m->ol_flags,
- (unsigned)m->nb_segs, (unsigned)m->in_port);
+ (unsigned)m->nb_segs, (unsigned)m->port);
nb_segs = m->nb_segs;
while (m && nb_segs != 0) {
@@ -166,7 +166,7 @@ struct rte_mbuf {
/* these fields are valid for first segment only */
uint8_t nb_segs; /**< Number of segments. */
- uint8_t in_port; /**< Input port. */
+ uint8_t port; /**< Input port. */
uint16_t ol_flags; /**< Offload features. */
uint16_t reserved; /**< Unused field. Required for padding. */
@@ -473,7 +473,7 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
m->pkt_len = 0;
m->vlan_macip.data = 0;
m->nb_segs = 1;
- m->in_port = 0xff;
+ m->port = 0xff;
m->ol_flags = 0;
m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
@@ -536,7 +536,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
mi->next = md->next;
mi->data_off = md->data_off;
mi->data_len = md->data_len;
- mi->in_port = md->in_port;
+ mi->port = md->port;
mi->vlan_macip = md->vlan_macip;
mi->hash = md->hash;
@@ -776,7 +776,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->next = NULL;
rxm->pkt_len = pkt_len;
rxm->data_len = pkt_len;
- rxm->in_port = rxq->port_id;
+ rxm->port = rxq->port_id;
rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
rxm->ol_flags = (uint16_t)(rxm->ol_flags |
@@ -1002,7 +1002,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* - IP checksum flag,
* - error flags.
*/
- first_seg->in_port = rxq->port_id;
+ first_seg->port = rxq->port_id;
first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
@@ -757,7 +757,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->next = NULL;
rxm->pkt_len = pkt_len;
rxm->data_len = pkt_len;
- rxm->in_port = rxq->port_id;
+ rxm->port = rxq->port_id;
rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
@@ -992,7 +992,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* - VLAN TCI, if any,
* - error flags.
*/
- first_seg->in_port = rxq->port_id;
+ first_seg->port = rxq->port_id;
first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
/*
@@ -688,7 +688,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
mb->next = NULL;
mb->data_off = RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
- mb->in_port = rxq->port_id;
+ mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
rxdp[i].read.hdr_addr = dma_addr;
@@ -849,7 +849,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->next = NULL;
rxm->pkt_len = rx_packet_len;
rxm->data_len = rx_packet_len;
- rxm->in_port = rxq->port_id;
+ rxm->port = rxq->port_id;
rxm->vlan_macip.f.vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
@@ -1003,7 +1003,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
ETHER_CRC_LEN);
}
- first_seg->in_port = rxq->port_id;
+ first_seg->port = rxq->port_id;
first_seg->vlan_macip.f.vlan_tci = (rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
@@ -2021,7 +2021,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
mbuf->next = NULL;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
- mbuf->in_port = rxq->port_id;
+ mbuf->port = rxq->port_id;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
@@ -998,7 +998,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
mb->next = NULL;
mb->data_off = RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
- mb->in_port = rxq->port_id;
+ mb->port = rxq->port_id;
/* populate the descriptors */
dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
@@ -1253,7 +1253,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->next = NULL;
rxm->pkt_len = pkt_len;
rxm->data_len = pkt_len;
- rxm->in_port = rxq->port_id;
+ rxm->port = rxq->port_id;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
@@ -1496,7 +1496,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* - VLAN TCI, if any,
* - error flags.
*/
- first_seg->in_port = rxq->port_id;
+ first_seg->port = rxq->port_id;
/*
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
@@ -3216,7 +3216,7 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
mbuf->next = NULL;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
- mbuf->in_port = rxq->port_id;
+ mbuf->port = rxq->port_id;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
@@ -469,7 +469,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
continue;
}
- rxm->in_port = rxvq->port_id;
+ rxm->port = rxvq->port_id;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->nb_segs = 1;
rxm->next = NULL;
@@ -556,12 +556,11 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->ol_flags = 0;
/* Initialize newly received packet buffer */
- rxm->in_port = rxq->port_id;
+ rxm->port = rxq->port_id;
rxm->nb_segs = 1;
rxm->next = NULL;
rxm->pkt_len = (uint16_t)rcd->len;
rxm->data_len = (uint16_t)rcd->len;
- rxm->in_port = rxq->port_id;
rxm->vlan_macip.f.vlan_tci = 0;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
@@ -113,7 +113,7 @@ eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
rxm->nb_segs = 1;
- rxm->in_port = pi->port_id;
+ rxm->port = pi->port_id;
rxm->pkt_len = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
}
/* allocate new mbuf for the used descriptor */