@@ -1214,7 +1214,7 @@ ice_dcf_configure_queues(struct ice_dcf_hw *hw)
vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (hw->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
hw->supported_rxdid &
@@ -308,7 +308,7 @@ alloc_rxq_mbufs(struct ice_rx_queue *rxq)
rxd = &rxq->rx_ring[i];
rxd->read.pkt_addr = dma_addr;
rxd->read.hdr_addr = 0;
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
rxd->read.rsvd1 = 0;
rxd->read.rsvd2 = 0;
#endif
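/*
 * For reference: the zeroing above is conditional because the two Rx
 * descriptor read (programming) formats differ only in their tail. A
 * minimal sketch with illustrative names; the real read-format unions
 * live in the driver's base code.
 */
#include <stdint.h>

struct rx_desc_read_sketch {
	uint64_t pkt_addr;	/* packet buffer DMA address */
	uint64_t hdr_addr;	/* header buffer DMA address */
#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
	uint64_t rsvd1;		/* tail quadwords exist only in the 32B format */
	uint64_t rsvd2;		/* and are cleared as in the hunk above */
#endif
};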
@@ -86,7 +86,7 @@ ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
@@ -101,7 +101,7 @@ ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
{
volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
uint16_t stat_err;
#endif

@@ -110,7 +110,7 @@ ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
@@ -134,7 +134,7 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
@@ -178,7 +178,7 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
@@ -374,7 +374,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
rx_ctx.qlen = rxq->nb_rx_desc;
rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
rx_ctx.dsize = 1; /* 32B descriptors */
#endif
rx_ctx.rxmax = rxq->max_pkt_len;
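/*
 * For reference: DSIZE in the Rx queue context selects the write-back
 * descriptor format, and the zero-initialised context already encodes
 * 16-byte descriptors, which is why the hunk above assigns it only in
 * the 32-byte build. A hedged sketch of that selection, assuming the
 * 0 = 16B / 1 = 32B semantics the hunk's own comment implies:
 */
#include <stdint.h>

static inline uint8_t
rxq_ctx_dsize(void)
{
#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
	return 1;	/* 32B descriptors */
#else
	return 0;	/* zeroed-context default: 16B descriptors */
#endif
}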
@@ -501,7 +501,7 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
rxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay));
}

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
rxd->read.rsvd1 = 0;
rxd->read.rsvd2 = 0;
#endif
@@ -1668,7 +1668,7 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
mb->vlan_tci = 0;
}

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
(1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
@@ -1705,7 +1705,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags = 0;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
bool is_tsinit = false;
uint64_t ts_ns;
struct ice_vsi *vsi = rxq->vsi;
@@ -1721,7 +1721,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);

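/*
 * The sw_cur_time line above converts raw timer cycles to milliseconds:
 * cycles / (cycles-per-second / 1000). A self-contained illustration of
 * the same arithmetic, with hypothetical numbers in place of the
 * rte_cycles API:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t timer_hz = 2500000000ULL;	/* assumed 2.5 GHz timer */
	uint64_t cycles = 5000000000ULL;	/* assumed elapsed cycle count */
	uint64_t ms = cycles / (timer_hz / 1000);	/* 5e9 / 2.5e6 */

	printf("%llu ms\n", (unsigned long long)ms);	/* prints: 2000 ms */
	return 0;
}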
@@ -1783,7 +1783,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (rxq->ts_flag > 0 &&
(rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
rxq->time_high =
@@ -2023,7 +2023,7 @@ ice_recv_scattered_pkts(void *rx_queue,
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
bool is_tsinit = false;
uint64_t ts_ns;
struct ice_vsi *vsi = rxq->vsi;
@@ -2151,7 +2151,7 @@ ice_recv_scattered_pkts(void *rx_queue,
ice_rxd_to_vlan_tci(first_seg, &rxd);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (rxq->ts_flag > 0 &&
(rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
rxq->time_high =
@@ -2540,7 +2540,7 @@ ice_recv_pkts(void *rx_queue,
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
bool is_tsinit = false;
uint64_t ts_ns;
struct ice_vsi *vsi = rxq->vsi;
@@ -2649,7 +2649,7 @@ ice_recv_pkts(void *rx_queue,
ice_rxd_to_vlan_tci(rxm, &rxd);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
if (rxq->ts_flag > 0 &&
(rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
rxq->time_high =
@@ -23,7 +23,7 @@
#define ICE_CHK_Q_ENA_COUNT 100
#define ICE_CHK_Q_ENA_INTERVAL_US 100

-#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifdef RTE_NET_INTEL_USE_16BYTE_DESC
#define ice_rx_flex_desc ice_16b_rx_flex_desc
#else
#define ice_rx_flex_desc ice_32b_rx_flex_desc
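/*
 * For orientation: every guard in this patch hinges on the alias above.
 * With the renamed flag set, the driver compiles against the 16-byte
 * flex descriptor; otherwise against the 32-byte one, whose second 16
 * bytes carry (depending on the profile) the RSS hash or flow ID, the
 * second VLAN tag, and the timestamp. A compilable sketch of the same
 * aliasing; the layouts are illustrative, the real unions being
 * ice_16b_rx_flex_desc / ice_32b_rx_flex_desc in the base code.
 */
#include <stdint.h>

struct rx_flex_desc_16b_sketch {
	uint64_t qw0;	/* ptype, packet length, status/error 0 */
	uint64_t qw1;	/* L2TAG1, flexible metadata */
};

struct rx_flex_desc_32b_sketch {
	uint64_t qw0;
	uint64_t qw1;
	uint64_t qw2;	/* status/error 1, L2TAG2 (QinQ) */
	uint64_t qw3;	/* RSS hash / flow ID, timestamp */
};

#ifdef RTE_NET_INTEL_USE_16BYTE_DESC
#define rx_flex_desc_sketch rx_flex_desc_16b_sketch
#else
#define rx_flex_desc_sketch rx_flex_desc_32b_sketch
#endif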
@@ -38,7 +38,7 @@ ice_rxq_rearm_common(struct ice_rx_queue *rxq, __rte_unused bool avx512)
return;
}

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
struct rte_mbuf *mb0, *mb1;
__m128i dma_addr0, dma_addr1;
__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
@@ -440,7 +440,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
} /* if() on fdir_enabled */

if (offload) {
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
/**
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
@@ -462,7 +462,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
} /* if() on fdir_enabled */

if (do_offload) {
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
/**
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
@@ -477,7 +477,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
pkt_mb0 = _mm_add_epi16(pkt_mb0, crc_adjust);

-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
/**
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
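/*
 * A closing note on the three vector hunks: the comment they share means
 * the second 16 bytes of each 32-byte descriptor need an extra load
 * before the RSS hash or FDIR ID can be parsed, which costs cycles; on a
 * 16-byte build those bytes do not exist, so the whole branch is
 * compiled out. A hedged sketch of that guard, with an assumed layout
 * (the real comms/OVS profile offsets live in the base code):
 */
#include <stdint.h>
#include <string.h>

struct rx_wb_sketch {
	uint8_t first_16b[16];	/* ptype, length, status/error 0 */
#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
	uint8_t second_16b[16];	/* status/error 1, RSS hash, flow ID */
#endif
};

static inline uint32_t
parse_rss_hash(const struct rx_wb_sketch *d)
{
#ifndef RTE_NET_INTEL_USE_16BYTE_DESC
	uint32_t hash;

	/* assumed slot within the second 16 bytes */
	memcpy(&hash, d->second_16b + 8, sizeof(hash));
	return hash;
#else
	(void)d;
	return 0;	/* descriptor ends at 16 bytes; nothing to parse */
#endif
}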