> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Friday, December 29, 2023 6:11 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; stable@dpdk.org; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Subject: [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process
>
> In a multi-process environment, a secondary process operates on shared
> memory and changes the function pointer of the primary process, resulting
> in a crash when the primary process cannot find the function address during
> an Rx/Tx burst.
>
> Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
> Cc: stable@dpdk.org
>
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---
> v2: Add fix for Rx burst.
> ---
> drivers/net/iavf/iavf.h | 42 +++++++-
> drivers/net/iavf/iavf_rxtx.c | 184 ++++++++++++++++++++++++++++++-----
> drivers/net/iavf/iavf_rxtx.h | 8 ++
> 3 files changed, 205 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index
> 10868f2c30..8db9f3d7cd 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -313,6 +313,44 @@ struct iavf_devargs {
>
> struct iavf_security_ctx;
>
> +enum iavf_rx_burst_type {
> + IAVF_RX_BURST_DEFAULT,
> + IAVF_RX_BURST_FRXD,
> + IAVF_RX_BURST_BULK_ALLOC,
> + IAVF_RX_BURST_SCATTERED,
> + IAVF_RX_BURST_SFRXD,
> + IAVF_RX_BURST_VEC_SSE,
> + IAVF_RX_BURST_VEC_AVX2,
> + IAVF_RX_BURST_VEC_AVX2_OFFLOAD,
> + IAVF_RX_BURST_VEC_SSE_FRXD,
> + IAVF_RX_BURST_VEC_AVX2_FRXD,
> + IAVF_RX_BURST_VEC_AVX2_FRXD_OFFLOAD,
> + IAVF_RX_BURST_VEC_SSE_SCATTERED,
> + IAVF_RX_BURST_VEC_AVX2_SCATTERED,
> + IAVF_RX_BURST_VEC_AVX2_SCATTERED_OFFLOAD,
> + IAVF_RX_BURST_VEC_SSE_SFLEX_RXD,
> + IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD,
> + IAVF_RX_BURST_VEC_AVX2_SFRXD_OFFLOAD,
> + IAVF_RX_BURST_VEC_AVX512,
> + IAVF_RX_BURST_VEC_AVX512_OFFLOAD,
> + IAVF_RX_BURST_VEC_AVX512_FRXD,
> + IAVF_RX_BURST_VEC_AVX512_FRXD_OFFLOAD,
> + IAVF_RX_BURST_VEC_AVX512_SCATTERED,
> + IAVF_RX_BURST_VEC_AVX512_SCATTERED_OFFLOAD,
> + IAVF_RX_BURST_VEC_AVX512_SFLEX_RXD,
> + IAVF_RX_BURST_VEC_AVX512_SFRXD_OFFLOAD,
What are SFLEX, SFRXD, and FRXD? Please make the meaning clear by following a consistent naming pattern.
Btw, if you are looking for shorter names, you could consider removing BURST and VEC, which don't add any information.
....
> @@ -3809,42 +3886,64 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
> }
> if (use_flex) {
> dev->rx_pkt_burst =
> iavf_recv_scattered_pkts_vec_flex_rxd;
> + rx_burst_type =
> IAVF_RX_BURST_VEC_SSE_SFLEX_RXD;
> if (use_avx2) {
> - if (check_ret == IAVF_VECTOR_PATH)
> + if (check_ret == IAVF_VECTOR_PATH)
> {
> dev->rx_pkt_burst =
>
> iavf_recv_scattered_pkts_vec_avx2_flex_rxd;
> - else
> + rx_burst_type =
> +
> IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD;
As you have already introduced the burst_type, it's not necessary to set the function pointer for each case.
Why not just do
dev->rx_pkt_burst = rx_burst_ops[rx_burst_type]; once at the end?
....
> +struct iavf_rx_burst_ops {
> + eth_rx_burst_t rx_pkt_burst;
> +};
Why create a wrapper struct here instead of just using eth_rx_burst_t directly?
@@ -313,6 +313,44 @@ struct iavf_devargs {
struct iavf_security_ctx;
+enum iavf_rx_burst_type {
+ IAVF_RX_BURST_DEFAULT,
+ IAVF_RX_BURST_FRXD,
+ IAVF_RX_BURST_BULK_ALLOC,
+ IAVF_RX_BURST_SCATTERED,
+ IAVF_RX_BURST_SFRXD,
+ IAVF_RX_BURST_VEC_SSE,
+ IAVF_RX_BURST_VEC_AVX2,
+ IAVF_RX_BURST_VEC_AVX2_OFFLOAD,
+ IAVF_RX_BURST_VEC_SSE_FRXD,
+ IAVF_RX_BURST_VEC_AVX2_FRXD,
+ IAVF_RX_BURST_VEC_AVX2_FRXD_OFFLOAD,
+ IAVF_RX_BURST_VEC_SSE_SCATTERED,
+ IAVF_RX_BURST_VEC_AVX2_SCATTERED,
+ IAVF_RX_BURST_VEC_AVX2_SCATTERED_OFFLOAD,
+ IAVF_RX_BURST_VEC_SSE_SFLEX_RXD,
+ IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD,
+ IAVF_RX_BURST_VEC_AVX2_SFRXD_OFFLOAD,
+ IAVF_RX_BURST_VEC_AVX512,
+ IAVF_RX_BURST_VEC_AVX512_OFFLOAD,
+ IAVF_RX_BURST_VEC_AVX512_FRXD,
+ IAVF_RX_BURST_VEC_AVX512_FRXD_OFFLOAD,
+ IAVF_RX_BURST_VEC_AVX512_SCATTERED,
+ IAVF_RX_BURST_VEC_AVX512_SCATTERED_OFFLOAD,
+ IAVF_RX_BURST_VEC_AVX512_SFLEX_RXD,
+ IAVF_RX_BURST_VEC_AVX512_SFRXD_OFFLOAD,
+};
+
+enum iavf_tx_burst_type {
+ IAVF_TX_BURST_DEFAULT,
+ IAVF_TX_BURST_VEC_SSE,
+ IAVF_TX_BURST_VEC_AVX2,
+ IAVF_TX_BURST_VEC_AVX2_OFFLOAD,
+ IAVF_TX_BURST_VEC_AVX512,
+ IAVF_TX_BURST_VEC_AVX512_OFFLOAD,
+ IAVF_TX_BURST_VEC_AVX512_CTX_OFFLOAD,
+};
+
/* Structure to store private data for each VF instance. */
struct iavf_adapter {
struct iavf_hw hw;
@@ -328,8 +366,8 @@ struct iavf_adapter {
bool stopped;
bool closed;
bool no_poll;
- eth_rx_burst_t rx_pkt_burst;
- eth_tx_burst_t tx_pkt_burst;
+ enum iavf_rx_burst_type rx_burst_type;
+ enum iavf_tx_burst_type tx_burst_type;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
};
@@ -3707,15 +3707,88 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+static const
+struct iavf_rx_burst_ops iavf_rx_pkt_burst_ops[] = {
+ [IAVF_RX_BURST_DEFAULT].rx_pkt_burst = iavf_recv_pkts,
+ [IAVF_RX_BURST_FRXD].rx_pkt_burst = iavf_recv_pkts_flex_rxd,
+ [IAVF_RX_BURST_BULK_ALLOC].rx_pkt_burst = iavf_recv_pkts_bulk_alloc,
+ [IAVF_RX_BURST_SCATTERED].rx_pkt_burst = iavf_recv_scattered_pkts,
+ [IAVF_RX_BURST_SFRXD].rx_pkt_burst =
+ iavf_recv_scattered_pkts_flex_rxd,
+#ifdef RTE_ARCH_X86
+ [IAVF_RX_BURST_VEC_SSE].rx_pkt_burst = iavf_recv_pkts_vec,
+ [IAVF_RX_BURST_VEC_AVX2].rx_pkt_burst = iavf_recv_pkts_vec_avx2,
+ [IAVF_RX_BURST_VEC_AVX2_OFFLOAD].rx_pkt_burst =
+ iavf_recv_pkts_vec_avx2_offload,
+ [IAVF_RX_BURST_VEC_SSE_FRXD].rx_pkt_burst =
+ iavf_recv_pkts_vec_flex_rxd,
+ [IAVF_RX_BURST_VEC_AVX2_FRXD].rx_pkt_burst =
+ iavf_recv_pkts_vec_avx2_flex_rxd,
+ [IAVF_RX_BURST_VEC_AVX2_FRXD_OFFLOAD].rx_pkt_burst =
+ iavf_recv_pkts_vec_avx2_flex_rxd_offload,
+ [IAVF_RX_BURST_VEC_SSE_SCATTERED].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec,
+ [IAVF_RX_BURST_VEC_AVX2_SCATTERED].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx2,
+ [IAVF_RX_BURST_VEC_AVX2_SCATTERED_OFFLOAD].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx2_offload,
+ [IAVF_RX_BURST_VEC_SSE_SFLEX_RXD].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_flex_rxd,
+ [IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx2_flex_rxd,
+ [IAVF_RX_BURST_VEC_AVX2_SFRXD_OFFLOAD].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload,
+#ifdef CC_AVX512_SUPPORT
+ [IAVF_RX_BURST_VEC_AVX512].rx_pkt_burst = iavf_recv_pkts_vec_avx512,
+ [IAVF_RX_BURST_VEC_AVX512_OFFLOAD].rx_pkt_burst =
+ iavf_recv_pkts_vec_avx512_offload,
+ [IAVF_RX_BURST_VEC_AVX512_FRXD].rx_pkt_burst =
+ iavf_recv_pkts_vec_avx512_flex_rxd,
+ [IAVF_RX_BURST_VEC_AVX512_FRXD_OFFLOAD].rx_pkt_burst =
+ iavf_recv_pkts_vec_avx512_flex_rxd_offload,
+ [IAVF_RX_BURST_VEC_AVX512_SCATTERED].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx512,
+ [IAVF_RX_BURST_VEC_AVX512_SCATTERED_OFFLOAD].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx512_offload,
+ [IAVF_RX_BURST_VEC_AVX512_SFLEX_RXD].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx512_flex_rxd,
+ [IAVF_RX_BURST_VEC_AVX512_SFRXD_OFFLOAD].rx_pkt_burst =
+ iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload,
+#endif
+#elif defined RTE_ARCH_ARM
+ [IAVF_RX_BURST_VEC_SSE].rx_pkt_burst = iavf_recv_pkts_vec,
+#endif
+};
+
+static const
+struct iavf_tx_burst_ops iavf_tx_pkt_burst_ops[] = {
+ [IAVF_TX_BURST_DEFAULT].tx_pkt_burst = iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+ [IAVF_TX_BURST_VEC_SSE].tx_pkt_burst = iavf_xmit_pkts_vec,
+ [IAVF_TX_BURST_VEC_AVX2].tx_pkt_burst = iavf_xmit_pkts_vec_avx2,
+ [IAVF_TX_BURST_VEC_AVX2_OFFLOAD].tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+ [IAVF_TX_BURST_VEC_AVX512].tx_pkt_burst = iavf_xmit_pkts_vec_avx512,
+ [IAVF_TX_BURST_VEC_AVX512_OFFLOAD].tx_pkt_burst =
+ iavf_xmit_pkts_vec_avx512_offload,
+ [IAVF_TX_BURST_VEC_AVX512_CTX_OFFLOAD].tx_pkt_burst =
+ iavf_xmit_pkts_vec_avx512_ctx_offload,
+#endif
+#endif
+};
+
static uint16_t
iavf_recv_pkts_no_poll(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct iavf_rx_queue *rxq = rx_queue;
+ enum iavf_rx_burst_type rx_burst_type =
+ rxq->vsi->adapter->rx_burst_type;
+
if (!rxq->vsi || rxq->vsi->adapter->no_poll)
return 0;
- return rxq->vsi->adapter->rx_pkt_burst(rx_queue,
+ return iavf_rx_pkt_burst_ops[rx_burst_type].rx_pkt_burst(rx_queue,
rx_pkts, nb_pkts);
}
@@ -3724,10 +3797,13 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct iavf_tx_queue *txq = tx_queue;
+ enum iavf_tx_burst_type tx_burst_type =
+ txq->vsi->adapter->tx_burst_type;
+
if (!txq->vsi || txq->vsi->adapter->no_poll)
return 0;
- return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+ return iavf_tx_pkt_burst_ops[tx_burst_type].tx_pkt_burst(tx_queue,
tx_pkts, nb_pkts);
}
@@ -3738,6 +3814,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ enum iavf_rx_burst_type rx_burst_type;
int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
int i;
struct iavf_rx_queue *rxq;
@@ -3809,42 +3886,64 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
}
if (use_flex) {
dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec_flex_rxd;
+ rx_burst_type = IAVF_RX_BURST_VEC_SSE_SFLEX_RXD;
if (use_avx2) {
- if (check_ret == IAVF_VECTOR_PATH)
+ if (check_ret == IAVF_VECTOR_PATH) {
dev->rx_pkt_burst =
iavf_recv_scattered_pkts_vec_avx2_flex_rxd;
- else
+ rx_burst_type =
+ IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD;
+ } else {
dev->rx_pkt_burst =
iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload;
+ rx_burst_type =
+ IAVF_RX_BURST_VEC_AVX2_SFRXD_OFFLOAD;
+ }
}
#ifdef CC_AVX512_SUPPORT
if (use_avx512) {
- if (check_ret == IAVF_VECTOR_PATH)
+ if (check_ret == IAVF_VECTOR_PATH) {
dev->rx_pkt_burst =
iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
- else
+ rx_burst_type =
+ IAVF_RX_BURST_VEC_AVX512_SFLEX_RXD;
+ } else {
dev->rx_pkt_burst =
iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
+ rx_burst_type =
+ IAVF_RX_BURST_VEC_AVX512_SFRXD_OFFLOAD;
+ }
}
#endif
} else {
dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec;
+ rx_burst_type = IAVF_RX_BURST_VEC_SSE_SCATTERED;
if (use_avx2) {
- if (check_ret == IAVF_VECTOR_PATH)
+ if (check_ret == IAVF_VECTOR_PATH) {
dev->rx_pkt_burst =
iavf_recv_scattered_pkts_vec_avx2;
- else
+ rx_burst_type =
+ IAVF_RX_BURST_VEC_AVX2_SCATTERED;
+ } else {
dev->rx_pkt_burst =
iavf_recv_scattered_pkts_vec_avx2_offload;
+ rx_burst_type =
+ IAVF_RX_BURST_VEC_AVX2_SCATTERED_OFFLOAD;
+ }
}
#ifdef CC_AVX512_SUPPORT
if (use_avx512) {
- if (check_ret == IAVF_VECTOR_PATH)
+ if (check_ret == IAVF_VECTOR_PATH) {
dev->rx_pkt_burst =
iavf_recv_scattered_pkts_vec_avx512;
- else
+ rx_burst_type =
+ IAVF_RX_BURST_VEC_AVX512_SCATTERED;
+ } else {
dev->rx_pkt_burst =
iavf_recv_scattered_pkts_vec_avx512_offload;
+ rx_burst_type =
+ IAVF_RX_BURST_VEC_AVX512_SCATTERED_OFFLOAD;
+ }
}
#endif
}
@@ -3875,49 +3974,64 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
}
if (use_flex) {
dev->rx_pkt_burst = iavf_recv_pkts_vec_flex_rxd;
+ rx_burst_type = IAVF_RX_BURST_VEC_SSE_FRXD;
if (use_avx2) {
- if (check_ret == IAVF_VECTOR_PATH)
+ if (check_ret == IAVF_VECTOR_PATH) {
dev->rx_pkt_burst =
iavf_recv_pkts_vec_avx2_flex_rxd;
- else
+ rx_burst_type = IAVF_RX_BURST_VEC_AVX2_FRXD;
+ } else {
dev->rx_pkt_burst =
iavf_recv_pkts_vec_avx2_flex_rxd_offload;
+ rx_burst_type = IAVF_RX_BURST_VEC_AVX2_FRXD_OFFLOAD;
+ }
}
#ifdef CC_AVX512_SUPPORT
if (use_avx512) {
- if (check_ret == IAVF_VECTOR_PATH)
+ if (check_ret == IAVF_VECTOR_PATH) {
dev->rx_pkt_burst =
iavf_recv_pkts_vec_avx512_flex_rxd;
- else
+ rx_burst_type = IAVF_RX_BURST_VEC_AVX512_FRXD;
+ } else {
dev->rx_pkt_burst =
iavf_recv_pkts_vec_avx512_flex_rxd_offload;
+ rx_burst_type =
+ IAVF_RX_BURST_VEC_AVX512_FRXD_OFFLOAD;
+ }
}
#endif
} else {
dev->rx_pkt_burst = iavf_recv_pkts_vec;
+ rx_burst_type = IAVF_RX_BURST_VEC_SSE;
if (use_avx2) {
- if (check_ret == IAVF_VECTOR_PATH)
+ if (check_ret == IAVF_VECTOR_PATH) {
dev->rx_pkt_burst =
iavf_recv_pkts_vec_avx2;
- else
+ rx_burst_type = IAVF_RX_BURST_VEC_AVX2;
+ } else {
dev->rx_pkt_burst =
iavf_recv_pkts_vec_avx2_offload;
+ rx_burst_type = IAVF_RX_BURST_VEC_AVX2_OFFLOAD;
+ }
}
#ifdef CC_AVX512_SUPPORT
if (use_avx512) {
- if (check_ret == IAVF_VECTOR_PATH)
+ if (check_ret == IAVF_VECTOR_PATH) {
dev->rx_pkt_burst =
iavf_recv_pkts_vec_avx512;
- else
+ rx_burst_type = IAVF_RX_BURST_VEC_AVX512;
+ } else {
dev->rx_pkt_burst =
iavf_recv_pkts_vec_avx512_offload;
+ rx_burst_type = IAVF_RX_BURST_VEC_AVX512_OFFLOAD;
+ }
}
#endif
}
}
if (no_poll_on_link_down) {
- adapter->rx_pkt_burst = dev->rx_pkt_burst;
+ adapter->rx_burst_type = rx_burst_type;
dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
}
return;
@@ -3935,9 +4049,10 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
(void)iavf_rxq_vec_setup(rxq);
}
dev->rx_pkt_burst = iavf_recv_pkts_vec;
+ rx_burst_type = IAVF_RX_BURST_VEC_SSE;
if (no_poll_on_link_down) {
- adapter->rx_pkt_burst = dev->rx_pkt_burst;
+ adapter->rx_burst_type = rx_burst_type;
dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
}
return;
@@ -3946,25 +4061,32 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
dev->data->port_id);
- if (use_flex)
+ if (use_flex) {
dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
- else
+ rx_burst_type = IAVF_RX_BURST_SFRXD;
+ } else {
dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+ rx_burst_type = IAVF_RX_BURST_SCATTERED;
+ }
} else if (adapter->rx_bulk_alloc_allowed) {
PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
+ rx_burst_type = IAVF_RX_BURST_BULK_ALLOC;
} else {
PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
dev->data->port_id);
- if (use_flex)
+ if (use_flex) {
dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
- else
+ rx_burst_type = IAVF_RX_BURST_FRXD;
+ } else {
dev->rx_pkt_burst = iavf_recv_pkts;
+ rx_burst_type = IAVF_RX_BURST_DEFAULT;
+ }
}
if (no_poll_on_link_down) {
- adapter->rx_pkt_burst = dev->rx_pkt_burst;
+ adapter->rx_burst_type = rx_burst_type;
dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
}
}
@@ -3975,6 +4097,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
{
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ enum iavf_tx_burst_type tx_burst_type;
int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
#ifdef RTE_ARCH_X86
struct iavf_tx_queue *txq;
@@ -4011,10 +4134,12 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
dev->data->port_id);
dev->tx_pkt_burst = iavf_xmit_pkts_vec;
+ tx_burst_type = IAVF_TX_BURST_VEC_SSE;
}
if (use_avx2) {
if (check_ret == IAVF_VECTOR_PATH) {
dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2;
+ tx_burst_type = IAVF_TX_BURST_VEC_AVX2;
PMD_DRV_LOG(DEBUG, "Using AVX2 Vector Tx (port %d).",
dev->data->port_id);
} else if (check_ret == IAVF_VECTOR_CTX_OFFLOAD_PATH) {
@@ -4023,6 +4148,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
goto normal;
} else {
dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload;
+ tx_burst_type = IAVF_TX_BURST_VEC_AVX2_OFFLOAD;
dev->tx_pkt_prepare = iavf_prep_pkts;
PMD_DRV_LOG(DEBUG, "Using AVX2 OFFLOAD Vector Tx (port %d).",
dev->data->port_id);
@@ -4032,15 +4158,18 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
if (use_avx512) {
if (check_ret == IAVF_VECTOR_PATH) {
dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+ tx_burst_type = IAVF_TX_BURST_VEC_AVX512;
PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
dev->data->port_id);
} else if (check_ret == IAVF_VECTOR_OFFLOAD_PATH) {
dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+ tx_burst_type = IAVF_TX_BURST_VEC_AVX512_OFFLOAD;
dev->tx_pkt_prepare = iavf_prep_pkts;
PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
dev->data->port_id);
} else {
dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx_offload;
+ tx_burst_type = IAVF_TX_BURST_VEC_AVX512_CTX_OFFLOAD;
dev->tx_pkt_prepare = iavf_prep_pkts;
PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT OFFLOAD Vector Tx (port %d).",
dev->data->port_id);
@@ -4063,7 +4192,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
}
if (no_poll_on_link_down) {
- adapter->tx_pkt_burst = dev->tx_pkt_burst;
+ adapter->tx_burst_type = tx_burst_type;
dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
}
return;
@@ -4074,10 +4203,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
dev->data->port_id);
dev->tx_pkt_burst = iavf_xmit_pkts;
+ tx_burst_type = IAVF_TX_BURST_DEFAULT;
dev->tx_pkt_prepare = iavf_prep_pkts;
if (no_poll_on_link_down) {
- adapter->tx_pkt_burst = dev->tx_pkt_burst;
+ adapter->tx_burst_type = tx_burst_type;
dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
}
}
@@ -203,6 +203,14 @@ struct iavf_txq_ops {
void (*release_mbufs)(struct iavf_tx_queue *txq);
};
+struct iavf_rx_burst_ops {
+ eth_rx_burst_t rx_pkt_burst;
+};
+
+struct iavf_tx_burst_ops {
+ eth_tx_burst_t tx_pkt_burst;
+};
+
struct iavf_rx_queue_stats {
uint64_t reserved;