@@ -305,6 +305,7 @@ struct iavf_devargs {
uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
uint16_t quanta_size;
uint32_t watchdog_period;
+ uint16_t no_poll_on_link_down;
};
struct iavf_security_ctx;
@@ -323,6 +324,7 @@ struct iavf_adapter {
uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
bool stopped;
bool closed;
+ bool no_poll;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
};
@@ -37,6 +37,7 @@
#define IAVF_PROTO_XTR_ARG "proto_xtr"
#define IAVF_QUANTA_SIZE_ARG "quanta_size"
#define IAVF_RESET_WATCHDOG_ARG "watchdog_period"
+#define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
uint64_t iavf_timestamp_dynflag;
int iavf_timestamp_dynfield_offset = -1;
@@ -45,6 +46,7 @@ static const char * const iavf_valid_args[] = {
IAVF_PROTO_XTR_ARG,
IAVF_QUANTA_SIZE_ARG,
IAVF_RESET_WATCHDOG_ARG,
+ IAVF_NO_POLL_ON_LINK_DOWN_ARG,
NULL
};
@@ -2237,6 +2239,7 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
struct rte_kvargs *kvlist;
int ret;
int watchdog_period = -1;
+ uint16_t no_poll_on_link_down;
if (!devargs)
return 0;
@@ -2270,6 +2273,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
else
ad->devargs.watchdog_period = watchdog_period;
+ no_poll_on_link_down = rte_kvargs_count(kvlist,
+ IAVF_NO_POLL_ON_LINK_DOWN_ARG);
+
+ if (no_poll_on_link_down == 0)
+ ad->devargs.no_poll_on_link_down = 0;
+ else
+ ad->devargs.no_poll_on_link_down = 1;
+
if (ad->devargs.quanta_size != 0 &&
(ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
ad->devargs.quanta_size & 0x40)) {
@@ -770,6 +770,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf =
IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_vsi *vsi = &vf->vsi;
struct iavf_tx_queue *txq;
const struct rte_memzone *mz;
uint32_t ring_size;
@@ -843,6 +844,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
+ txq->vsi = vsi;
if (iavf_ipsec_crypto_supported(adapter))
txq->ipsec_crypto_pkt_md_offset =
@@ -1406,9 +1408,12 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint64_t pkt_flags;
const uint32_t *ptype_tbl;
+ rxq = rx_queue;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
nb_rx = 0;
nb_hold = 0;
- rxq = rx_queue;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1515,9 +1520,12 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
const uint32_t *ptype_tbl;
uint64_t ts_ns;
+ rxq = rx_queue;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
nb_rx = 0;
nb_hold = 0;
- rxq = rx_queue;
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1641,6 +1649,9 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
volatile union iavf_rx_flex_desc *rxdp;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
@@ -1818,6 +1829,9 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
volatile union iavf_rx_desc *rxdp;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
@@ -1973,6 +1987,9 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
uint64_t ts_ns;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -2104,6 +2121,9 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, uint1
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -2281,6 +2301,9 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
uint16_t nb_rx = 0;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
if (!nb_pkts)
return 0;
@@ -2768,6 +2791,8 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint16_t idx;
uint16_t slen;
+ if (!txq->vsi || txq->vsi->adapter->no_poll)
+ return 0;
/* Check if the descriptor ring needs to be cleaned. */
if (txq->nb_free < txq->free_thresh)
@@ -288,6 +288,7 @@ struct iavf_tx_queue {
uint16_t free_thresh;
uint16_t rs_thresh;
uint8_t rel_mbufs_type;
+ struct iavf_vsi *vsi; /**< the VSI this queue belongs to */
uint16_t port_id;
uint16_t queue_id;
@@ -26,8 +26,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
{
#define IAVF_DESCS_PER_LOOP_AVX 8
- /* const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; */
- const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+ const uint32_t *type_table;
const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
0, rxq->mbuf_initializer);
@@ -36,6 +35,11 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
const int avx_aligned = ((rxq->rx_tail & 1) == 0);
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
+ type_table = rxq->vsi->adapter->ptype_tbl;
+
rte_prefetch0(rxdp);
/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
@@ -530,12 +534,12 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
{
#define IAVF_DESCS_PER_LOOP_AVX 8
- struct iavf_adapter *adapter = rxq->vsi->adapter;
+ struct iavf_adapter *adapter;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
- uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+ uint64_t offloads;
#endif
- const uint32_t *type_table = adapter->ptype_tbl;
+ const uint32_t *type_table;
const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
0, rxq->mbuf_initializer);
@@ -543,6 +547,15 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
volatile union iavf_rx_flex_desc *rxdp =
(union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
+ adapter = rxq->vsi->adapter;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+#endif
+ type_table = adapter->ptype_tbl;
+
rte_prefetch0(rxdp);
/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
@@ -1774,6 +1787,9 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
+ if (!txq->vsi || txq->vsi->adapter->no_poll)
+ return 0;
+
if (txq->nb_free < txq->free_thresh)
iavf_tx_free_bufs(txq);
@@ -1834,6 +1850,9 @@ iavf_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_tx = 0;
struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ if (!txq->vsi || txq->vsi->adapter->no_poll)
+ return 0;
+
while (nb_pkts) {
uint16_t ret, num;
@@ -45,7 +45,7 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
bool offload)
{
#ifdef IAVF_RX_PTYPE_OFFLOAD
- const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+ const uint32_t *type_table;
#endif
const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
@@ -53,6 +53,13 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
+#ifdef IAVF_RX_PTYPE_OFFLOAD
+ type_table = rxq->vsi->adapter->ptype_tbl;
+#endif
+
rte_prefetch0(rxdp);
/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
@@ -588,12 +595,12 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
uint8_t *split_packet,
bool offload)
{
- struct iavf_adapter *adapter = rxq->vsi->adapter;
+ struct iavf_adapter *adapter;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
- uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+ uint64_t offloads;
#endif
#ifdef IAVF_RX_PTYPE_OFFLOAD
- const uint32_t *type_table = adapter->ptype_tbl;
+ const uint32_t *type_table;
#endif
const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
@@ -602,6 +609,17 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
volatile union iavf_rx_flex_desc *rxdp =
(union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
+ adapter = rxq->vsi->adapter;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+#endif
+#ifdef IAVF_RX_PTYPE_OFFLOAD
+ type_table = adapter->ptype_tbl;
+#endif
+
rte_prefetch0(rxdp);
/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
@@ -1700,6 +1718,10 @@ iavf_recv_scattered_pkts_vec_avx512_cmn(void *rx_queue, struct rte_mbuf **rx_pkt
uint16_t nb_pkts, bool offload)
{
uint16_t retval = 0;
+ struct iavf_rx_queue *rxq = rx_queue;
+
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
uint16_t burst = iavf_recv_scattered_burst_vec_avx512(rx_queue,
@@ -2303,6 +2325,9 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
+ if (!txq->vsi || txq->vsi->adapter->no_poll)
+ return 0;
+
if (txq->nb_free < txq->free_thresh)
iavf_tx_free_bufs_avx512(txq);
@@ -2370,6 +2395,9 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
+ if (!txq->vsi || txq->vsi->adapter->no_poll)
+ return 0;
+
if (txq->nb_free < txq->free_thresh)
iavf_tx_free_bufs_avx512(txq);
@@ -2432,6 +2460,9 @@ iavf_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_tx = 0;
struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ if (!txq->vsi || txq->vsi->adapter->no_poll)
+ return 0;
+
while (nb_pkts) {
uint16_t ret, num;
@@ -2498,6 +2529,9 @@ iavf_xmit_pkts_vec_avx512_ctx_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_tx = 0;
struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ if (!txq->vsi || txq->vsi->adapter->no_poll)
+ return 0;
+
while (nb_pkts) {
uint16_t ret, num;
@@ -479,7 +479,12 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
int pos;
uint64_t var;
__m128i shuf_msk;
- const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ const uint32_t *ptype_tbl;
+
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
+ ptype_tbl = rxq->vsi->adapter->ptype_tbl;
__m128i crc_adjust = _mm_set_epi16(
0, 0, 0, /* ignore non-length fields */
@@ -1198,6 +1203,11 @@ uint16_t
iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
+ struct iavf_rx_queue *rxq = rx_queue;
+
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
return _recv_raw_pkts_vec_flex_rxd(rx_queue, rx_pkts, nb_pkts, NULL);
}
@@ -1215,6 +1225,9 @@ iavf_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
unsigned int i = 0;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
/* get some new buffers */
uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
split_flags);
@@ -1284,6 +1297,9 @@ iavf_recv_scattered_burst_vec_flex_rxd(void *rx_queue,
uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
unsigned int i = 0;
+ if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+ return 0;
+
/* get some new buffers */
uint16_t nb_bufs = _recv_raw_pkts_vec_flex_rxd(rxq, rx_pkts, nb_pkts,
split_flags);
@@ -1437,6 +1453,9 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_tx = 0;
struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ if (!txq->vsi || txq->vsi->adapter->no_poll)
+ return 0;
+
while (nb_pkts) {
uint16_t ret, num;
@@ -263,6 +263,14 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
if (!vf->link_up)
iavf_dev_watchdog_enable(adapter);
}
+ if (vf->link_up && adapter->no_poll) {
+ adapter->no_poll = false;
+ PMD_DRV_LOG(DEBUG, "IAVF no-poll turned off");
+ }
+ if (!vf->link_up && adapter->devargs.no_poll_on_link_down) {
+ adapter->no_poll = true;
+ PMD_DRV_LOG(DEBUG, "IAVF no-poll turned on");
+ }
PMD_DRV_LOG(INFO, "Link status update:%s",
vf->link_up ? "up" : "down");
break;
@@ -465,6 +473,15 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
if (!vf->link_up)
iavf_dev_watchdog_enable(adapter);
}
+ if (vf->link_up && adapter->no_poll) {
+ adapter->no_poll = false;
+ PMD_DRV_LOG(DEBUG, "IAVF no-poll turned off");
+ }
+ if (!vf->link_up && adapter->devargs.no_poll_on_link_down) {
+ adapter->no_poll = true;
+ PMD_DRV_LOG(DEBUG, "IAVF no-poll turned on");
+ }
+
iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0);
break;
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: