The functions to loop over the Tx queue and clean up all the mbufs on
it, e.g. for queue shutdown, are not device specific and so can move
into the common_intel headers. The only complication is ensuring that
the correct ring format, either minimal vector or full structure, is
used.

The ice driver currently uses two functions and a function pointer to
help with this - though one of those functions actually performs a
further check internally - so we can simplify this down to just one
common function, with a flag set in the appropriate place. This avoids
checking for the AVX-512-specific Tx functions at cleanup time, which
were the only ones using the smaller ring-entry struct in this driver.
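
To make the intended usage concrete, here is a minimal sketch from a
driver's point of view (illustrative only: example_tx_queue_start/stop
and their parameters are hypothetical, while ci_tx_queue, the
vector_tx / vector_sw_ring flags and ci_txq_release_all_mbufs() come
from the header changes below):

    #include "_common_intel/tx.h"

    /* At queue start, record which ring format the Tx path will use,
     * as ice_tx_queue_start() does in this patch.
     */
    static void
    example_tx_queue_start(struct ci_tx_queue *txq, bool tx_vec_allowed,
            bool tx_use_avx512)
    {
        txq->vector_tx = tx_vec_allowed;     /* vector Tx path in use */
        txq->vector_sw_ring = tx_use_avx512; /* ring of ci_tx_entry_vec */
    }

    /* At stop/release time, a single common call replaces the old
     * per-driver tx_rel_mbufs function pointer.
     */
    static void
    example_tx_queue_stop(struct ci_tx_queue *txq)
    {
        ci_txq_release_all_mbufs(txq);
    }
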
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
drivers/net/_common_intel/tx.h | 49 ++++++++++++++++++++++++-
drivers/net/ice/ice_dcf_ethdev.c | 5 +--
drivers/net/ice/ice_ethdev.h | 3 +-
drivers/net/ice/ice_rxtx.c | 33 +++++------------
drivers/net/ice/ice_rxtx_vec_common.h | 51 ---------------------------
drivers/net/ice/ice_rxtx_vec_sse.c | 4 ---
6 files changed, 60 insertions(+), 85 deletions(-)
@@ -65,6 +65,8 @@ struct ci_tx_queue {
rte_iova_t tx_ring_dma; /* TX ring DMA address */
bool tx_deferred_start; /* don't start this queue in dev start */
bool q_set; /* indicate if tx queue has been configured */
+ bool vector_tx; /* port is using vector TX */
+ bool vector_sw_ring; /* port is using vectorized SW ring (ci_tx_entry_vec) */
union { /* the VSI this queue belongs to */
struct i40e_vsi *i40e_vsi;
struct iavf_vsi *iavf_vsi;
@@ -74,7 +76,6 @@ struct ci_tx_queue {
union {
struct { /* ICE driver specific values */
- ice_tx_release_mbufs_t tx_rel_mbufs;
uint32_t q_teid; /* TX schedule node id. */
};
struct { /* I40E driver specific values */
@@ -270,4 +271,50 @@ ci_tx_free_bufs_vec(struct ci_tx_queue *txq, ci_desc_done_fn desc_done, bool ctx
return txq->tx_rs_thresh;
}
+#define IETH_FREE_BUFS_LOOP(txq, swr, start) do { \
+ uint16_t i = start; \
+ if (txq->tx_tail < i) { \
+ for (; i < txq->nb_tx_desc; i++) { \
+ rte_pktmbuf_free_seg(swr[i].mbuf); \
+ swr[i].mbuf = NULL; \
+ } \
+ i = 0; \
+ } \
+ for (; i < txq->tx_tail; i++) { \
+ rte_pktmbuf_free_seg(swr[i].mbuf); \
+ swr[i].mbuf = NULL; \
+ } \
+} while (0)
+
+static inline void
+ci_txq_release_all_mbufs(struct ci_tx_queue *txq)
+{
+ if (unlikely(!txq || !txq->sw_ring))
+ return;
+
+ if (!txq->vector_tx) {
+ for (uint16_t i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ return;
+ }
+
+ /**
+ * The vector Tx path does not set sw_ring's mbuf pointers to NULL
+ * after freeing them, so the remaining mbufs need to be freed more
+ * carefully.
+ */
+ const uint16_t start = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+
+ if (txq->vector_sw_ring) {
+ struct ci_tx_entry_vec *swr = txq->sw_ring_vec;
+ IETH_FREE_BUFS_LOOP(txq, swr, start);
+ } else {
+ struct ci_tx_entry *swr = txq->sw_ring;
+ IETH_FREE_BUFS_LOOP(txq, swr, start);
+ }
+}
+
#endif /* _COMMON_INTEL_TX_H_ */
@@ -24,6 +24,7 @@
#include "ice_generic_flow.h"
#include "ice_dcf_ethdev.h"
#include "ice_rxtx.h"
+#include "_common_intel/tx.h"
#define DCF_NUM_MACADDR_MAX 64
@@ -500,7 +501,7 @@ ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->tx_rel_mbufs(txq);
+ ci_txq_release_all_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -650,7 +651,7 @@ ice_dcf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->tx_rel_mbufs(txq);
+ ci_txq_release_all_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -621,13 +621,12 @@ struct ice_adapter {
/* Set bit if the engine is disabled */
unsigned long disabled_engine_mask;
struct ice_parser *psr;
-#ifdef RTE_ARCH_X86
+ /* used only on X86, zero on other architectures */
bool rx_use_avx2;
bool rx_use_avx512;
bool tx_use_avx2;
bool tx_use_avx512;
bool rx_vec_offload_support;
-#endif
};
struct ice_vsi_vlan_pvid_info {
@@ -751,6 +751,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
struct ice_aqc_add_tx_qgrp *txq_elem;
struct ice_tlan_ctx tx_ctx;
int buf_len;
+ struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -822,6 +823,10 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return -EIO;
}
+ /* record what kind of descriptor cleanup we need on teardown */
+ txq->vector_tx = ad->tx_vec_allowed;
+ txq->vector_sw_ring = ad->tx_use_avx512;
+
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
rte_free(txq_elem);
@@ -1006,25 +1011,6 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return 0;
}
-/* Free all mbufs for descriptors in tx queue */
-static void
-_ice_tx_queue_release_mbufs(struct ci_tx_queue *txq)
-{
- uint16_t i;
-
- if (!txq || !txq->sw_ring) {
- PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
- return;
- }
-
- for (i = 0; i < txq->nb_tx_desc; i++) {
- if (txq->sw_ring[i].mbuf) {
- rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
- txq->sw_ring[i].mbuf = NULL;
- }
- }
-}
-
static void
ice_reset_tx_queue(struct ci_tx_queue *txq)
{
@@ -1103,7 +1089,7 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return -EINVAL;
}
- txq->tx_rel_mbufs(txq);
+ ci_txq_release_all_mbufs(txq);
ice_reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -1166,7 +1152,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return -EINVAL;
}
- txq->tx_rel_mbufs(txq);
+ ci_txq_release_all_mbufs(txq);
txq->qtx_tail = NULL;
return 0;
@@ -1518,7 +1504,6 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
ice_reset_tx_queue(txq);
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
- txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
ice_set_tx_function_flag(dev, txq);
return 0;
@@ -1546,8 +1531,7 @@ ice_tx_queue_release(void *txq)
return;
}
- if (q->tx_rel_mbufs != NULL)
- q->tx_rel_mbufs(q);
+ ci_txq_release_all_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -2460,7 +2444,6 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
txq->q_set = true;
pf->fdir.txq = txq;
- txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
return ICE_SUCCESS;
}
@@ -61,57 +61,6 @@ _ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
-static inline void
-_ice_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
-{
- uint16_t i;
-
- if (unlikely(!txq || !txq->sw_ring)) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
- return;
- }
-
- /**
- * vPMD tx will not set sw_ring's mbuf to NULL after free,
- * so need to free remains more carefully.
- */
- i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
-
-#ifdef __AVX512VL__
- struct rte_eth_dev *dev = &rte_eth_devices[txq->ice_vsi->adapter->pf.dev_data->port_id];
-
- if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
- dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
- struct ci_tx_entry_vec *swr = (void *)txq->sw_ring;
-
- if (txq->tx_tail < i) {
- for (; i < txq->nb_tx_desc; i++) {
- rte_pktmbuf_free_seg(swr[i].mbuf);
- swr[i].mbuf = NULL;
- }
- i = 0;
- }
- for (; i < txq->tx_tail; i++) {
- rte_pktmbuf_free_seg(swr[i].mbuf);
- swr[i].mbuf = NULL;
- }
- } else
-#endif
- {
- if (txq->tx_tail < i) {
- for (; i < txq->nb_tx_desc; i++) {
- rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
- txq->sw_ring[i].mbuf = NULL;
- }
- i = 0;
- }
- for (; i < txq->tx_tail; i++) {
- rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
- txq->sw_ring[i].mbuf = NULL;
- }
- }
-}
-
static inline int
ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
{
@@ -795,10 +795,6 @@ ice_rxq_vec_setup(struct ice_rx_queue *rxq)
int __rte_cold
ice_txq_vec_setup(struct ci_tx_queue *txq __rte_unused)
{
- if (!txq)
- return -1;
-
- txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs_vec;
return 0;
}