@@ -1439,7 +1439,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_recv_pkts));
rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(i40e_xmit_pkts));
- dev->tx_pkt_prepare = i40e_prep_pkts;
+ rte_eth_set_tx_prep(dev->data->port_id, _RTE_ETH_FUNC(i40e_prep_pkts));
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -1542,7 +1542,7 @@ static _RTE_ETH_TX_DEF(i40e_xmit_pkts_vec)
* TX simple prep functions
*
**********************************************************************/
-uint16_t
+static uint16_t
i40e_simple_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1574,12 +1574,14 @@ i40e_simple_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+_RTE_ETH_TX_PREP_DEF(i40e_simple_prep_pkts)
+
/*********************************************************************
*
* TX prep functions
*
**********************************************************************/
-uint16_t
+static uint16_t
i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1636,6 +1638,8 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+_RTE_ETH_TX_PREP_DEF(i40e_prep_pkts)
+
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index
* application used, which assume having sequential ones. But from driver's
@@ -3594,12 +3598,14 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
rte_eth_set_tx_burst(dev->data->port_id,
_RTE_ETH_FUNC(i40e_xmit_pkts_simple));
}
- dev->tx_pkt_prepare = i40e_simple_prep_pkts;
+ rte_eth_set_tx_prep(dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_simple_prep_pkts));
} else {
PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
rte_eth_set_tx_burst(dev->data->port_id,
_RTE_ETH_FUNC(i40e_xmit_pkts));
- dev->tx_pkt_prepare = i40e_prep_pkts;
+ rte_eth_set_tx_prep(dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_prep_pkts));
}
}
@@ -204,10 +204,9 @@ _RTE_ETH_RX_PROTO(i40e_recv_pkts);
_RTE_ETH_RX_PROTO(i40e_recv_scattered_pkts);
_RTE_ETH_TX_PROTO(i40e_xmit_pkts);
-uint16_t i40e_simple_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
-uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(i40e_simple_prep_pkts);
+_RTE_ETH_TX_PROTO(i40e_prep_pkts);
+
int i40e_tx_queue_init(struct i40e_tx_queue *txq);
int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
void i40e_free_tx_resources(struct i40e_tx_queue *txq);
@@ -1998,7 +1998,7 @@ ice_dev_init(struct rte_eth_dev *dev)
dev->tx_descriptor_status = ice_tx_descriptor_status;
rte_eth_set_rx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_recv_pkts));
rte_eth_set_tx_burst(dev->data->port_id, _RTE_ETH_FUNC(ice_xmit_pkts));
- dev->tx_pkt_prepare = ice_prep_pkts;
+ rte_eth_set_tx_prep(dev->data->port_id, _RTE_ETH_FUNC(ice_prep_pkts));
/* for secondary processes, we don't initialise any further as primary
* has already done this work.
@@ -3339,7 +3339,7 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
#define ICE_MIN_TSO_MSS 64
#define ICE_MAX_TSO_MSS 9728
#define ICE_MAX_TSO_FRAME_SIZE 262144
-uint16_t
+static uint16_t
ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -3378,6 +3378,8 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+_RTE_ETH_TX_PREP_DEF(ice_prep_pkts)
+
void __rte_cold
ice_set_tx_function(struct rte_eth_dev *dev)
{
@@ -3430,7 +3432,7 @@ ice_set_tx_function(struct rte_eth_dev *dev)
}
if (ad->tx_vec_allowed) {
- dev->tx_pkt_prepare = NULL;
+ rte_eth_set_tx_prep(dev->data->port_id, NULL);
if (ad->tx_use_avx512) {
#ifdef CC_AVX512_SUPPORT
if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
@@ -3439,7 +3441,8 @@ ice_set_tx_function(struct rte_eth_dev *dev)
dev->data->port_id);
rte_eth_set_tx_burst(dev->data->port_id,
_RTE_ETH_FUNC(ice_xmit_pkts_vec_avx512_offload));
- dev->tx_pkt_prepare = ice_prep_pkts;
+ rte_eth_set_tx_prep(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_prep_pkts));
} else {
PMD_DRV_LOG(NOTICE,
"Using AVX512 Vector Tx (port %d).",
@@ -3455,7 +3458,8 @@ ice_set_tx_function(struct rte_eth_dev *dev)
dev->data->port_id);
rte_eth_set_tx_burst(dev->data->port_id,
_RTE_ETH_FUNC(ice_xmit_pkts_vec_avx2_offload));
- dev->tx_pkt_prepare = ice_prep_pkts;
+ rte_eth_set_tx_prep(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_prep_pkts));
} else {
PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
ad->tx_use_avx2 ? "avx2 " : "",
@@ -3475,12 +3479,13 @@ ice_set_tx_function(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
rte_eth_set_tx_burst(dev->data->port_id,
_RTE_ETH_FUNC(ice_xmit_pkts_simple));
- dev->tx_pkt_prepare = NULL;
+ rte_eth_set_tx_prep(dev->data->port_id, NULL);
} else {
PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
rte_eth_set_tx_burst(dev->data->port_id,
_RTE_ETH_FUNC(ice_xmit_pkts));
- dev->tx_pkt_prepare = ice_prep_pkts;
+ rte_eth_set_tx_prep(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_prep_pkts));
}
}
@@ -215,8 +215,7 @@ int ice_fdir_setup_rx_resources(struct ice_pf *pf);
_RTE_ETH_RX_PROTO(ice_recv_pkts);
_RTE_ETH_TX_PROTO(ice_xmit_pkts);
void ice_set_rx_function(struct rte_eth_dev *dev);
-uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+_RTE_ETH_TX_PROTO(ice_prep_pkts);
void ice_set_tx_function_flag(struct rte_eth_dev *dev,
struct ice_tx_queue *txq);
void ice_set_tx_function(struct rte_eth_dev *dev);
@@ -1727,6 +1727,68 @@ rte_eth_tx_burst_t rte_eth_get_tx_burst(uint16_t port_id);
__rte_experimental
int rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf);
+/**
+ * @internal
+ * Helper routine for eth driver tx_prepare API.
+ * Should be called as first thing on entrance to the PMD's rte_eth_tx_prepare
+ * implementation.
+ * Does necessary checks and returns pointer to TX queue data structure.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the transmit queues.
+ *
+ * @return
+ * Pointer to device TX queue structure on success or NULL otherwise.
+ */
+__rte_internal
+static inline void *
+_rte_eth_tx_prep_prolog(uint16_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev;
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
+ rte_errno = ENODEV;
+ return NULL;
+ }
+#endif
+
+ dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+ if (queue_id >= dev->data->nb_tx_queues) {
+ RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+#endif
+
+ return dev->data->tx_queues[queue_id];
+}
+
/**
 * @internal
 * Helper macro to create new API wrappers for existing PMD tx_prepare
 * functions.
 * Expands into a definition of the wrapper declared by _RTE_ETH_TX_PROTO(fn):
 * the wrapper resolves (port_id, queue_id) to the driver's private TX queue
 * via _rte_eth_tx_prep_prolog() and forwards the burst to the PMD-level
 * fn(txq, tx_pkts, nb_pkts). When the prolog fails it returns 0, with
 * rte_errno already set by the prolog (debug builds only).
 */
#define _RTE_ETH_TX_PREP_DEF(fn) \
_RTE_ETH_TX_PROTO(fn) \
{ \
	void *txq = _rte_eth_tx_prep_prolog(port_id, queue_id); \
	if (txq == NULL) \
		return 0; \
	return fn(txq, tx_pkts, nb_pkts); \
}
+
+__rte_experimental
+rte_eth_tx_prep_t rte_eth_get_tx_prep(uint16_t port_id);
+
+__rte_experimental
+int rte_eth_set_tx_prep(uint16_t port_id, rte_eth_tx_prep_t txf);
+
#ifdef __cplusplus
}
#endif
@@ -588,7 +588,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
eth_dev->device = NULL;
eth_dev->process_private = NULL;
eth_dev->intr_handle = NULL;
- eth_dev->tx_pkt_prepare = NULL;
eth_dev->rx_queue_count = NULL;
eth_dev->rx_descriptor_done = NULL;
eth_dev->rx_descriptor_status = NULL;
@@ -6379,3 +6378,25 @@ rte_eth_set_tx_burst(uint16_t port_id, rte_eth_tx_burst_t txf)
rte_eth_burst_api[port_id].tx_pkt_burst = txf;
return 0;
}
+
+__rte_experimental
+rte_eth_tx_prep_t
+rte_eth_get_tx_prep(uint16_t port_id)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ return rte_eth_burst_api[port_id].tx_pkt_prepare;
+}
+
+__rte_experimental
+int
+rte_eth_set_tx_prep(uint16_t port_id, rte_eth_tx_prep_t tpf)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api))
+ return -EINVAL;
+
+ rte_eth_burst_api[port_id].tx_pkt_prepare = tpf;
+ return 0;
+}
@@ -5293,30 +5293,13 @@ static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
- struct rte_eth_dev *dev;
-
-#ifdef RTE_ETHDEV_DEBUG_TX
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
- rte_errno = ENODEV;
- return 0;
- }
-#endif
-
- dev = &rte_eth_devices[port_id];
-
-#ifdef RTE_ETHDEV_DEBUG_TX
- if (queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
- rte_errno = EINVAL;
+ if (port_id >= RTE_MAX_ETHPORTS)
return 0;
- }
-#endif
- if (!dev->tx_pkt_prepare)
+ if (rte_eth_burst_api[port_id].tx_pkt_prepare == NULL)
return nb_pkts;
- return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
+ return rte_eth_burst_api[port_id].tx_pkt_prepare(port_id, queue_id,
tx_pkts, nb_pkts);
}
@@ -115,8 +115,6 @@ struct rte_eth_rxtx_callback {
* process, while the actual configuration data for the device is shared.
*/
struct rte_eth_dev {
- eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
-
eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */
eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
eth_rx_descriptor_status_t rx_descriptor_status; /**< Check the status of a Rx descriptor. */
@@ -254,8 +254,10 @@ EXPERIMENTAL {
rte_eth_burst_api;
rte_eth_get_rx_burst;
rte_eth_get_tx_burst;
+ rte_eth_get_tx_prep;
rte_eth_set_rx_burst;
rte_eth_set_tx_burst;
+ rte_eth_set_tx_prep;
};
INTERNAL {