@@ -1433,7 +1433,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &i40e_eth_dev_ops;
- dev->rx_queue_count = i40e_dev_rx_queue_count;
+ rte_eth_set_rx_qcnt(dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_dev_rx_queue_count));
dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
rte_eth_set_rx_desc_st(dev->data->port_id,
_RTE_ETH_FUNC(i40e_dev_rx_descriptor_status));
@@ -1572,7 +1572,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
/* assign ops func pointer */
eth_dev->dev_ops = &i40evf_eth_dev_ops;
- eth_dev->rx_queue_count = i40e_dev_rx_queue_count;
+ rte_eth_set_rx_qcnt(eth_dev->data->port_id,
+ _RTE_ETH_FUNC(i40e_dev_rx_queue_count));
eth_dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
rte_eth_set_rx_desc_st(eth_dev->data->port_id,
_RTE_ETH_FUNC(i40e_dev_rx_descriptor_status));
@@ -2134,7 +2134,7 @@ i40e_dev_rx_queue_release(void *rxq)
rte_free(q);
}
-uint32_t
+static uint32_t
i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define I40E_RXQ_SCAN_INTERVAL 4
@@ -2163,6 +2163,8 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return desc;
}
+_RTE_ETH_RX_QCNT_DEF(i40e_dev_rx_queue_count)
+
int
i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
@@ -220,8 +220,7 @@ int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
-uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+_RTE_ETH_RX_QCNT_PROTO(i40e_dev_rx_queue_count);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
_RTE_ETH_RX_DESC_PROTO(i40e_dev_rx_descriptor_status);
_RTE_ETH_TX_DESC_PROTO(i40e_dev_tx_descriptor_status);
@@ -1993,7 +1993,8 @@ ice_dev_init(struct rte_eth_dev *dev)
#endif
dev->dev_ops = &ice_eth_dev_ops;
- dev->rx_queue_count = ice_rx_queue_count;
+ rte_eth_set_rx_qcnt(dev->data->port_id,
+ _RTE_ETH_FUNC(ice_rx_queue_count));
rte_eth_set_rx_desc_st(dev->data->port_id,
_RTE_ETH_FUNC(ice_rx_descriptor_status));
rte_eth_set_tx_desc_st(dev->data->port_id,
@@ -1426,7 +1426,7 @@ ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
-uint32_t
+static uint32_t
ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define ICE_RXQ_SCAN_INTERVAL 4
@@ -1454,6 +1454,8 @@ ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return desc;
}
+_RTE_ETH_RX_QCNT_DEF(ice_rx_queue_count)
+
#define ICE_RX_FLEX_ERR0_BITS \
((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \
(1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
@@ -219,7 +219,7 @@ _RTE_ETH_TX_PROTO(ice_prep_pkts);
void ice_set_tx_function_flag(struct rte_eth_dev *dev,
struct ice_tx_queue *txq);
void ice_set_tx_function(struct rte_eth_dev *dev);
-uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+_RTE_ETH_RX_QCNT_PROTO(ice_rx_queue_count);
void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
@@ -1921,6 +1921,64 @@ rte_eth_tx_descriptor_status_t rte_eth_get_tx_desc_st(uint16_t port_id);
__rte_experimental
int rte_eth_set_tx_desc_st(uint16_t port_id, rte_eth_tx_descriptor_status_t rf);
+/**
+ * @internal
+ * Helper routine for eth driver rx_queue_count API.
+ * Should be called as the first thing on entry to the PMD's
+ * rx_queue_count implementation.
+ * Does necessary checks for input parameters.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the receive queue.
+ *
+ * @return
+ * Zero on success or negative error code otherwise.
+ */
+__rte_internal
+static inline int
+_rte_eth_rx_qcnt_prolog(uint16_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+ if (queue_id >= dev->data->nb_rx_queues ||
+ dev->data->rx_queues[queue_id] == NULL)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * @internal
+ * Helper macro to declare new API wrappers for existing PMD rx_queue_count
+ * functions.
+ */
+#define _RTE_ETH_RX_QCNT_PROTO(fn) \
+ int _RTE_ETH_FUNC(fn)(uint16_t port_id, uint16_t queue_id)
+
+/**
+ * @internal
+ * Helper macro to define new API wrappers for existing PMD rx_queue_count
+ * functions.
+ */
+#define _RTE_ETH_RX_QCNT_DEF(fn) \
+_RTE_ETH_RX_QCNT_PROTO(fn) \
+{ \
+ int rc; \
+ rc = _rte_eth_rx_qcnt_prolog(port_id, queue_id); \
+ if (rc != 0) \
+ return rc; \
+ return fn(&rte_eth_devices[port_id], queue_id); \
+}
+
+__rte_experimental
+rte_eth_rx_queue_count_t rte_eth_get_rx_qcnt(uint16_t port_id);
+
+__rte_experimental
+int rte_eth_set_rx_qcnt(uint16_t port_id, rte_eth_rx_queue_count_t rf);
+
#ifdef __cplusplus
}
#endif
@@ -588,7 +588,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
eth_dev->device = NULL;
eth_dev->process_private = NULL;
eth_dev->intr_handle = NULL;
- eth_dev->rx_queue_count = NULL;
eth_dev->rx_descriptor_done = NULL;
eth_dev->dev_ops = NULL;
@@ -6421,6 +6420,7 @@ rte_eth_set_rx_desc_st(uint16_t port_id, rte_eth_rx_descriptor_status_t rf)
return 0;
}
+__rte_experimental
rte_eth_tx_descriptor_status_t
rte_eth_get_tx_desc_st(uint16_t port_id)
{
@@ -6441,3 +6441,25 @@ rte_eth_set_tx_desc_st(uint16_t port_id, rte_eth_tx_descriptor_status_t tf)
rte_eth_burst_api[port_id].tx_descriptor_status = tf;
return 0;
}
+
+__rte_experimental
+rte_eth_rx_queue_count_t
+rte_eth_get_rx_qcnt(uint16_t port_id)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ return rte_eth_burst_api[port_id].rx_queue_count;
+}
+
+__rte_experimental
+int
+rte_eth_set_rx_qcnt(uint16_t port_id, rte_eth_rx_queue_count_t rf)
+{
+ if (port_id >= RTE_DIM(rte_eth_burst_api))
+ return -EINVAL;
+
+ rte_eth_burst_api[port_id].rx_queue_count = rf;
+ return 0;
+}
@@ -5004,16 +5004,15 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
static inline int
rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
- struct rte_eth_dev *dev;
+ rte_eth_rx_queue_count_t rqc;
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_queue_count, -ENOTSUP);
- if (queue_id >= dev->data->nb_rx_queues ||
- dev->data->rx_queues[queue_id] == NULL)
+ if (port_id >= RTE_MAX_ETHPORTS)
return -EINVAL;
- return (int)(*dev->rx_queue_count)(dev, queue_id);
+ rqc = rte_eth_burst_api[port_id].rx_queue_count;
+ RTE_FUNC_PTR_OR_ERR_RET(rqc, -ENOTSUP);
+
+ return (rqc)(port_id, queue_id);
}
/**
@@ -115,7 +115,6 @@ struct rte_eth_rxtx_callback {
* process, while the actual configuration data for the device is shared.
*/
struct rte_eth_dev {
- eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */
eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
/**
* Next two fields are per-device data but *data is shared between
@@ -254,13 +254,15 @@ EXPERIMENTAL {
rte_eth_burst_api;
rte_eth_get_rx_burst;
rte_eth_get_rx_desc_st;
- rte_eth_get_tx_desc_st;
+ rte_eth_get_rx_qcnt;
rte_eth_get_tx_burst;
+ rte_eth_get_tx_desc_st;
rte_eth_get_tx_prep;
rte_eth_set_rx_burst;
rte_eth_set_rx_desc_st;
- rte_eth_set_tx_desc_st;
+ rte_eth_set_rx_qcnt;
rte_eth_set_tx_burst;
+ rte_eth_set_tx_desc_st;
rte_eth_set_tx_prep;
};