[v2,2/2] ethdev: remove callback checks from fast path
Commit Message
From: Sunil Kumar Kori <skori@marvell.com>
rte_eth_fp_ops contains ops for fast path APIs. Each API
validates the availability of its callback and then invokes it.
These checks impact data path performance.
Hence remove these NULL checks and instead install dummy
callbacks.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
lib/ethdev/ethdev_driver.c | 55 +++++++++++++++++++++++++++++
lib/ethdev/ethdev_driver.h | 71 ++++++++++++++++++++++++++++++++++++++
lib/ethdev/rte_ethdev.h | 29 ++--------------
3 files changed, 129 insertions(+), 26 deletions(-)
Comments
> From: Sunil Kumar Kori <skori@marvell.com>
> Sent: Monday, 12 May 2025 17.07
>
> rte_eth_fp_ops contains ops for fast path APIs. Each API
> validates the availability of its callback and then invokes it.
> These checks impact data path performance.
>
> Hence remove these NULL checks and instead install dummy
> callbacks.
>
> Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
> ---
Acked-by: Morten Brørup <mb@smartsharesystems.com>
> From: Sunil Kumar Kori <skori@marvell.com>
> Sent: Monday, 12 May 2025 17.07
>
> rte_eth_fp_ops contains ops for fast path APIs. Each API
> validates the availability of its callback and then invokes it.
> These checks impact data path performance.
Picking up the discussion from another thread [1]:
> From: Konstantin Ananyev [mailto:konstantin.ananyev@huawei.com]
> Sent: Wednesday, 28 May 2025 11.14
>
> So what we are saving with this patch is one cmp and one untaken branch:
> @@ -6399,8 +6399,6 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
> return -EINVAL;
> #endif
>
> - if (p->rx_queue_count == NULL)
> - return -ENOTSUP;
> return p->rx_queue_count(qd);
> }
These are inline functions, so we also save some code space, instruction cache, and possibly an entry in the branch predictor - everywhere these functions are instantiated by the compiler.
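To make the saved work concrete, here is a minimal standalone sketch of the two patterns. The struct and function names are hypothetical illustrations, not the actual ethdev definitions: the checked wrapper pays a compare and branch at every inlined call site, while the dummy-callback wrapper reduces to a bare indirect call.

#include <errno.h>
#include <stddef.h>

struct fp_ops {
	int (*rx_queue_count)(void *qd);
};

/* Before: every inlined call site carries the compare and branch. */
static inline int
queue_count_checked(const struct fp_ops *p, void *qd)
{
	if (p->rx_queue_count == NULL)
		return -ENOTSUP;
	return p->rx_queue_count(qd);
}

/* Dummy installed at device allocation for drivers that do not
 * implement the callback; it preserves the -ENOTSUP contract. */
static int
rx_queue_count_dummy(void *qd)
{
	(void)qd;
	return -ENOTSUP;
}

/* After: the pointer is guaranteed non-NULL, so the wrapper is a
 * single indirect call. */
static inline int
queue_count_unchecked(const struct fp_ops *p, void *qd)
{
	return p->rx_queue_count(qd);
}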
>
> I wonder how realistic (and measurable) the gain is?
The performance optimization is mainly targeting the mbuf recycle operations, i.e. the hot fast path, where every cycle counts.
And while optimizing those, the other ethdev fast path callbacks are also optimized.
Yes, although we all agree that there is no downside to this optimization, it would be nice to see some performance numbers.
[1]: https://inbox.dpdk.org/dev/581e7a5389f842a9824a365a46c470ad@huawei.com/
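For anyone wanting to produce such numbers, a minimal micro-benchmark sketch, not from this thread, could time back-to-back calls as below. It assumes EAL is initialized and that port_id/queue_id refer to a started port; bench_rx_queue_count is a hypothetical helper name.

#include <stdio.h>
#include <stdint.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>

/* Measure the average cost in TSC cycles of rte_eth_rx_queue_count()
 * over many back-to-back calls. */
static void
bench_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
	const uint64_t iters = 10000000;
	volatile int rc = 0;	/* volatile keeps the calls from being elided */
	uint64_t start, end, i;

	start = rte_rdtsc();
	for (i = 0; i < iters; i++)
		rc = rte_eth_rx_queue_count(port_id, queue_id);
	end = rte_rdtsc();

	(void)rc;
	printf("rx_queue_count: %.2f cycles/call\n",
		(double)(end - start) / (double)iters);
}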
> > From: Sunil Kumar Kori <skori@marvell.com>
> > Sent: Monday, 12 May 2025 17.07
> >
> > rte_eth_fp_ops contains ops for fast path APIs. Each API validates
> > the availability of its callback and then invokes it.
> > These checks impact data path performance.
>
> Picking up the discussion from another thread [1]:
>
> > From: Konstantin Ananyev [mailto:konstantin.ananyev@huawei.com]
> > Sent: Wednesday, 28 May 2025 11.14
> >
> > So what we are saving with this patch is one cmp and one untaken branch:
> > @@ -6399,8 +6399,6 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
> > return -EINVAL;
> > #endif
> >
> > - if (p->rx_queue_count == NULL)
> > - return -ENOTSUP;
> > return p->rx_queue_count(qd);
> > }
>
> These are inline functions, so we also save some code space, instruction cache,
> and possibly an entry in the branch predictor - everywhere these functions are
> instantiated by the compiler.
>
> >
> > I wonder how realistic (and measurable) the gain is?
>
> The performance optimization is mainly targeting the mbuf recycle operations,
> i.e. the hot fast path, where every cycle counts.
> And while optimizing those, the other ethdev fast path callbacks are also
> optimized.
>
> Yes, although we all agree that there is no downside to this optimization, it
> would be nice to see some performance numbers.
>
Sure, I will get performance numbers on Marvell platforms and share them.
> [1]: https://inbox.dpdk.org/dev/581e7a5389f842a9824a365a46c470ad@huawei.com/
>
> > > From: Sunil Kumar Kori <skori@marvell.com>
> > > Sent: Monday, 12 May 2025 17.07
> > >
> > > rte_eth_fp_ops contains ops for fast path APIs. Each API validates
> > > the availability of its callback and then invokes it.
> > > These checks impact data path performance.
> >
> > Picking up the discussion from another thread [1]:
> >
> > > From: Konstantin Ananyev [mailto:konstantin.ananyev@huawei.com]
> > > Sent: Wednesday, 28 May 2025 11.14
> > >
> > > So what we are saving with this patch is one cmp and one untaken branch:
> > > @@ -6399,8 +6399,6 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
> > > return -EINVAL;
> > > #endif
> > >
> > > - if (p->rx_queue_count == NULL)
> > > - return -ENOTSUP;
> > > return p->rx_queue_count(qd);
> > > }
> >
> > These are inline functions, so we also save some code space,
> > instruction cache, and possibly an entry in the branch predictor -
> > everywhere these functions are instantiated by the compiler.
> >
> > >
> > > I wonder how realistic (and measurable) the gain is?
> >
> > The performance optimization is mainly targeting the mbuf recycle
> > operations, i.e. the hot fast path, where every cycle counts.
> > And while optimizing those, the other ethdev fast path callbacks are
> > also optimized.
> >
> > Yes, although we all agree that there is no downside to this
> > optimization, it would be nice to see some performance numbers.
> >
> Sure, I will get performance numbers on Marvell platforms and share them.
>
Hi Morten,
I got performance numbers on multiple Marvell platforms and observed a gain of around 0.1% (~20K pps) with this patch. Beyond that, the other fast path callbacks (rx_pkt_burst and tx_pkt_burst) also avoid this check.
IMO, this patch has no negative impact, gives a slight improvement, and cleans up the fast path. Please advise.
> From: Sunil Kumar Kori [mailto:skori@marvell.com]
> Sent: Monday, 16 June 2025 10.36
>
> > > > From: Sunil Kumar Kori <skori@marvell.com>
> > > > Sent: Monday, 12 May 2025 17.07
> > > >
> > > > rte_eth_fp_ops contains ops for fast path APIs. Each API validates
> > > > the availability of its callback and then invokes it.
> > > > These checks impact data path performance.
> > >
> > > Picking up the discussion from another thread [1]:
> > >
> > > > From: Konstantin Ananyev [mailto:konstantin.ananyev@huawei.com]
> > > > Sent: Wednesday, 28 May 2025 11.14
> > > >
> > > > So what we are saving with this patch is one cmp and one untaken branch:
> > > > @@ -6399,8 +6399,6 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
> > > > return -EINVAL;
> > > > #endif
> > > >
> > > > - if (p->rx_queue_count == NULL)
> > > > - return -ENOTSUP;
> > > > return p->rx_queue_count(qd);
> > > > }
> > >
> > > These are inline functions, so we also save some code space,
> > > instruction cache, and possibly an entry in the branch predictor -
> > > everywhere these functions are instantiated by the compiler.
> > >
> > > >
> > > > I wonder how realistic (and measurable) the gain is?
> > >
> > > The performance optimization is mainly targeting the mbuf recycle
> > > operations, i.e. the hot fast path, where every cycle counts.
> > > And while optimizing those, the other ethdev fast path callbacks are
> > > also optimized.
> > >
> > > Yes, although we all agree that there is no downside to this
> > > optimization, it would be nice to see some performance numbers.
> > >
> > Sure, I will get performance numbers on Marvell platforms and share them.
> >
>
> Hi Morten,
> I got performance numbers on multiple Marvell platforms and observed a gain
> of around 0.1% (~20K pps) with this patch. Beyond that, the other fast path
> callbacks (rx_pkt_burst and tx_pkt_burst) also avoid this check.
>
> IMO, this patch has no negative impact, gives a slight improvement, and
> cleans up the fast path. Please advise.
I still like this patch, so I confirm my ACK:
Acked-by: Morten Brørup <mb@smartsharesystems.com>
diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c
index ec0c1e1176..f89562b237 100644
--- a/lib/ethdev/ethdev_driver.c
+++ b/lib/ethdev/ethdev_driver.c
@@ -75,6 +75,20 @@ eth_dev_get(uint16_t port_id)
return eth_dev;
}
+static void
+eth_dev_set_dummy_fops(struct rte_eth_dev *eth_dev)
+{
+ eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+ eth_dev->tx_pkt_prepare = rte_eth_tx_pkt_prepare_dummy;
+ eth_dev->rx_queue_count = rte_eth_queue_count_dummy;
+ eth_dev->tx_queue_count = rte_eth_queue_count_dummy;
+ eth_dev->rx_descriptor_status = rte_eth_descriptor_status_dummy;
+ eth_dev->tx_descriptor_status = rte_eth_descriptor_status_dummy;
+ eth_dev->recycle_tx_mbufs_reuse = rte_eth_recycle_tx_mbufs_reuse_dummy;
+ eth_dev->recycle_rx_descriptors_refill = rte_eth_recycle_rx_descriptors_refill_dummy;
+}
+
RTE_EXPORT_INTERNAL_SYMBOL(rte_eth_dev_allocate)
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
@@ -115,6 +129,7 @@ rte_eth_dev_allocate(const char *name)
}
eth_dev = eth_dev_get(port_id);
+ eth_dev_set_dummy_fops(eth_dev);
eth_dev->flow_fp_ops = &rte_flow_fp_default_ops;
strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
eth_dev->data->port_id = port_id;
@@ -847,6 +862,46 @@ rte_eth_pkt_burst_dummy(void *queue __rte_unused,
return 0;
}
+RTE_EXPORT_INTERNAL_SYMBOL(rte_eth_tx_pkt_prepare_dummy)
+uint16_t
+rte_eth_tx_pkt_prepare_dummy(void *queue __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t nb_pkts)
+{
+ return nb_pkts;
+}
+
+RTE_EXPORT_INTERNAL_SYMBOL(rte_eth_queue_count_dummy)
+int
+rte_eth_queue_count_dummy(void *queue __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+RTE_EXPORT_INTERNAL_SYMBOL(rte_eth_descriptor_status_dummy)
+int
+rte_eth_descriptor_status_dummy(void *queue __rte_unused,
+ uint16_t offset __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+RTE_EXPORT_INTERNAL_SYMBOL(rte_eth_recycle_tx_mbufs_reuse_dummy)
+uint16_t
+rte_eth_recycle_tx_mbufs_reuse_dummy(void *queue __rte_unused,
+ struct rte_eth_recycle_rxq_info *recycle_rxq_info __rte_unused)
+{
+ return 0;
+}
+
+RTE_EXPORT_INTERNAL_SYMBOL(rte_eth_recycle_rx_descriptors_refill_dummy)
+void
+rte_eth_recycle_rx_descriptors_refill_dummy(void *queue __rte_unused,
+ uint16_t nb __rte_unused)
+{
+ /* No action. */
+}
+
RTE_EXPORT_INTERNAL_SYMBOL(rte_eth_representor_id_get)
int
rte_eth_representor_id_get(uint16_t port_id,
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 2b4d2ae9c3..71085bddff 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -1874,6 +1874,77 @@ rte_eth_pkt_burst_dummy(void *queue __rte_unused,
struct rte_mbuf **pkts __rte_unused,
uint16_t nb_pkts __rte_unused);
+/**
+ * @internal
+ * Dummy DPDK callback for Tx packet prepare.
+ *
+ * @param queue
+ * Pointer to Tx queue
+ * @param pkts
+ * Packet array
+ * @param nb_pkts
+ * Number of packets in packet array
+ */
+__rte_internal
+uint16_t
+rte_eth_tx_pkt_prepare_dummy(void *queue __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused);
+
+/**
+ * @internal
+ * Dummy DPDK callback for queue count.
+ *
+ * @param queue
+ * Pointer to Rx/Tx queue
+ */
+__rte_internal
+int
+rte_eth_queue_count_dummy(void *queue __rte_unused);
+
+/**
+ * @internal
+ * Dummy DPDK callback for descriptor status.
+ *
+ * @param queue
+ * Pointer to Rx/Tx queue
+ * @param offset
+ * The offset of the descriptor starting from tail (0 is the next
+ * packet to be received by the driver).
+ */
+__rte_internal
+int
+rte_eth_descriptor_status_dummy(void *queue __rte_unused,
+ uint16_t offset __rte_unused);
+
+/**
+ * @internal
+ * Dummy DPDK callback for recycle Tx mbufs reuse.
+ *
+ * @param queue
+ * Pointer to Tx queue
+ * @param recycle_rxq_info
+ * Pointer to recycle Rx queue info
+ */
+__rte_internal
+uint16_t
+rte_eth_recycle_tx_mbufs_reuse_dummy(void *queue __rte_unused,
+ struct rte_eth_recycle_rxq_info *recycle_rxq_info __rte_unused);
+
+/**
+ * @internal
+ * Dummy DPDK callback for Rx descriptor refill.
+ *
+ * @param queue
+ * Pointer to Rx queue
+ * @param nb
+ * Number of descriptors to refill
+ */
+__rte_internal
+void
+rte_eth_recycle_rx_descriptors_refill_dummy(void *queue __rte_unused,
+ uint16_t nb __rte_unused);
+
/**
* Allocate an unique switch domain identifier.
*
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index b3031ab9e6..2034680560 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -6399,8 +6399,6 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
return -EINVAL;
#endif
- if (p->rx_queue_count == NULL)
- return -ENOTSUP;
return p->rx_queue_count(qd);
}
@@ -6471,8 +6469,6 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
if (qd == NULL)
return -ENODEV;
#endif
- if (p->rx_descriptor_status == NULL)
- return -ENOTSUP;
return p->rx_descriptor_status(qd, offset);
}
@@ -6542,8 +6538,6 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
if (qd == NULL)
return -ENODEV;
#endif
- if (p->tx_descriptor_status == NULL)
- return -ENOTSUP;
return p->tx_descriptor_status(qd, offset);
}
@@ -6786,9 +6780,6 @@ rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
}
#endif
- if (!p->tx_pkt_prepare)
- return nb_pkts;
-
return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
}
@@ -6985,8 +6976,6 @@ rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
return 0;
}
#endif
- if (p1->recycle_tx_mbufs_reuse == NULL)
- return 0;
#ifdef RTE_ETHDEV_DEBUG_RX
if (rx_port_id >= RTE_MAX_ETHPORTS ||
@@ -7010,8 +6999,6 @@ rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
return 0;
}
#endif
- if (p2->recycle_rx_descriptors_refill == NULL)
- return 0;
/* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
* into Rx mbuf ring.
@@ -7107,15 +7094,13 @@ rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
#ifdef RTE_ETHDEV_DEBUG_TX
if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
- rc = -ENODEV;
- goto out;
+ return -ENODEV;
}
if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
queue_id, port_id);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
#endif
@@ -7127,18 +7112,10 @@ rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
if (qd == NULL) {
RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
queue_id, port_id);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
#endif
- if (fops->tx_queue_count == NULL) {
- rc = -ENOTSUP;
- goto out;
- }
-
rc = fops->tx_queue_count(qd);
-
-out:
rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
return rc;
}
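As a closing usage note, a hedged sketch assuming a valid, started port: the application-visible contract is preserved by the dummies, since a driver that does not implement rx_queue_count now returns -ENOTSUP from rte_eth_queue_count_dummy() instead of from the removed inline NULL check. show_rx_queue_usage is a hypothetical helper name.

#include <stdio.h>
#include <errno.h>

#include <rte_ethdev.h>

static void
show_rx_queue_usage(uint16_t port_id, uint16_t queue_id)
{
	int rc = rte_eth_rx_queue_count(port_id, queue_id);

	if (rc == -ENOTSUP)
		printf("driver does not support Rx queue count\n");
	else if (rc < 0)
		printf("error: %d\n", rc);
	else
		printf("%d descriptors in use\n", rc);	/* same result as before the patch */
}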