On Wed, Oct 6, 2021 at 12:21 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Move fastpath inline function pointers from rte_eventdev into a
> separate structure accessed via a flat array.
> The intension is to make rte_eventdev and related structures private
intention
> to avoid future API/ABI breakages.`
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Acked-by: Ray Kinsella <mdr@ashroe.eu>
> ---
> lib/eventdev/eventdev_pmd.h | 38 +++++++++++
> lib/eventdev/eventdev_pmd_pci.h | 4 +-
> lib/eventdev/eventdev_private.c | 112 +++++++++++++++++++++++++++++++
> lib/eventdev/meson.build | 1 +
> lib/eventdev/rte_eventdev.c | 22 +++++-
> lib/eventdev/rte_eventdev_core.h | 28 ++++++++
> lib/eventdev/version.map | 6 ++
> 7 files changed, 209 insertions(+), 2 deletions(-)
> create mode 100644 lib/eventdev/eventdev_private.c
>
sources = files(
> + 'eventdev_private.c',
> 'rte_eventdev.c',
> 'rte_event_ring.c',
> 'eventdev_trace_points.c',
Since you are reworking, please sort this in alphabetical order.
>
> +struct rte_event_fp_ops {
> + event_enqueue_t enqueue;
> + /**< PMD enqueue function. */
> + event_enqueue_burst_t enqueue_burst;
> + /**< PMD enqueue burst function. */
> + event_enqueue_burst_t enqueue_new_burst;
> + /**< PMD enqueue burst new function. */
> + event_enqueue_burst_t enqueue_forward_burst;
> + /**< PMD enqueue burst fwd function. */
> + event_dequeue_t dequeue;
> + /**< PMD dequeue function. */
> + event_dequeue_burst_t dequeue_burst;
> + /**< PMD dequeue burst function. */
> + event_tx_adapter_enqueue_t txa_enqueue;
> + /**< PMD Tx adapter enqueue function. */
> + event_tx_adapter_enqueue_t txa_enqueue_same_dest;
> + /**< PMD Tx adapter enqueue same destination function. */
> + event_crypto_adapter_enqueue_t ca_enqueue;
> + /**< PMD Crypto adapter enqueue function. */
> + uintptr_t reserved[2];
> +
> + void **data;
Since access to data is a must for all ops, please move it to be the first member.
Also, you can then merge reserved and reserved2 into a single field.
> + /**< points to array of internal port data pointers */
> + uintptr_t reserved2[4];
> +} __rte_cache_aligned;
> +
> +extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
> +
> #define RTE_EVENTDEV_NAME_MAX_LEN (64)
> /**< @internal Max length of name of event PMD */
>
> diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
> index 5f1fe412a4..a3a732089b 100644
> --- a/lib/eventdev/version.map
> +++ b/lib/eventdev/version.map
> @@ -85,6 +85,9 @@ DPDK_22 {
> rte_event_timer_cancel_burst;
> rte_eventdevs;
>
> + #added in 21.11
> + rte_event_fp_ops;
> +
> local: *;
> };
>
> @@ -141,6 +144,9 @@ EXPERIMENTAL {
> INTERNAL {
> global:
>
> + event_dev_fp_ops_reset;
> + event_dev_fp_ops_set;
> + event_dev_probing_finish;
> rte_event_pmd_selftest_seqn_dynfield_offset;
> rte_event_pmd_allocate;
> rte_event_pmd_get_named_dev;
> --
> 2.17.1
>
@@ -1189,4 +1189,42 @@ __rte_internal
int
rte_event_pmd_release(struct rte_eventdev *eventdev);
+/**
+ *
+ * @internal
+ * This is the last step of device probing.
+ * It must be called after a port is allocated and initialized successfully.
+ *
+ * @param eventdev
+ * New event device.
+ */
+__rte_internal
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev);
+
+/**
+ * Reset eventdevice fastpath APIs to dummy values.
+ *
+ * @param fp_op
+ *   The *fp_op* pointer to reset.
+ */
+__rte_internal
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op);
+
+/**
+ * Set eventdevice fastpath APIs to event device values.
+ *
+ * @param fp_ops
+ * The *fp_ops* pointer to set.
+ */
+__rte_internal
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_ops,
+ const struct rte_eventdev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif /* _RTE_EVENTDEV_PMD_H_ */
@@ -67,8 +67,10 @@ rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
/* Invoke PMD device initialization function */
retval = devinit(eventdev);
- if (retval == 0)
+ if (retval == 0) {
+ event_dev_probing_finish(eventdev);
return 0;
+ }
RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
" failed", pci_drv->driver.name,
new file mode 100644
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "eventdev_pmd.h"
+#include "rte_eventdev.h"
+
+/* Placeholder enqueue installed while a device is unconfigured/stopped:
+ * log an error and report zero events accepted.
+ */
+static uint16_t
+dummy_event_enqueue(__rte_unused void *port,
+ __rte_unused const struct rte_event *ev)
+{
+ RTE_EDEV_LOG_ERR(
+ "event enqueue requested for unconfigured event device");
+ return 0;
+}
+
+/* Placeholder shared by enqueue_burst, enqueue_new_burst and
+ * enqueue_forward_burst (see event_dev_fp_ops_reset()): log an error and
+ * report zero events accepted.
+ */
+static uint16_t
+dummy_event_enqueue_burst(__rte_unused void *port,
+ __rte_unused const struct rte_event ev[],
+ __rte_unused uint16_t nb_events)
+{
+ RTE_EDEV_LOG_ERR(
+ "event enqueue burst requested for unconfigured event device");
+ return 0;
+}
+
+/* Placeholder dequeue for an unconfigured device: log an error and return
+ * no event (0).
+ */
+static uint16_t
+dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
+ __rte_unused uint64_t timeout_ticks)
+{
+ RTE_EDEV_LOG_ERR(
+ "event dequeue requested for unconfigured event device");
+ return 0;
+}
+
+/* Placeholder burst dequeue for an unconfigured device: log an error and
+ * return zero events.
+ */
+static uint16_t
+dummy_event_dequeue_burst(__rte_unused void *port,
+ __rte_unused struct rte_event ev[],
+ __rte_unused uint16_t nb_events,
+ __rte_unused uint64_t timeout_ticks)
+{
+ RTE_EDEV_LOG_ERR(
+ "event dequeue burst requested for unconfigured event device");
+ return 0;
+}
+
+/* Placeholder Tx adapter enqueue for an unconfigured device: log an error
+ * and report zero events accepted.
+ */
+static uint16_t
+dummy_event_tx_adapter_enqueue(__rte_unused void *port,
+ __rte_unused struct rte_event ev[],
+ __rte_unused uint16_t nb_events)
+{
+ RTE_EDEV_LOG_ERR(
+ "event Tx adapter enqueue requested for unconfigured event device");
+ return 0;
+}
+
+/* Placeholder for the same-destination Tx adapter enqueue variant: log an
+ * error and report zero events accepted.
+ */
+static uint16_t
+dummy_event_tx_adapter_enqueue_same_dest(__rte_unused void *port,
+ __rte_unused struct rte_event ev[],
+ __rte_unused uint16_t nb_events)
+{
+ RTE_EDEV_LOG_ERR(
+ "event Tx adapter enqueue same destination requested for unconfigured event device");
+ return 0;
+}
+
+/* Placeholder crypto adapter enqueue for an unconfigured device: log an
+ * error and report zero events accepted.
+ */
+static uint16_t
+dummy_event_crypto_adapter_enqueue(__rte_unused void *port,
+ __rte_unused struct rte_event ev[],
+ __rte_unused uint16_t nb_events)
+{
+ RTE_EDEV_LOG_ERR(
+ "event crypto adapter enqueue requested for unconfigured event device");
+ return 0;
+}
+
+/* Reset one fast-path ops slot to the dummy stubs above so that calls made
+ * through the public inline wrappers before configure/start (or after
+ * stop/close/release) only log an error instead of jumping through stale
+ * PMD pointers.
+ */
+void
+event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
+{
+ /* Valid (zeroed) port-data array so the per-port lookup done by the
+ * inline wrappers never dereferences a NULL ->data pointer.
+ */
+ static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+ static const struct rte_event_fp_ops dummy = {
+ .enqueue = dummy_event_enqueue,
+ .enqueue_burst = dummy_event_enqueue_burst,
+ .enqueue_new_burst = dummy_event_enqueue_burst,
+ .enqueue_forward_burst = dummy_event_enqueue_burst,
+ .dequeue = dummy_event_dequeue,
+ .dequeue_burst = dummy_event_dequeue_burst,
+ .txa_enqueue = dummy_event_tx_adapter_enqueue,
+ .txa_enqueue_same_dest =
+ dummy_event_tx_adapter_enqueue_same_dest,
+ .ca_enqueue = dummy_event_crypto_adapter_enqueue,
+ .data = dummy_data,
+ };
+
+ *fp_op = dummy;
+}
+
+/* Publish a device's PMD fast-path entry points and its per-port data
+ * array into the flat rte_event_fp_ops slot consumed by the public inline
+ * API wrappers. Called on device start and at the end of probing.
+ *
+ * Note: parameter renamed fp_op -> fp_ops to match the prototype and its
+ * doc comment in eventdev_pmd.h.
+ */
+void
+event_dev_fp_ops_set(struct rte_event_fp_ops *fp_ops,
+ const struct rte_eventdev *dev)
+{
+ fp_ops->enqueue = dev->enqueue;
+ fp_ops->enqueue_burst = dev->enqueue_burst;
+ fp_ops->enqueue_new_burst = dev->enqueue_new_burst;
+ fp_ops->enqueue_forward_burst = dev->enqueue_forward_burst;
+ fp_ops->dequeue = dev->dequeue;
+ fp_ops->dequeue_burst = dev->dequeue_burst;
+ fp_ops->txa_enqueue = dev->txa_enqueue;
+ fp_ops->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
+ fp_ops->ca_enqueue = dev->ca_enqueue;
+ fp_ops->data = dev->data->ports;
+}
@@ -8,6 +8,7 @@ else
endif
sources = files(
+ 'eventdev_private.c',
'rte_eventdev.c',
'rte_event_ring.c',
'eventdev_trace_points.c',
@@ -46,6 +46,9 @@ static struct rte_eventdev_global eventdev_globals = {
.nb_devs = 0
};
+/* Public fastpath APIs. */
+struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
/* Event dev north bound API implementation */
uint8_t
@@ -300,8 +303,8 @@ int
rte_event_dev_configure(uint8_t dev_id,
const struct rte_event_dev_config *dev_conf)
{
- struct rte_eventdev *dev;
struct rte_event_dev_info info;
+ struct rte_eventdev *dev;
int diag;
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -470,10 +473,13 @@ rte_event_dev_configure(uint8_t dev_id,
return diag;
}
+ event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
+
/* Configure the device */
diag = (*dev->dev_ops->dev_configure)(dev);
if (diag != 0) {
RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+ event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
event_dev_queue_config(dev, 0);
event_dev_port_config(dev, 0);
}
@@ -1244,6 +1250,8 @@ rte_event_dev_start(uint8_t dev_id)
else
return diag;
+ event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
+
return 0;
}
@@ -1284,6 +1292,7 @@ rte_event_dev_stop(uint8_t dev_id)
dev->data->dev_started = 0;
(*dev->dev_ops->dev_stop)(dev);
rte_eventdev_trace_stop(dev_id);
+ event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
}
int
@@ -1302,6 +1311,7 @@ rte_event_dev_close(uint8_t dev_id)
return -EBUSY;
}
+ event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
rte_eventdev_trace_close(dev_id);
return (*dev->dev_ops->dev_close)(dev);
}
@@ -1435,6 +1445,7 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
if (eventdev == NULL)
return -EINVAL;
+ event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
eventdev->attached = RTE_EVENTDEV_DETACHED;
eventdev_globals.nb_devs--;
@@ -1460,6 +1471,15 @@ rte_event_pmd_release(struct rte_eventdev *eventdev)
return 0;
}
+/* Last step of device probing: expose the newly probed device's fast-path
+ * ops in the flat array. Tolerates a NULL eventdev so callers need no
+ * extra check.
+ */
+void
+event_dev_probing_finish(struct rte_eventdev *eventdev)
+{
+ if (eventdev == NULL)
+ return;
+
+ event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
+ eventdev);
+}
static int
handle_dev_list(const char *cmd __rte_unused,
@@ -39,6 +39,34 @@ typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
uint16_t nb_events);
/**< @internal Enqueue burst of events on crypto adapter */
+/* @internal
+ * Fast-path API function pointers for one event device, kept in a flat,
+ * cache-line-aligned array indexed by device id so the public inline
+ * wrappers need not touch struct rte_eventdev. The reserved fields leave
+ * room to add ops later without breaking the ABI.
+ */
+struct rte_event_fp_ops {
+ event_enqueue_t enqueue;
+ /**< PMD enqueue function. */
+ event_enqueue_burst_t enqueue_burst;
+ /**< PMD enqueue burst function. */
+ event_enqueue_burst_t enqueue_new_burst;
+ /**< PMD enqueue burst new function. */
+ event_enqueue_burst_t enqueue_forward_burst;
+ /**< PMD enqueue burst fwd function. */
+ event_dequeue_t dequeue;
+ /**< PMD dequeue function. */
+ event_dequeue_burst_t dequeue_burst;
+ /**< PMD dequeue burst function. */
+ event_tx_adapter_enqueue_t txa_enqueue;
+ /**< PMD Tx adapter enqueue function. */
+ event_tx_adapter_enqueue_t txa_enqueue_same_dest;
+ /**< PMD Tx adapter enqueue same destination function. */
+ event_crypto_adapter_enqueue_t ca_enqueue;
+ /**< PMD Crypto adapter enqueue function. */
+ uintptr_t reserved[2];
+
+ void **data;
+ /**< points to array of internal port data pointers */
+ uintptr_t reserved2[4];
+} __rte_cache_aligned;
+
+extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
+
#define RTE_EVENTDEV_NAME_MAX_LEN (64)
/**< @internal Max length of name of event PMD */
@@ -85,6 +85,9 @@ DPDK_22 {
rte_event_timer_cancel_burst;
rte_eventdevs;
+ # added in 21.11
+ rte_event_fp_ops;
+
local: *;
};
@@ -141,6 +144,9 @@ EXPERIMENTAL {
INTERNAL {
global:
+ event_dev_fp_ops_reset;
+ event_dev_fp_ops_set;
+ event_dev_probing_finish;
rte_event_pmd_selftest_seqn_dynfield_offset;
rte_event_pmd_allocate;
rte_event_pmd_get_named_dev;