Hi Vipin,
>
> Add callback handlers for enqueue-dequeue operation on crypto
> device. The pre-enqueue and post-dequeue are selected on invoke
> user registered callback functions.
>
> Use cases:
> - allow user to investigate the contents pre-enqueue.
> - allow user to investigate the contents post-dequeue.
> - modify pre-enqueue and post-dequeue stage content.
> - investigate PMD meta data.
>
> Signed-off-by: Vipin Varghese <vipin.varghese@intel.com>
> ---
[..]
> @@ -907,6 +926,19 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t
> qp_id,
> nb_ops = (*dev->dequeue_burst)
> (dev->data->queue_pairs[qp_id], ops, nb_ops);
>
> +#ifdef RTE_CRYPTODEV_ENQDEQ_CALLBACKS
> + struct rte_cryptodev_enqdeq_callback *cb = NULL;
> +
> + TAILQ_FOREACH(cb, &(dev->deq_cbs), next) {
> + if (cb->cb_fn == NULL)
> + continue;
> +
> + cb->active = 1;
> + nb_ops = cb->cb_fn(dev_id, qp_id, ops, nb_ops, cb->cb_arg);
> + cb->active = 0;
> + }
Shouldn't this cb->active be set atomically?
Will it be thread-safe? There may be multiple threads enqueuing on the same device.
One thread may still be executing while another has finished and set active to 0,
and may then unregister the callback while that other thread is still executing.
One more thing: it would be better to have debug prints showing how many nb_ops have
successfully passed through each of the callbacks.
Also, in the callback it should be assumed that it returns right after the first failed op,
so that the application can free the remaining ones. This should be documented in the callback API.
> +#endif
> +
> return nb_ops;
> }
>
Thanks Akhil, I will work on the suggested inputs.
> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Thursday, June 27, 2019 7:55 PM
> To: Varghese, Vipin <vipin.varghese@intel.com>; Wiles, Keith
> <keith.wiles@intel.com>; dev@dpdk.org; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Doherty, Declan
> <declan.doherty@intel.com>
> Cc: Padubidri, Sanjay A <sanjay.padubidri@intel.com>
> Subject: RE: [PATCH v2 1/2] lib/crypto: add callback handlers for crypto
>
> Hi Vipin,
>
> >
> > Add callback handlers for enqueue-dequeue operation on crypto device.
> > The pre-enqueue and post-dequeue are selected on invoke user
> > registered callback functions.
> >
> > Use cases:
> > - allow user to investigate the contents pre-enqueue.
> > - allow user to investigate the contents post-dequeue.
> > - modify pre-enqueue and post-dequeue stage content.
> > - investigate PMD meta data.
> >
> > Signed-off-by: Vipin Varghese <vipin.varghese@intel.com>
> > ---
>
> [..]
>
> > @@ -907,6 +926,19 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id,
> > uint16_t qp_id,
> > nb_ops = (*dev->dequeue_burst)
> > (dev->data->queue_pairs[qp_id], ops, nb_ops);
> >
> > +#ifdef RTE_CRYPTODEV_ENQDEQ_CALLBACKS
> > + struct rte_cryptodev_enqdeq_callback *cb = NULL;
> > +
> > + TAILQ_FOREACH(cb, &(dev->deq_cbs), next) {
> > + if (cb->cb_fn == NULL)
> > + continue;
> > +
> > + cb->active = 1;
> > + nb_ops = cb->cb_fn(dev_id, qp_id, ops, nb_ops, cb->cb_arg);
> > + cb->active = 0;
> > + }
>
> Shouldn't this cb->active be set atomically.
>
> Will it be thread safe? There may be multiple threads enqueuing on the same
> device.
> One may be executing while the other finished and set the active to 0, and may
> unregister While the some other thread is still executing.
>
> One more thing, it would be better to have a debug prints about how many
> nb_ops have been Successfully passed through each of the callback.
> And in the callback, it should be assumed that it will return back just after the first
> failed ops, so that The application can free the remaining one. This should be
> documented in the callback API.
>
> > +#endif
> > +
> > return nb_ops;
> > }
> >
@@ -540,6 +540,7 @@ CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=n
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
CONFIG_RTE_CRYPTO_MAX_DEVS=64
+CONFIG_RTE_CRYPTODEV_ENQDEQ_CALLBACKS=n
#
# Compile PMD for ARMv8 Crypto device
@@ -57,6 +57,11 @@ static struct rte_cryptodev_global cryptodev_globals = {
/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+/* spinlock for pre-enqueue callbacks */
+rte_spinlock_t rte_cryptodev_preenq_cb_lock = RTE_SPINLOCK_INITIALIZER;
+/* spinlock for post-dequeue callbacks */
+rte_spinlock_t rte_cryptodev_pstdeq_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
/**
* The user application callback description.
@@ -707,6 +712,8 @@ rte_cryptodev_pmd_allocate(const char *name, int socket_id)
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
+ TAILQ_INIT(&(cryptodev->enq_cbs));
+ TAILQ_INIT(&(cryptodev->deq_cbs));
cryptodev->attached = RTE_CRYPTODEV_ATTACHED;
@@ -1736,3 +1743,177 @@ rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
return nb_drivers++;
}
+
+int __rte_experimental
+rte_cryptodev_preenq_callback_register(uint8_t dev_id, uint8_t qp_id,
+ rte_cryptodev_enqdeq_cb_fn cb_fn, void *cb_arg)
+{
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_enqdeq_callback *user_cb;
+ rte_spinlock_t *lock = &rte_cryptodev_preenq_cb_lock;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%"PRIu8 " qp_id=%"PRIu8,
+ dev_id, qp_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -EINVAL);
+
+ rte_spinlock_lock(lock);
+
+ TAILQ_FOREACH(user_cb, &(dev->enq_cbs), next) {
+ if ((void *) user_cb->cb_fn == (void *) cb_fn &&
+ user_cb->cb_arg == cb_arg)
+ break;
+ }
+
+ /* create a new callback. */
+ if (user_cb == NULL) {
+ user_cb = rte_zmalloc("CRYPTO_USER_CALLBACK",
+ sizeof(struct rte_cryptodev_enqdeq_callback), 0);
+ if (user_cb != NULL) {
+ user_cb->cb_fn = cb_fn;
+ user_cb->cb_arg = cb_arg;
+ TAILQ_INSERT_TAIL(&(dev->enq_cbs), user_cb, next);
+ }
+ }
+
+ rte_spinlock_unlock(lock);
+
+ return (user_cb == NULL) ? -ENOMEM : 0;
+}
+
+int __rte_experimental
+rte_cryptodev_preenq_callback_unregister(uint8_t dev_id, uint8_t qp_id,
+ rte_cryptodev_enqdeq_cb_fn cb_fn, void *cb_arg)
+{
+ int ret = 0;
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_enqdeq_callback *cb, *next;
+ rte_spinlock_t *lock = &rte_cryptodev_preenq_cb_lock;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%"PRIu8 " qp_id=%"PRIu8,
+ dev_id, qp_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -EINVAL);
+
+ rte_spinlock_lock(lock);
+
+ ret = -EINVAL;
+ for (cb = TAILQ_FIRST(&dev->enq_cbs); cb != NULL; cb = next) {
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->cb_fn != cb_fn || cb->cb_arg != cb_arg)
+ continue;
+
+ if (cb->active == 0) {
+ TAILQ_REMOVE(&(dev->enq_cbs), cb, next);
+ rte_free(cb);
+ ret = 0;
+ } else
+ ret = -EAGAIN;
+ }
+
+ rte_spinlock_unlock(lock);
+
+ return ret;
+}
+
+int __rte_experimental
+rte_cryptodev_pstdeq_callback_register(uint8_t dev_id, uint8_t qp_id,
+ rte_cryptodev_enqdeq_cb_fn cb_fn, void *cb_arg)
+{
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_enqdeq_callback *user_cb;
+ rte_spinlock_t *lock = &rte_cryptodev_pstdeq_cb_lock;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%"PRIu8 " qp_id=%"PRIu8,
+ dev_id, qp_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -EINVAL);
+
+ rte_spinlock_lock(lock);
+
+ TAILQ_FOREACH(user_cb, &(dev->deq_cbs), next) {
+ if ((void *) user_cb->cb_fn == (void *) cb_fn &&
+ user_cb->cb_arg == cb_arg)
+ break;
+ }
+
+ /* create a new callback. */
+ if (user_cb == NULL) {
+ user_cb = rte_zmalloc("CRYPTO_USER_CALLBACK",
+ sizeof(struct rte_cryptodev_enqdeq_callback), 0);
+ if (user_cb != NULL) {
+ user_cb->cb_fn = cb_fn;
+ user_cb->cb_arg = cb_arg;
+ TAILQ_INSERT_TAIL(&(dev->deq_cbs), user_cb, next);
+ }
+ }
+
+ rte_spinlock_unlock(lock);
+
+ return (user_cb == NULL) ? -ENOMEM : 0;
+}
+
+int __rte_experimental
+rte_cryptodev_pstdeq_callback_unregister(uint8_t dev_id, uint8_t qp_id,
+ rte_cryptodev_enqdeq_cb_fn cb_fn, void *cb_arg)
+{
+ int ret = 0;
+ struct rte_cryptodev *dev;
+ struct rte_cryptodev_enqdeq_callback *cb, *next;
+ rte_spinlock_t *lock = &rte_cryptodev_pstdeq_cb_lock;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%"PRIu8 " qp_id=%"PRIu8,
+ dev_id, qp_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -EINVAL);
+
+ rte_spinlock_lock(lock);
+
+ ret = -EINVAL;
+ for (cb = TAILQ_FIRST(&dev->deq_cbs); cb != NULL; cb = next) {
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->cb_fn != cb_fn || cb->cb_arg != cb_arg)
+ continue;
+
+ if (cb->active == 0) {
+ TAILQ_REMOVE(&(dev->deq_cbs), cb, next);
+ rte_free(cb);
+ ret = 0;
+ } else
+ ret = -EAGAIN;
+ }
+
+ rte_spinlock_unlock(lock);
+
+ return ret;
+}
@@ -18,6 +18,8 @@
extern "C" {
#endif
+#include <sys/queue.h>
+
#include "rte_kvargs.h"
#include "rte_crypto.h"
#include "rte_dev.h"
@@ -794,9 +796,23 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
struct rte_cryptodev_callback;
+struct rte_cryptodev_enqdeq_callback;
/** Structure to keep track of registered callbacks */
TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
+TAILQ_HEAD(rte_cryptodev_enq_cb_list, rte_cryptodev_enqdeq_callback);
+TAILQ_HEAD(rte_cryptodev_deq_cb_list, rte_cryptodev_enqdeq_callback);
+
+typedef uint16_t (*rte_cryptodev_enqdeq_cb_fn)(uint8_t dev_id, uint8_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops,
+ void *cb_arg);
+
+struct rte_cryptodev_enqdeq_callback {
+ TAILQ_ENTRY(rte_cryptodev_enqdeq_callback) next; /* Callbacks list */
+ rte_cryptodev_enqdeq_cb_fn cb_fn; /* Callback address */
+ void *cb_arg; /* Parameter for callback */
+ uint32_t active; /* Callback is executing */
+};
/** The data structure associated with each crypto device. */
struct rte_cryptodev {
@@ -823,6 +839,9 @@ struct rte_cryptodev {
void *security_ctx;
/**< Context for security ops */
+ struct rte_cryptodev_enq_cb_list enq_cbs;
+ struct rte_cryptodev_deq_cb_list deq_cbs;
+
__extension__
uint8_t attached : 1;
/**< Flag indicating the device is attached */
@@ -907,6 +926,19 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
nb_ops = (*dev->dequeue_burst)
(dev->data->queue_pairs[qp_id], ops, nb_ops);
+#ifdef RTE_CRYPTODEV_ENQDEQ_CALLBACKS
+ struct rte_cryptodev_enqdeq_callback *cb = NULL;
+
+ TAILQ_FOREACH(cb, &(dev->deq_cbs), next) {
+ if (cb->cb_fn == NULL)
+ continue;
+
+ cb->active = 1;
+ nb_ops = cb->cb_fn(dev_id, qp_id, ops, nb_ops, cb->cb_arg);
+ cb->active = 0;
+ }
+#endif
+
return nb_ops;
}
@@ -947,6 +979,19 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+#ifdef RTE_CRYPTODEV_ENQDEQ_CALLBACKS
+ struct rte_cryptodev_enqdeq_callback *cb = NULL;
+
+ TAILQ_FOREACH(cb, &(dev->enq_cbs), next) {
+ if (cb->cb_fn == NULL)
+ continue;
+
+ cb->active = 1;
+ nb_ops = cb->cb_fn(dev_id, qp_id, ops, nb_ops, cb->cb_arg);
+ cb->active = 0;
+ }
+#endif
+
return (*dev->enqueue_burst)(
dev->data->queue_pairs[qp_id], ops, nb_ops);
}
@@ -1249,6 +1294,86 @@ void * __rte_experimental
rte_cryptodev_sym_session_get_user_data(
struct rte_cryptodev_sym_session *sess);
+/**
+ * Register user callback for pre-enqueue of crypto.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param qp_id
+ * The index of the queue pair.
+ * @param cb_fn
+ * user callback to register.
+ * @param cb_arg
+ * user args to be passed during invoke.
+ *
+ * @return
+ * - On success returns 0.
+ * - On failure returns < 0.
+ */
+int
+rte_cryptodev_preenq_callback_register(uint8_t dev_id, uint8_t qp_id,
+ rte_cryptodev_enqdeq_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * Unregister a user callback for pre-enqueue of crypto.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param qp_id
+ * The index of the queue pair.
+ * @param cb_fn
+ * user callback to unregister.
+ * @param cb_arg
+ * user args to be passed during invoke.
+ *
+ * @return
+ * - On success returns 0.
+ * - On failure returns < 0.
+ */
+int
+rte_cryptodev_preenq_callback_unregister(uint8_t dev_id, uint8_t qp_id,
+ rte_cryptodev_enqdeq_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * Register user callback for post-dequeue of crypto.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param qp_id
+ * The index of the queue pair.
+ * @param cb_fn
+ * user callback to register.
+ * @param cb_arg
+ * user args to be passed during invoke.
+ *
+ * @return
+ * - On success returns 0.
+ * - On failure returns < 0.
+ */
+int
+rte_cryptodev_pstdeq_callback_register(uint8_t dev_id, uint8_t qp_id,
+ rte_cryptodev_enqdeq_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * Unregister a user callback for post-dequeue of crypto.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param qp_id
+ * The index of the queue pair.
+ * @param cb_fn
+ * user callback to unregister.
+ * @param cb_arg
+ * user args to be passed during invoke.
+ *
+ * @return
+ * - On success returns 0.
+ * - On failure returns < 0.
+ */
+int
+rte_cryptodev_pstdeq_callback_unregister(uint8_t dev_id, uint8_t qp_id,
+ rte_cryptodev_enqdeq_cb_fn cb_fn, void *cb_arg);
+
#ifdef __cplusplus
}
#endif
@@ -101,6 +101,10 @@ EXPERIMENTAL {
rte_cryptodev_asym_session_init;
rte_cryptodev_asym_xform_capability_check_modlen;
rte_cryptodev_asym_xform_capability_check_optype;
+ rte_cryptodev_preenq_callback_register;
+ rte_cryptodev_preenq_callback_unregister;
+ rte_cryptodev_pstdeq_callback_register;
+ rte_cryptodev_pstdeq_callback_unregister;
rte_cryptodev_sym_get_existing_header_session_size;
rte_cryptodev_sym_session_get_user_data;
rte_cryptodev_sym_session_pool_create;