[v4,1/3] cryptodev: support enqueue callback functions
diff mbox series

Message ID 1603619090-118652-2-git-send-email-abhinandan.gujjar@intel.com
State Changes Requested, archived
Delegated to: akhil goyal
Headers show
Series
  • support enqueue callbacks on cryptodev
Related show

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Gujjar, Abhinandan S Oct. 25, 2020, 9:44 a.m. UTC
This patch adds APIs to add/remove callback functions. The callback
function will be called for each burst of crypto ops received on a
given crypto device queue pair.

Signed-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
---
 config/rte_config.h                            |   1 +
 lib/librte_cryptodev/meson.build               |   2 +-
 lib/librte_cryptodev/rte_cryptodev.c           | 230 +++++++++++++++++++++++++
 lib/librte_cryptodev/rte_cryptodev.h           | 158 ++++++++++++++++-
 lib/librte_cryptodev/rte_cryptodev_version.map |   2 +
 5 files changed, 391 insertions(+), 2 deletions(-)

Comments

Ananyev, Konstantin Oct. 27, 2020, 12:47 p.m. UTC | #1
> 
> This patch adds APIs to add/remove callback functions. The callback
> function will be called for each burst of crypto ops received on a
> given crypto device queue pair.
> 
> Signed-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
> ---
>  config/rte_config.h                            |   1 +
>  lib/librte_cryptodev/meson.build               |   2 +-
>  lib/librte_cryptodev/rte_cryptodev.c           | 230 +++++++++++++++++++++++++
>  lib/librte_cryptodev/rte_cryptodev.h           | 158 ++++++++++++++++-
>  lib/librte_cryptodev/rte_cryptodev_version.map |   2 +
>  5 files changed, 391 insertions(+), 2 deletions(-)
> 
> diff --git a/config/rte_config.h b/config/rte_config.h
> index 03d90d7..e999d93 100644
> --- a/config/rte_config.h
> +++ b/config/rte_config.h
> @@ -61,6 +61,7 @@
>  /* cryptodev defines */
>  #define RTE_CRYPTO_MAX_DEVS 64
>  #define RTE_CRYPTODEV_NAME_LEN 64
> +#define RTE_CRYPTO_CALLBACKS 1
> 
>  /* compressdev defines */
>  #define RTE_COMPRESS_MAX_DEVS 64
> diff --git a/lib/librte_cryptodev/meson.build b/lib/librte_cryptodev/meson.build
> index c4c6b3b..8c5493f 100644
> --- a/lib/librte_cryptodev/meson.build
> +++ b/lib/librte_cryptodev/meson.build
> @@ -9,4 +9,4 @@ headers = files('rte_cryptodev.h',
>  	'rte_crypto.h',
>  	'rte_crypto_sym.h',
>  	'rte_crypto_asym.h')
> -deps += ['kvargs', 'mbuf']
> +deps += ['kvargs', 'mbuf', 'rcu']
> diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
> index 3d95ac6..0880d9b 100644
> --- a/lib/librte_cryptodev/rte_cryptodev.c
> +++ b/lib/librte_cryptodev/rte_cryptodev.c
> @@ -448,6 +448,91 @@ struct rte_cryptodev_sym_session_pool_private_data {
>  	return 0;
>  }
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +/* spinlock for crypto device enq callbacks */
> +static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
> +
> +static void
> +cryptodev_cb_cleanup(struct rte_cryptodev *dev)
> +{
> +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> +	struct rte_cryptodev_enq_cb_rcu *list;
> +	uint16_t qp_id;
> +
> +	if (dev->enq_cbs == NULL)
> +		return;
> +
> +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> +		list = &dev->enq_cbs[qp_id];
> +		prev_cb = &list->next;
> +
> +		while (*prev_cb != NULL) {
> +			curr_cb = *prev_cb;
> +			/* Remove the user cb from the callback list. */
> +			__atomic_store_n(prev_cb, curr_cb->next,
> +				__ATOMIC_RELAXED);
> +			rte_rcu_qsbr_synchronize(list->qsbr,
> +				RTE_QSBR_THRID_INVALID);

You call this function (cb_cleanup) only at dev_configure().
At that moment DP threads can't do enqueue/dequeue anyway.
So you can safely skip all this synchronization code here and just do:

cb = list->next;
while (cb != NULL) {
	next = cb->next;
	rte_free(cb);
	cb = next;
}


> +			rte_free(curr_cb);

One thing that makes it sort of a grey area:
we do free() for cb itself, but user-provided data will be sort of 'lost',
as it is not referenced from our cb struct anymore...
I see two options here - first just document explicitly that callbacks wouldn't
survive cryptodev_configure() and it is user responsibility to remove all
installed callbacks before doing dev_configure() to avoid possible memory leakage.
Another option - add user provided cleanup() function pointer into struct rte_cryptodev_cb
and call it here if provided:
struct rte_cryptodev_cb {
	struct rte_cryptodev_cb *next;
	/** < Pointer to next callback */
	rte_cryptodev_callback_fn fn;
	/** < Pointer to callback function */
	void *arg;
	/** < Pointer to argument */
	void (*cleanup)(void *);	
};

And here:
	if (curr_cb->cleanup != NULL)
		curr_cb->cleanup(curr_cb->arg);

 	rte_free(curr_cb);

Rest of the code - LGTM.
So with that addressed:
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
 
> +		}
> +
> +		rte_free(list->qsbr);
> +	}
> +
> +	rte_free(dev->enq_cbs);
> +	dev->enq_cbs = NULL;
> +}
> +
> +static int
> +cryptodev_cb_init(struct rte_cryptodev *dev)
> +{
> +	struct rte_cryptodev_enq_cb_rcu *list;
> +	struct rte_rcu_qsbr *qsbr;
> +	uint16_t qp_id;
> +	size_t size;
> +
> +	/* Max thread set to 1, as one DP thread accessing a queue-pair */
> +	const uint32_t max_threads = 1;
> +
> +	dev->enq_cbs = rte_zmalloc(NULL,
> +				   sizeof(struct rte_cryptodev_enq_cb_rcu) *
> +				   dev->data->nb_queue_pairs, 0);
> +	if (dev->enq_cbs == NULL) {
> +		CDEV_LOG_ERR("Failed to allocate memory for callbacks");
> +		rte_errno = ENOMEM;
> +		return -1;
> +	}
> +
> +	/* Create RCU QSBR variable */
> +	size = rte_rcu_qsbr_get_memsize(max_threads);
> +
> +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> +		list = &dev->enq_cbs[qp_id];
> +		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
> +		if (qsbr == NULL) {
> +			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
> +				"queue_pair_id=%d", qp_id);
> +			goto cb_init_err;
> +		}
> +
> +		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
> +			CDEV_LOG_ERR("Failed to initialize for RCU on "
> +				"queue_pair_id=%d", qp_id);
> +			goto cb_init_err;
> +		}
> +
> +		list->qsbr = qsbr;
> +	}
> +
> +	return 0;
> +
> +cb_init_err:
> +	rte_errno = ENOMEM;
> +	cryptodev_cb_cleanup(dev);
> +	return -1;
> +
> +}
> +#endif
> 
>  const char *
>  rte_cryptodev_get_feature_name(uint64_t flag)
> @@ -927,6 +1012,11 @@ struct rte_cryptodev *
> 
>  	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> +	cryptodev_cb_cleanup(dev);
> +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> +#endif
>  	/* Setup new number of queue pairs and reconfigure device. */
>  	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
>  			config->socket_id);
> @@ -936,6 +1026,15 @@ struct rte_cryptodev *
>  		return diag;
>  	}
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> +	diag = cryptodev_cb_init(dev);
> +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> +	if (diag) {
> +		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
> +		return -ENOMEM;
> +	}
> +#endif
>  	rte_cryptodev_trace_configure(dev_id, config);
>  	return (*dev->dev_ops->dev_configure)(dev, config);
>  }
> @@ -1136,6 +1235,137 @@ struct rte_cryptodev *
>  			socket_id);
>  }
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +struct rte_cryptodev_cb *
> +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> +			       uint16_t qp_id,
> +			       rte_cryptodev_callback_fn cb_fn,
> +			       void *cb_arg)
> +{
> +	struct rte_cryptodev *dev;
> +	struct rte_cryptodev_enq_cb_rcu *list;
> +	struct rte_cryptodev_cb *cb, *tail;
> +
> +	if (!cb_fn)
> +		return NULL;
> +
> +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> +		return NULL;
> +	}
> +
> +	dev = &rte_crypto_devices[dev_id];
> +	if (qp_id >= dev->data->nb_queue_pairs) {
> +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> +		return NULL;
> +	}
> +
> +	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
> +	if (cb == NULL) {
> +		CDEV_LOG_ERR("Failed to allocate memory for callback on "
> +			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
> +		rte_errno = ENOMEM;
> +		return NULL;
> +	}
> +
> +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> +
> +	cb->fn = cb_fn;
> +	cb->arg = cb_arg;
> +
> +	/* Add the callbacks in fifo order. */
> +	list = &dev->enq_cbs[qp_id];
> +	tail = list->next;
> +
> +	if (tail) {
> +		while (tail->next)
> +			tail = tail->next;
> +		/* Stores to cb->fn and cb->param should complete before
> +		 * cb is visible to data plane.
> +		 */
> +		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
> +	} else {
> +		/* Stores to cb->fn and cb->param should complete before
> +		 * cb is visible to data plane.
> +		 */
> +		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
> +	}
> +
> +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> +
> +	return cb;
> +}
> +
> +int
> +rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> +				  uint16_t qp_id,
> +				  struct rte_cryptodev_cb *cb)
> +{
> +	struct rte_cryptodev *dev;
> +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> +	struct rte_cryptodev_enq_cb_rcu *list;
> +	int ret;
> +
> +	ret = -EINVAL;
> +
> +	if (!cb) {
> +		CDEV_LOG_ERR("cb is NULL");
> +		return ret;
> +	}
> +
> +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> +		return ret;
> +	}
> +
> +	dev = &rte_crypto_devices[dev_id];
> +	if (qp_id >= dev->data->nb_queue_pairs) {
> +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> +		return ret;
> +	}
> +
> +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> +	if (dev->enq_cbs == NULL) {
> +		CDEV_LOG_ERR("Callback not initialized");
> +		goto cb_err;
> +	}
> +
> +	list = &dev->enq_cbs[qp_id];
> +	if (list == NULL) {
> +		CDEV_LOG_ERR("Callback list is NULL");
> +		goto cb_err;
> +	}
> +
> +	if (list->qsbr == NULL) {
> +		CDEV_LOG_ERR("Rcu qsbr is NULL");
> +		goto cb_err;
> +	}
> +
> +	prev_cb = &list->next;
> +	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
> +		curr_cb = *prev_cb;
> +		if (curr_cb == cb) {
> +			/* Remove the user cb from the callback list. */
> +			__atomic_store_n(prev_cb, curr_cb->next,
> +				__ATOMIC_RELAXED);
> +			ret = 0;
> +			break;
> +		}
> +	}
> +
> +	if (!ret) {
> +		/* Call sync with invalid thread id as this is part of
> +		 * control plane API
> +		 */
> +		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
> +		rte_free(cb);
> +	}
> +
> +cb_err:
> +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> +	return ret;
> +}
> +#endif
> 
>  int
>  rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
> diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
> index 0935fd5..1b7d7ef 100644
> --- a/lib/librte_cryptodev/rte_cryptodev.h
> +++ b/lib/librte_cryptodev/rte_cryptodev.h
> @@ -23,6 +23,7 @@
>  #include "rte_dev.h"
>  #include <rte_common.h>
>  #include <rte_config.h>
> +#include <rte_rcu_qsbr.h>
> 
>  #include "rte_cryptodev_trace_fp.h"
> 
> @@ -522,6 +523,34 @@ struct rte_cryptodev_qp_conf {
>  	/**< The mempool for creating sess private data in sessionless mode */
>  };
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +/**
> + * Function type used for pre processing crypto ops when enqueue burst is
> + * called.
> + *
> + * The callback function is called on enqueue burst immediately
> + * before the crypto ops are put onto the hardware queue for processing.
> + *
> + * @param	dev_id		The identifier of the device.
> + * @param	qp_id		The index of the queue pair in which ops are
> + *				to be enqueued for processing. The value
> + *				must be in the range [0, nb_queue_pairs - 1]
> + *				previously supplied to
> + *				*rte_cryptodev_configure*.
> + * @param	ops		The address of an array of *nb_ops* pointers
> + *				to *rte_crypto_op* structures which contain
> + *				the crypto operations to be processed.
> + * @param	nb_ops		The number of operations to process.
> + * @param	user_param	The arbitrary user parameter passed in by the
> + *				application when the callback was originally
> + *				registered.
> + * @return			The number of ops to be enqueued to the
> + *				crypto device.
> + */
> +typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
> +		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
> +#endif
> +
>  /**
>   * Typedef for application callback function to be registered by application
>   * software for notification of device events
> @@ -822,7 +851,6 @@ struct rte_cryptodev_config {
>  		enum rte_cryptodev_event_type event,
>  		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
> 
> -
>  typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
>  		struct rte_crypto_op **ops,	uint16_t nb_ops);
>  /**< Dequeue processed packets from queue pair of a device. */
> @@ -839,6 +867,33 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
>  /** Structure to keep track of registered callbacks */
>  TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +/**
> + * @internal
> + * Structure used to hold information about the callbacks to be called for a
> + * queue pair on enqueue.
> + */
> +struct rte_cryptodev_cb {
> +	struct rte_cryptodev_cb *next;
> +	/** < Pointer to next callback */
> +	rte_cryptodev_callback_fn fn;
> +	/** < Pointer to callback function */
> +	void *arg;
> +	/** < Pointer to argument */
> +};
> +
> +/**
> + * @internal
> + * Structure used to hold information about the RCU for a queue pair.
> + */
> +struct rte_cryptodev_enq_cb_rcu {
> +	struct rte_cryptodev_cb *next;
> +	/** < Pointer to next callback */
> +	struct rte_rcu_qsbr *qsbr;
> +	/** < RCU QSBR variable per queue pair */
> +};
> +#endif
> +
>  /** The data structure associated with each crypto device. */
>  struct rte_cryptodev {
>  	dequeue_pkt_burst_t dequeue_burst;
> @@ -867,6 +922,10 @@ struct rte_cryptodev {
>  	__extension__
>  	uint8_t attached : 1;
>  	/**< Flag indicating the device is attached */
> +
> +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> +	/**< User application callback for pre enqueue processing */
> +
>  } __rte_cache_aligned;
> 
>  void *
> @@ -989,6 +1048,31 @@ struct rte_cryptodev_data {
>  {
>  	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +	if (unlikely(dev->enq_cbs != NULL)) {
> +		struct rte_cryptodev_enq_cb_rcu *list;
> +		struct rte_cryptodev_cb *cb;
> +
> +		/* __ATOMIC_RELEASE memory order was used when the
> +		* call back was inserted into the list.
> +		* Since there is a clear dependency between loading
> +		* cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
> +		* not required.
> +		*/
> +		list = &dev->enq_cbs[qp_id];
> +		rte_rcu_qsbr_thread_online(list->qsbr, 0);
> +		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> +
> +		while (cb != NULL) {
> +			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> +					cb->arg);
> +			cb = cb->next;
> +		};
> +
> +		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> +	}
> +#endif
> +
>  	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
>  	return (*dev->enqueue_burst)(
>  			dev->data->queue_pairs[qp_id], ops, nb_ops);
> @@ -1730,6 +1814,78 @@ struct rte_crypto_raw_dp_ctx {
>  rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
>  		uint32_t n);
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * Add a user callback for a given crypto device and queue pair which will be
> + * called on crypto ops enqueue.
> + *
> + * This API configures a function to be called for each burst of crypto ops
> + * received on a given crypto device queue pair. The return value is a pointer
> + * that can be used later to remove the callback using
> + * rte_cryptodev_remove_enq_callback().
> + *
> + * Multiple functions are called in the order that they are added.
> + *
> + * @param	dev_id		The identifier of the device.
> + * @param	qp_id		The index of the queue pair in which ops are
> + *				to be enqueued for processing. The value
> + *				must be in the range [0, nb_queue_pairs - 1]
> + *				previously supplied to
> + *				*rte_cryptodev_configure*.
> + * @param	cb_fn		The callback function
> + * @param	cb_arg		A generic pointer parameter which will be passed
> + *				to each invocation of the callback function on
> + *				this crypto device and queue pair.
> + *
> + * @return
> + *   NULL on error.
> + *   On success, a pointer value which can later be used to remove the callback.
> + */
> +
> +__rte_experimental
> +struct rte_cryptodev_cb *
> +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> +			       uint16_t qp_id,
> +			       rte_cryptodev_callback_fn cb_fn,
> +			       void *cb_arg);
> +
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * Remove a user callback function for given crypto device and queue pair.
> + *
> + * This function is used to removed callbacks that were added to a crypto
> + * device queue pair using rte_cryptodev_add_enq_callback().
> + *
> + *
> + *
> + * @param	dev_id		The identifier of the device.
> + * @param	qp_id		The index of the queue pair in which ops are
> + *				to be enqueued for processing. The value
> + *				must be in the range [0, nb_queue_pairs - 1]
> + *				previously supplied to
> + *				*rte_cryptodev_configure*.
> + * @param	cb		Pointer to user supplied callback created via
> + *				rte_cryptodev_add_enq_callback().
> + *
> + * @return
> + *   - 0: Success. Callback was removed.
> + *   - -EINVAL:  The dev_id or the qp_id is out of range, or the callback
> + *               is NULL or not found for the crypto device queue pair.
> + */
> +
> +__rte_experimental
> +int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> +				      uint16_t qp_id,
> +				      struct rte_cryptodev_cb *cb);
> +
> +#endif
> +
>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
> index 7e4360f..5d8d6b0 100644
> --- a/lib/librte_cryptodev/rte_cryptodev_version.map
> +++ b/lib/librte_cryptodev/rte_cryptodev_version.map
> @@ -101,6 +101,7 @@ EXPERIMENTAL {
>  	rte_cryptodev_get_qp_status;
> 
>  	# added in 20.11
> +	rte_cryptodev_add_enq_callback;
>  	rte_cryptodev_configure_raw_dp_ctx;
>  	rte_cryptodev_get_raw_dp_ctx_size;
>  	rte_cryptodev_raw_dequeue;
> @@ -109,4 +110,5 @@ EXPERIMENTAL {
>  	rte_cryptodev_raw_enqueue;
>  	rte_cryptodev_raw_enqueue_burst;
>  	rte_cryptodev_raw_enqueue_done;
> +	rte_cryptodev_remove_enq_callback;
>  };
> --
> 1.9.1
Gujjar, Abhinandan S Oct. 27, 2020, 5:16 p.m. UTC | #2
> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Tuesday, October 27, 2020 6:18 PM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org;
> Doherty, Declan <declan.doherty@intel.com>; akhil.goyal@nxp.com;
> Honnappa.Nagarahalli@arm.com
> Cc: Vangati, Narender <narender.vangati@intel.com>; jerinj@marvell.com
> Subject: RE: [v4 1/3] cryptodev: support enqueue callback functions
> 
> 
> >
> > This patch adds APIs to add/remove callback functions. The callback
> > function will be called for each burst of crypto ops received on a
> > given crypto device queue pair.
> >
> > Signed-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
> > ---
> >  config/rte_config.h                            |   1 +
> >  lib/librte_cryptodev/meson.build               |   2 +-
> >  lib/librte_cryptodev/rte_cryptodev.c           | 230
> +++++++++++++++++++++++++
> >  lib/librte_cryptodev/rte_cryptodev.h           | 158 ++++++++++++++++-
> >  lib/librte_cryptodev/rte_cryptodev_version.map |   2 +
> >  5 files changed, 391 insertions(+), 2 deletions(-)
> >
> > diff --git a/config/rte_config.h b/config/rte_config.h index
> > 03d90d7..e999d93 100644
> > --- a/config/rte_config.h
> > +++ b/config/rte_config.h
> > @@ -61,6 +61,7 @@
> >  /* cryptodev defines */
> >  #define RTE_CRYPTO_MAX_DEVS 64
> >  #define RTE_CRYPTODEV_NAME_LEN 64
> > +#define RTE_CRYPTO_CALLBACKS 1
> >
> >  /* compressdev defines */
> >  #define RTE_COMPRESS_MAX_DEVS 64
> > diff --git a/lib/librte_cryptodev/meson.build
> > b/lib/librte_cryptodev/meson.build
> > index c4c6b3b..8c5493f 100644
> > --- a/lib/librte_cryptodev/meson.build
> > +++ b/lib/librte_cryptodev/meson.build
> > @@ -9,4 +9,4 @@ headers = files('rte_cryptodev.h',
> >  	'rte_crypto.h',
> >  	'rte_crypto_sym.h',
> >  	'rte_crypto_asym.h')
> > -deps += ['kvargs', 'mbuf']
> > +deps += ['kvargs', 'mbuf', 'rcu']
> > diff --git a/lib/librte_cryptodev/rte_cryptodev.c
> > b/lib/librte_cryptodev/rte_cryptodev.c
> > index 3d95ac6..0880d9b 100644
> > --- a/lib/librte_cryptodev/rte_cryptodev.c
> > +++ b/lib/librte_cryptodev/rte_cryptodev.c
> > @@ -448,6 +448,91 @@ struct
> rte_cryptodev_sym_session_pool_private_data {
> >  	return 0;
> >  }
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +/* spinlock for crypto device enq callbacks */ static rte_spinlock_t
> > +rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
> > +
> > +static void
> > +cryptodev_cb_cleanup(struct rte_cryptodev *dev) {
> > +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > +	struct rte_cryptodev_enq_cb_rcu *list;
> > +	uint16_t qp_id;
> > +
> > +	if (dev->enq_cbs == NULL)
> > +		return;
> > +
> > +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > +		list = &dev->enq_cbs[qp_id];
> > +		prev_cb = &list->next;
> > +
> > +		while (*prev_cb != NULL) {
> > +			curr_cb = *prev_cb;
> > +			/* Remove the user cb from the callback list. */
> > +			__atomic_store_n(prev_cb, curr_cb->next,
> > +				__ATOMIC_RELAXED);
> > +			rte_rcu_qsbr_synchronize(list->qsbr,
> > +				RTE_QSBR_THRID_INVALID);
> 
> You call this function (cb_cleanup) only at dev_configure().
> At that moment DP threads can't do enqueue/dequeue anyway.
> So you can safely skip all this synchronization code here and just do:
> 
> cb = list->next;
> while (cb != NULL) {
> 	next = cb->next;
> 	rte_free(cb);
> 	cb = next;
> }
> 
Ok
> 
> > +			rte_free(curr_cb);
> 
> One thing that makes it sort of grey area:
> we do free() for cb itself, but user provided data will be sort of 'lost'.
> As it is not referenced from our cb struct anymore...
> I see two options here - first just document explicitly that callbacks wouldn't
> survive cryptodev_configure() and it is user responsibility to remove all
> installed callbacks before doing dev_configure() to avoid possible memory
> leakage.
Ok. I will update the documentation for this and send a new patch set.
> Another option - add user provided cleanup() function pointer into struct
> rte_cryptodev_cb and call it here if provided:
> struct rte_cryptodev_cb {
> 	struct rte_cryptodev_cb *next;
> 	/** < Pointer to next callback */
> 	rte_cryptodev_callback_fn fn;
> 	/** < Pointer to callback function */
> 	void *arg;
> 	/** < Pointer to argument */
> 	void (*cleanup)(void *);
> };
> 
> And here:
> 	If (curr_cb->cleanup != NULL)
> 		curr_cb->cleanup(curr_cb->arg);
> 
>  	rte_free(curr_cb);
> 
> Rest of the code - LGTM.
> So with that addressed:
> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> 
> > +		}
> > +
> > +		rte_free(list->qsbr);
> > +	}
> > +
> > +	rte_free(dev->enq_cbs);
> > +	dev->enq_cbs = NULL;
> > +}
> > +
> > +static int
> > +cryptodev_cb_init(struct rte_cryptodev *dev) {
> > +	struct rte_cryptodev_enq_cb_rcu *list;
> > +	struct rte_rcu_qsbr *qsbr;
> > +	uint16_t qp_id;
> > +	size_t size;
> > +
> > +	/* Max thread set to 1, as one DP thread accessing a queue-pair */
> > +	const uint32_t max_threads = 1;
> > +
> > +	dev->enq_cbs = rte_zmalloc(NULL,
> > +				   sizeof(struct rte_cryptodev_enq_cb_rcu) *
> > +				   dev->data->nb_queue_pairs, 0);
> > +	if (dev->enq_cbs == NULL) {
> > +		CDEV_LOG_ERR("Failed to allocate memory for callbacks");
> > +		rte_errno = ENOMEM;
> > +		return -1;
> > +	}
> > +
> > +	/* Create RCU QSBR variable */
> > +	size = rte_rcu_qsbr_get_memsize(max_threads);
> > +
> > +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > +		list = &dev->enq_cbs[qp_id];
> > +		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
> > +		if (qsbr == NULL) {
> > +			CDEV_LOG_ERR("Failed to allocate memory for RCU
> on "
> > +				"queue_pair_id=%d", qp_id);
> > +			goto cb_init_err;
> > +		}
> > +
> > +		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
> > +			CDEV_LOG_ERR("Failed to initialize for RCU on "
> > +				"queue_pair_id=%d", qp_id);
> > +			goto cb_init_err;
> > +		}
> > +
> > +		list->qsbr = qsbr;
> > +	}
> > +
> > +	return 0;
> > +
> > +cb_init_err:
> > +	rte_errno = ENOMEM;
> > +	cryptodev_cb_cleanup(dev);
> > +	return -1;
> > +
> > +}
> > +#endif
> >
> >  const char *
> >  rte_cryptodev_get_feature_name(uint64_t flag) @@ -927,6 +1012,11 @@
> > struct rte_cryptodev *
> >
> >  	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -
> ENOTSUP);
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > +	cryptodev_cb_cleanup(dev);
> > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > +#endif
> >  	/* Setup new number of queue pairs and reconfigure device. */
> >  	diag = rte_cryptodev_queue_pairs_config(dev, config-
> >nb_queue_pairs,
> >  			config->socket_id);
> > @@ -936,6 +1026,15 @@ struct rte_cryptodev *
> >  		return diag;
> >  	}
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > +	diag = cryptodev_cb_init(dev);
> > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > +	if (diag) {
> > +		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
> > +		return -ENOMEM;
> > +	}
> > +#endif
> >  	rte_cryptodev_trace_configure(dev_id, config);
> >  	return (*dev->dev_ops->dev_configure)(dev, config);  } @@ -1136,6
> > +1235,137 @@ struct rte_cryptodev *
> >  			socket_id);
> >  }
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +struct rte_cryptodev_cb *
> > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > +			       uint16_t qp_id,
> > +			       rte_cryptodev_callback_fn cb_fn,
> > +			       void *cb_arg)
> > +{
> > +	struct rte_cryptodev *dev;
> > +	struct rte_cryptodev_enq_cb_rcu *list;
> > +	struct rte_cryptodev_cb *cb, *tail;
> > +
> > +	if (!cb_fn)
> > +		return NULL;
> > +
> > +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > +		return NULL;
> > +	}
> > +
> > +	dev = &rte_crypto_devices[dev_id];
> > +	if (qp_id >= dev->data->nb_queue_pairs) {
> > +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > +		return NULL;
> > +	}
> > +
> > +	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
> > +	if (cb == NULL) {
> > +		CDEV_LOG_ERR("Failed to allocate memory for callback on "
> > +			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
> > +		rte_errno = ENOMEM;
> > +		return NULL;
> > +	}
> > +
> > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > +
> > +	cb->fn = cb_fn;
> > +	cb->arg = cb_arg;
> > +
> > +	/* Add the callbacks in fifo order. */
> > +	list = &dev->enq_cbs[qp_id];
> > +	tail = list->next;
> > +
> > +	if (tail) {
> > +		while (tail->next)
> > +			tail = tail->next;
> > +		/* Stores to cb->fn and cb->param should complete before
> > +		 * cb is visible to data plane.
> > +		 */
> > +		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
> > +	} else {
> > +		/* Stores to cb->fn and cb->param should complete before
> > +		 * cb is visible to data plane.
> > +		 */
> > +		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
> > +	}
> > +
> > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > +
> > +	return cb;
> > +}
> > +
> > +int
> > +rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> > +				  uint16_t qp_id,
> > +				  struct rte_cryptodev_cb *cb)
> > +{
> > +	struct rte_cryptodev *dev;
> > +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > +	struct rte_cryptodev_enq_cb_rcu *list;
> > +	int ret;
> > +
> > +	ret = -EINVAL;
> > +
> > +	if (!cb) {
> > +		CDEV_LOG_ERR("cb is NULL");
> > +		return ret;
> > +	}
> > +
> > +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > +		return ret;
> > +	}
> > +
> > +	dev = &rte_crypto_devices[dev_id];
> > +	if (qp_id >= dev->data->nb_queue_pairs) {
> > +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > +		return ret;
> > +	}
> > +
> > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > +	if (dev->enq_cbs == NULL) {
> > +		CDEV_LOG_ERR("Callback not initialized");
> > +		goto cb_err;
> > +	}
> > +
> > +	list = &dev->enq_cbs[qp_id];
> > +	if (list == NULL) {
> > +		CDEV_LOG_ERR("Callback list is NULL");
> > +		goto cb_err;
> > +	}
> > +
> > +	if (list->qsbr == NULL) {
> > +		CDEV_LOG_ERR("Rcu qsbr is NULL");
> > +		goto cb_err;
> > +	}
> > +
> > +	prev_cb = &list->next;
> > +	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
> > +		curr_cb = *prev_cb;
> > +		if (curr_cb == cb) {
> > +			/* Remove the user cb from the callback list. */
> > +			__atomic_store_n(prev_cb, curr_cb->next,
> > +				__ATOMIC_RELAXED);
> > +			ret = 0;
> > +			break;
> > +		}
> > +	}
> > +
> > +	if (!ret) {
> > +		/* Call sync with invalid thread id as this is part of
> > +		 * control plane API
> > +		 */
> > +		rte_rcu_qsbr_synchronize(list->qsbr,
> RTE_QSBR_THRID_INVALID);
> > +		rte_free(cb);
> > +	}
> > +
> > +cb_err:
> > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > +	return ret;
> > +}
> > +#endif
> >
> >  int
> >  rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats
> > *stats) diff --git a/lib/librte_cryptodev/rte_cryptodev.h
> > b/lib/librte_cryptodev/rte_cryptodev.h
> > index 0935fd5..1b7d7ef 100644
> > --- a/lib/librte_cryptodev/rte_cryptodev.h
> > +++ b/lib/librte_cryptodev/rte_cryptodev.h
> > @@ -23,6 +23,7 @@
> >  #include "rte_dev.h"
> >  #include <rte_common.h>
> >  #include <rte_config.h>
> > +#include <rte_rcu_qsbr.h>
> >
> >  #include "rte_cryptodev_trace_fp.h"
> >
> > @@ -522,6 +523,34 @@ struct rte_cryptodev_qp_conf {
> >  	/**< The mempool for creating sess private data in sessionless mode
> > */  };
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +/**
> > + * Function type used for pre processing crypto ops when enqueue
> > +burst is
> > + * called.
> > + *
> > + * The callback function is called on enqueue burst immediately
> > + * before the crypto ops are put onto the hardware queue for processing.
> > + *
> > + * @param	dev_id		The identifier of the device.
> > + * @param	qp_id		The index of the queue pair in which ops are
> > + *				to be enqueued for processing. The value
> > + *				must be in the range [0, nb_queue_pairs - 1]
> > + *				previously supplied to
> > + *				*rte_cryptodev_configure*.
> > + * @param	ops		The address of an array of *nb_ops* pointers
> > + *				to *rte_crypto_op* structures which contain
> > + *				the crypto operations to be processed.
> > + * @param	nb_ops		The number of operations to process.
> > + * @param	user_param	The arbitrary user parameter passed in by the
> > + *				application when the callback was originally
> > + *				registered.
> > + * @return			The number of ops to be enqueued to the
> > + *				crypto device.
> > + */
> > +typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t
> qp_id,
> > +		struct rte_crypto_op **ops, uint16_t nb_ops, void
> *user_param);
> > +#endif
> > +
> >  /**
> >   * Typedef for application callback function to be registered by application
> >   * software for notification of device events @@ -822,7 +851,6 @@
> > struct rte_cryptodev_config {
> >  		enum rte_cryptodev_event_type event,
> >  		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
> >
> > -
> >  typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
> >  		struct rte_crypto_op **ops,	uint16_t nb_ops);
> >  /**< Dequeue processed packets from queue pair of a device. */ @@
> > -839,6 +867,33 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
> >  /** Structure to keep track of registered callbacks */
> > TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +/**
> > + * @internal
> > + * Structure used to hold information about the callbacks to be
> > +called for a
> > + * queue pair on enqueue.
> > + */
> > +struct rte_cryptodev_cb {
> > +	struct rte_cryptodev_cb *next;
> > +	/** < Pointer to next callback */
> > +	rte_cryptodev_callback_fn fn;
> > +	/** < Pointer to callback function */
> > +	void *arg;
> > +	/** < Pointer to argument */
> > +};
> > +
> > +/**
> > + * @internal
> > + * Structure used to hold information about the RCU for a queue pair.
> > + */
> > +struct rte_cryptodev_enq_cb_rcu {
> > +	struct rte_cryptodev_cb *next;
> > +	/** < Pointer to next callback */
> > +	struct rte_rcu_qsbr *qsbr;
> > +	/** < RCU QSBR variable per queue pair */ }; #endif
> > +
> >  /** The data structure associated with each crypto device. */  struct
> > rte_cryptodev {
> >  	dequeue_pkt_burst_t dequeue_burst;
> > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> >  	__extension__
> >  	uint8_t attached : 1;
> >  	/**< Flag indicating the device is attached */
> > +
> > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > +	/**< User application callback for pre enqueue processing */
> > +
> >  } __rte_cache_aligned;
> >
> >  void *
> > @@ -989,6 +1048,31 @@ struct rte_cryptodev_data {  {
> >  	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +	if (unlikely(dev->enq_cbs != NULL)) {
> > +		struct rte_cryptodev_enq_cb_rcu *list;
> > +		struct rte_cryptodev_cb *cb;
> > +
> > +		/* __ATOMIC_RELEASE memory order was used when the
> > +		* call back was inserted into the list.
> > +		* Since there is a clear dependency between loading
> > +		* cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order
> is
> > +		* not required.
> > +		*/
> > +		list = &dev->enq_cbs[qp_id];
> > +		rte_rcu_qsbr_thread_online(list->qsbr, 0);
> > +		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> > +
> > +		while (cb != NULL) {
> > +			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> > +					cb->arg);
> > +			cb = cb->next;
> > +		};
> > +
> > +		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> > +	}
> > +#endif
> > +
> >  	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops,
> nb_ops);
> >  	return (*dev->enqueue_burst)(
> >  			dev->data->queue_pairs[qp_id], ops, nb_ops); @@ -
> 1730,6 +1814,78
> > @@ struct rte_crypto_raw_dp_ctx {
> > rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
> >  		uint32_t n);
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice
> > + *
> > + * Add a user callback for a given crypto device and queue pair which
> > +will be
> > + * called on crypto ops enqueue.
> > + *
> > + * This API configures a function to be called for each burst of
> > +crypto ops
> > + * received on a given crypto device queue pair. The return value is
> > +a pointer
> > + * that can be used later to remove the callback using
> > + * rte_cryptodev_remove_enq_callback().
> > + *
> > + * Multiple functions are called in the order that they are added.
> > + *
> > + * @param	dev_id		The identifier of the device.
> > + * @param	qp_id		The index of the queue pair in which ops are
> > + *				to be enqueued for processing. The value
> > + *				must be in the range [0, nb_queue_pairs - 1]
> > + *				previously supplied to
> > + *				*rte_cryptodev_configure*.
> > + * @param	cb_fn		The callback function
> > + * @param	cb_arg		A generic pointer parameter which will be
> passed
> > + *				to each invocation of the callback function on
> > + *				this crypto device and queue pair.
> > + *
> > + * @return
> > + *   NULL on error.
> > + *   On success, a pointer value which can later be used to remove the
> callback.
> > + */
> > +
> > +__rte_experimental
> > +struct rte_cryptodev_cb *
> > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > +			       uint16_t qp_id,
> > +			       rte_cryptodev_callback_fn cb_fn,
> > +			       void *cb_arg);
> > +
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice
> > + *
> > + * Remove a user callback function for a given crypto device and queue pair.
> > + *
> > + * This function is used to remove callbacks that were added to a
> > +crypto
> > + * device queue pair using rte_cryptodev_add_enq_callback().
> > + *
> > + *
> > + *
> > + * @param	dev_id		The identifier of the device.
> > + * @param	qp_id		The index of the queue pair in which ops are
> > + *				to be enqueued for processing. The value
> > + *				must be in the range [0, nb_queue_pairs - 1]
> > + *				previously supplied to
> > + *				*rte_cryptodev_configure*.
> > + * @param	cb		Pointer to user supplied callback created via
> > + *				rte_cryptodev_add_enq_callback().
> > + *
> > + * @return
> > + *   - 0: Success. Callback was removed.
> > + *   - -EINVAL:  The dev_id or the qp_id is out of range, or the callback
> > + *               is NULL or not found for the crypto device queue pair.
> > + */
> > +
> > +__rte_experimental
> > +int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> > +				      uint16_t qp_id,
> > +				      struct rte_cryptodev_cb *cb);
> > +
> > +#endif
> > +
> >  #ifdef __cplusplus
> >  }
> >  #endif
> > diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map
> > b/lib/librte_cryptodev/rte_cryptodev_version.map
> > index 7e4360f..5d8d6b0 100644
> > --- a/lib/librte_cryptodev/rte_cryptodev_version.map
> > +++ b/lib/librte_cryptodev/rte_cryptodev_version.map
> > @@ -101,6 +101,7 @@ EXPERIMENTAL {
> >  	rte_cryptodev_get_qp_status;
> >
> >  	# added in 20.11
> > +	rte_cryptodev_add_enq_callback;
> >  	rte_cryptodev_configure_raw_dp_ctx;
> >  	rte_cryptodev_get_raw_dp_ctx_size;
> >  	rte_cryptodev_raw_dequeue;
> > @@ -109,4 +110,5 @@ EXPERIMENTAL {
> >  	rte_cryptodev_raw_enqueue;
> >  	rte_cryptodev_raw_enqueue_burst;
> >  	rte_cryptodev_raw_enqueue_done;
> > +	rte_cryptodev_remove_enq_callback;
> >  };
> > --
> > 1.9.1
Ananyev, Konstantin Oct. 27, 2020, 5:20 p.m. UTC | #3
> 
> > -----Original Message-----
> > From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> > Sent: Tuesday, October 27, 2020 6:18 PM
> > To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org;
> > Doherty, Declan <declan.doherty@intel.com>; akhil.goyal@nxp.com;
> > Honnappa.Nagarahalli@arm.com
> > Cc: Vangati, Narender <narender.vangati@intel.com>; jerinj@marvell.com
> > Subject: RE: [v4 1/3] cryptodev: support enqueue callback functions
> >
> >
> > >
> > > This patch adds APIs to add/remove callback functions. The callback
> > > function will be called for each burst of crypto ops received on a
> > > given crypto device queue pair.
> > >
> > > Signed-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
> > > ---
> > >  config/rte_config.h                            |   1 +
> > >  lib/librte_cryptodev/meson.build               |   2 +-
> > >  lib/librte_cryptodev/rte_cryptodev.c           | 230
> > +++++++++++++++++++++++++
> > >  lib/librte_cryptodev/rte_cryptodev.h           | 158 ++++++++++++++++-
> > >  lib/librte_cryptodev/rte_cryptodev_version.map |   2 +
> > >  5 files changed, 391 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/config/rte_config.h b/config/rte_config.h index
> > > 03d90d7..e999d93 100644
> > > --- a/config/rte_config.h
> > > +++ b/config/rte_config.h
> > > @@ -61,6 +61,7 @@
> > >  /* cryptodev defines */
> > >  #define RTE_CRYPTO_MAX_DEVS 64
> > >  #define RTE_CRYPTODEV_NAME_LEN 64
> > > +#define RTE_CRYPTO_CALLBACKS 1
> > >
> > >  /* compressdev defines */
> > >  #define RTE_COMPRESS_MAX_DEVS 64
> > > diff --git a/lib/librte_cryptodev/meson.build
> > > b/lib/librte_cryptodev/meson.build
> > > index c4c6b3b..8c5493f 100644
> > > --- a/lib/librte_cryptodev/meson.build
> > > +++ b/lib/librte_cryptodev/meson.build
> > > @@ -9,4 +9,4 @@ headers = files('rte_cryptodev.h',
> > >  	'rte_crypto.h',
> > >  	'rte_crypto_sym.h',
> > >  	'rte_crypto_asym.h')
> > > -deps += ['kvargs', 'mbuf']
> > > +deps += ['kvargs', 'mbuf', 'rcu']
> > > diff --git a/lib/librte_cryptodev/rte_cryptodev.c
> > > b/lib/librte_cryptodev/rte_cryptodev.c
> > > index 3d95ac6..0880d9b 100644
> > > --- a/lib/librte_cryptodev/rte_cryptodev.c
> > > +++ b/lib/librte_cryptodev/rte_cryptodev.c
> > > @@ -448,6 +448,91 @@ struct
> > rte_cryptodev_sym_session_pool_private_data {
> > >  	return 0;
> > >  }
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +/* spinlock for crypto device enq callbacks */ static rte_spinlock_t
> > > +rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
> > > +
> > > +static void
> > > +cryptodev_cb_cleanup(struct rte_cryptodev *dev) {
> > > +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > +	uint16_t qp_id;
> > > +
> > > +	if (dev->enq_cbs == NULL)
> > > +		return;
> > > +
> > > +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > > +		list = &dev->enq_cbs[qp_id];
> > > +		prev_cb = &list->next;
> > > +
> > > +		while (*prev_cb != NULL) {
> > > +			curr_cb = *prev_cb;
> > > +			/* Remove the user cb from the callback list. */
> > > +			__atomic_store_n(prev_cb, curr_cb->next,
> > > +				__ATOMIC_RELAXED);
> > > +			rte_rcu_qsbr_synchronize(list->qsbr,
> > > +				RTE_QSBR_THRID_INVALID);
> >
> > You call this function (cb_cleanup) only at dev_configure().
> > At that moment DP threads can't do enqueue/dequeue anyway.
> > So you can safely skip all this synchronization code here and just do:
> >
> > cb = list->next;
> > while (cb != NULL) {
> > 	next = cb->next;
> > 	rte_free(cb);
> > 	cb = next;
> > }
> >
> Ok
> >
> > > +			rte_free(curr_cb);
> >
> > One thing that makes it sort of grey area:
> > we do free() for cb itself, but user provided data will be sort of 'lost'.
> > As it is not referenced from our cb struct anymore...
> > I see two options here - first just document explicitly that callbacks wouldn't
> > survive cryptodev_configure() and it is user responsibility to remove all
> > installed callbacks before doing dev_configure() to avoid possible memory
> > leakage.
> Ok. I will update the documentation for this and send a new patch set.

Ok, please keep my ack on your new version.

> > Another option - add user provided cleanup() function pointer into struct
> > rte_cryptodev_cb and call it here if provided:
> > struct rte_cryptodev_cb {
> > 	struct rte_cryptodev_cb *next;
> > 	/** < Pointer to next callback */
> > 	rte_cryptodev_callback_fn fn;
> > 	/** < Pointer to callback function */
> > 	void *arg;
> > 	/** < Pointer to argument */
> > 	void (*cleanup)(void *);
> > };
> >
> > And here:
> > 	If (curr_cb->cleanup != NULL)
> > 		curr_cb->cleanup(curr_cb->arg);
> >
> >  	rte_free(curr_cb);
> >
> > Rest of the code - LGTM.
> > So with that addressed:
> > Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> >
> > > +		}
> > > +
> > > +		rte_free(list->qsbr);
> > > +	}
> > > +
> > > +	rte_free(dev->enq_cbs);
> > > +	dev->enq_cbs = NULL;
> > > +}
> > > +
> > > +static int
> > > +cryptodev_cb_init(struct rte_cryptodev *dev) {
> > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > +	struct rte_rcu_qsbr *qsbr;
> > > +	uint16_t qp_id;
> > > +	size_t size;
> > > +
> > > +	/* Max thread set to 1, as one DP thread accessing a queue-pair */
> > > +	const uint32_t max_threads = 1;
> > > +
> > > +	dev->enq_cbs = rte_zmalloc(NULL,
> > > +				   sizeof(struct rte_cryptodev_enq_cb_rcu) *
> > > +				   dev->data->nb_queue_pairs, 0);
> > > +	if (dev->enq_cbs == NULL) {
> > > +		CDEV_LOG_ERR("Failed to allocate memory for callbacks");
> > > +		rte_errno = ENOMEM;
> > > +		return -1;
> > > +	}
> > > +
> > > +	/* Create RCU QSBR variable */
> > > +	size = rte_rcu_qsbr_get_memsize(max_threads);
> > > +
> > > +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > > +		list = &dev->enq_cbs[qp_id];
> > > +		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
> > > +		if (qsbr == NULL) {
> > > +			CDEV_LOG_ERR("Failed to allocate memory for RCU
> > on "
> > > +				"queue_pair_id=%d", qp_id);
> > > +			goto cb_init_err;
> > > +		}
> > > +
> > > +		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
> > > +			CDEV_LOG_ERR("Failed to initialize for RCU on "
> > > +				"queue_pair_id=%d", qp_id);
> > > +			goto cb_init_err;
> > > +		}
> > > +
> > > +		list->qsbr = qsbr;
> > > +	}
> > > +
> > > +	return 0;
> > > +
> > > +cb_init_err:
> > > +	rte_errno = ENOMEM;
> > > +	cryptodev_cb_cleanup(dev);
> > > +	return -1;
> > > +
> > > +}
> > > +#endif
> > >
> > >  const char *
> > >  rte_cryptodev_get_feature_name(uint64_t flag) @@ -927,6 +1012,11 @@
> > > struct rte_cryptodev *
> > >
> > >  	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -
> > ENOTSUP);
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > +	cryptodev_cb_cleanup(dev);
> > > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > +#endif
> > >  	/* Setup new number of queue pairs and reconfigure device. */
> > >  	diag = rte_cryptodev_queue_pairs_config(dev, config-
> > >nb_queue_pairs,
> > >  			config->socket_id);
> > > @@ -936,6 +1026,15 @@ struct rte_cryptodev *
> > >  		return diag;
> > >  	}
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > +	diag = cryptodev_cb_init(dev);
> > > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > +	if (diag) {
> > > +		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
> > > +		return -ENOMEM;
> > > +	}
> > > +#endif
> > >  	rte_cryptodev_trace_configure(dev_id, config);
> > >  	return (*dev->dev_ops->dev_configure)(dev, config);  } @@ -1136,6
> > > +1235,137 @@ struct rte_cryptodev *
> > >  			socket_id);
> > >  }
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +struct rte_cryptodev_cb *
> > > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > > +			       uint16_t qp_id,
> > > +			       rte_cryptodev_callback_fn cb_fn,
> > > +			       void *cb_arg)
> > > +{
> > > +	struct rte_cryptodev *dev;
> > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > +	struct rte_cryptodev_cb *cb, *tail;
> > > +
> > > +	if (!cb_fn)
> > > +		return NULL;
> > > +
> > > +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > > +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > > +		return NULL;
> > > +	}
> > > +
> > > +	dev = &rte_crypto_devices[dev_id];
> > > +	if (qp_id >= dev->data->nb_queue_pairs) {
> > > +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > > +		return NULL;
> > > +	}
> > > +
> > > +	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
> > > +	if (cb == NULL) {
> > > +		CDEV_LOG_ERR("Failed to allocate memory for callback on "
> > > +			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
> > > +		rte_errno = ENOMEM;
> > > +		return NULL;
> > > +	}
> > > +
> > > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > +
> > > +	cb->fn = cb_fn;
> > > +	cb->arg = cb_arg;
> > > +
> > > +	/* Add the callbacks in fifo order. */
> > > +	list = &dev->enq_cbs[qp_id];
> > > +	tail = list->next;
> > > +
> > > +	if (tail) {
> > > +		while (tail->next)
> > > +			tail = tail->next;
> > > +		/* Stores to cb->fn and cb->param should complete before
> > > +		 * cb is visible to data plane.
> > > +		 */
> > > +		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
> > > +	} else {
> > > +		/* Stores to cb->fn and cb->param should complete before
> > > +		 * cb is visible to data plane.
> > > +		 */
> > > +		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
> > > +	}
> > > +
> > > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > +
> > > +	return cb;
> > > +}
> > > +
> > > +int
> > > +rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> > > +				  uint16_t qp_id,
> > > +				  struct rte_cryptodev_cb *cb)
> > > +{
> > > +	struct rte_cryptodev *dev;
> > > +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > +	int ret;
> > > +
> > > +	ret = -EINVAL;
> > > +
> > > +	if (!cb) {
> > > +		CDEV_LOG_ERR("cb is NULL");
> > > +		return ret;
> > > +	}
> > > +
> > > +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > > +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > > +		return ret;
> > > +	}
> > > +
> > > +	dev = &rte_crypto_devices[dev_id];
> > > +	if (qp_id >= dev->data->nb_queue_pairs) {
> > > +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > > +		return ret;
> > > +	}
> > > +
> > > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > +	if (dev->enq_cbs == NULL) {
> > > +		CDEV_LOG_ERR("Callback not initialized");
> > > +		goto cb_err;
> > > +	}
> > > +
> > > +	list = &dev->enq_cbs[qp_id];
> > > +	if (list == NULL) {
> > > +		CDEV_LOG_ERR("Callback list is NULL");
> > > +		goto cb_err;
> > > +	}
> > > +
> > > +	if (list->qsbr == NULL) {
> > > +		CDEV_LOG_ERR("Rcu qsbr is NULL");
> > > +		goto cb_err;
> > > +	}
> > > +
> > > +	prev_cb = &list->next;
> > > +	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
> > > +		curr_cb = *prev_cb;
> > > +		if (curr_cb == cb) {
> > > +			/* Remove the user cb from the callback list. */
> > > +			__atomic_store_n(prev_cb, curr_cb->next,
> > > +				__ATOMIC_RELAXED);
> > > +			ret = 0;
> > > +			break;
> > > +		}
> > > +	}
> > > +
> > > +	if (!ret) {
> > > +		/* Call sync with invalid thread id as this is part of
> > > +		 * control plane API
> > > +		 */
> > > +		rte_rcu_qsbr_synchronize(list->qsbr,
> > RTE_QSBR_THRID_INVALID);
> > > +		rte_free(cb);
> > > +	}
> > > +
> > > +cb_err:
> > > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > +	return ret;
> > > +}
> > > +#endif
> > >
> > >  int
> > >  rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats
> > > *stats) diff --git a/lib/librte_cryptodev/rte_cryptodev.h
> > > b/lib/librte_cryptodev/rte_cryptodev.h
> > > index 0935fd5..1b7d7ef 100644
> > > --- a/lib/librte_cryptodev/rte_cryptodev.h
> > > +++ b/lib/librte_cryptodev/rte_cryptodev.h
> > > @@ -23,6 +23,7 @@
> > >  #include "rte_dev.h"
> > >  #include <rte_common.h>
> > >  #include <rte_config.h>
> > > +#include <rte_rcu_qsbr.h>
> > >
> > >  #include "rte_cryptodev_trace_fp.h"
> > >
> > > @@ -522,6 +523,34 @@ struct rte_cryptodev_qp_conf {
> > >  	/**< The mempool for creating sess private data in sessionless mode
> > > */  };
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +/**
> > > + * Function type used for pre processing crypto ops when enqueue
> > > +burst is
> > > + * called.
> > > + *
> > > + * The callback function is called on enqueue burst immediately
> > > + * before the crypto ops are put onto the hardware queue for processing.
> > > + *
> > > + * @param	dev_id		The identifier of the device.
> > > + * @param	qp_id		The index of the queue pair in which ops are
> > > + *				to be enqueued for processing. The value
> > > + *				must be in the range [0, nb_queue_pairs - 1]
> > > + *				previously supplied to
> > > + *				*rte_cryptodev_configure*.
> > > + * @param	ops		The address of an array of *nb_ops* pointers
> > > + *				to *rte_crypto_op* structures which contain
> > > + *				the crypto operations to be processed.
> > > + * @param	nb_ops		The number of operations to process.
> > > + * @param	user_param	The arbitrary user parameter passed in by the
> > > + *				application when the callback was originally
> > > + *				registered.
> > > + * @return			The number of ops to be enqueued to the
> > > + *				crypto device.
> > > + */
> > > +typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t
> > qp_id,
> > > +		struct rte_crypto_op **ops, uint16_t nb_ops, void
> > *user_param);
> > > +#endif
> > > +
> > >  /**
> > >   * Typedef for application callback function to be registered by application
> > >   * software for notification of device events @@ -822,7 +851,6 @@
> > > struct rte_cryptodev_config {
> > >  		enum rte_cryptodev_event_type event,
> > >  		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
> > >
> > > -
> > >  typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
> > >  		struct rte_crypto_op **ops,	uint16_t nb_ops);
> > >  /**< Dequeue processed packets from queue pair of a device. */ @@
> > > -839,6 +867,33 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
> > >  /** Structure to keep track of registered callbacks */
> > > TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +/**
> > > + * @internal
> > > + * Structure used to hold information about the callbacks to be
> > > +called for a
> > > + * queue pair on enqueue.
> > > + */
> > > +struct rte_cryptodev_cb {
> > > +	struct rte_cryptodev_cb *next;
> > > +	/** < Pointer to next callback */
> > > +	rte_cryptodev_callback_fn fn;
> > > +	/** < Pointer to callback function */
> > > +	void *arg;
> > > +	/** < Pointer to argument */
> > > +};
> > > +
> > > +/**
> > > + * @internal
> > > + * Structure used to hold information about the RCU for a queue pair.
> > > + */
> > > +struct rte_cryptodev_enq_cb_rcu {
> > > +	struct rte_cryptodev_cb *next;
> > > +	/** < Pointer to next callback */
> > > +	struct rte_rcu_qsbr *qsbr;
> > > +	/** < RCU QSBR variable per queue pair */ }; #endif
> > > +
> > >  /** The data structure associated with each crypto device. */  struct
> > > rte_cryptodev {
> > >  	dequeue_pkt_burst_t dequeue_burst;
> > > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> > >  	__extension__
> > >  	uint8_t attached : 1;
> > >  	/**< Flag indicating the device is attached */
> > > +
> > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > +	/**< User application callback for pre enqueue processing */
> > > +
> > >  } __rte_cache_aligned;
> > >
> > >  void *
> > > @@ -989,6 +1048,31 @@ struct rte_cryptodev_data {  {
> > >  	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +	if (unlikely(dev->enq_cbs != NULL)) {
> > > +		struct rte_cryptodev_enq_cb_rcu *list;
> > > +		struct rte_cryptodev_cb *cb;
> > > +
> > > +		/* __ATOMIC_RELEASE memory order was used when the
> > > +		* call back was inserted into the list.
> > > +		* Since there is a clear dependency between loading
> > > +		* cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order
> > is
> > > +		* not required.
> > > +		*/
> > > +		list = &dev->enq_cbs[qp_id];
> > > +		rte_rcu_qsbr_thread_online(list->qsbr, 0);
> > > +		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> > > +
> > > +		while (cb != NULL) {
> > > +			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> > > +					cb->arg);
> > > +			cb = cb->next;
> > > +		};
> > > +
> > > +		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> > > +	}
> > > +#endif
> > > +
> > >  	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops,
> > nb_ops);
> > >  	return (*dev->enqueue_burst)(
> > >  			dev->data->queue_pairs[qp_id], ops, nb_ops); @@ -
> > 1730,6 +1814,78
> > > @@ struct rte_crypto_raw_dp_ctx {
> > > rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
> > >  		uint32_t n);
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice
> > > + *
> > > + * Add a user callback for a given crypto device and queue pair which
> > > +will be
> > > + * called on crypto ops enqueue.
> > > + *
> > > + * This API configures a function to be called for each burst of
> > > +crypto ops
> > > + * received on a given crypto device queue pair. The return value is
> > > +a pointer
> > > + * that can be used later to remove the callback using
> > > + * rte_cryptodev_remove_enq_callback().
> > > + *
> > > + * Multiple functions are called in the order that they are added.
> > > + *
> > > + * @param	dev_id		The identifier of the device.
> > > + * @param	qp_id		The index of the queue pair in which ops are
> > > + *				to be enqueued for processing. The value
> > > + *				must be in the range [0, nb_queue_pairs - 1]
> > > + *				previously supplied to
> > > + *				*rte_cryptodev_configure*.
> > > + * @param	cb_fn		The callback function
> > > + * @param	cb_arg		A generic pointer parameter which will be
> > passed
> > > + *				to each invocation of the callback function on
> > > + *				this crypto device and queue pair.
> > > + *
> > > + * @return
> > > + *   NULL on error.
> > > + *   On success, a pointer value which can later be used to remove the
> > callback.
> > > + */
> > > +
> > > +__rte_experimental
> > > +struct rte_cryptodev_cb *
> > > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > > +			       uint16_t qp_id,
> > > +			       rte_cryptodev_callback_fn cb_fn,
> > > +			       void *cb_arg);
> > > +
> > > +
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice
> > > + *
> > > + * Remove a user callback function for a given crypto device and queue pair.
> > > + *
> > > + * This function is used to remove callbacks that were added to a
> > > +crypto
> > > + * device queue pair using rte_cryptodev_add_enq_callback().
> > > + *
> > > + *
> > > + *
> > > + * @param	dev_id		The identifier of the device.
> > > + * @param	qp_id		The index of the queue pair in which ops are
> > > + *				to be enqueued for processing. The value
> > > + *				must be in the range [0, nb_queue_pairs - 1]
> > > + *				previously supplied to
> > > + *				*rte_cryptodev_configure*.
> > > + * @param	cb		Pointer to user supplied callback created via
> > > + *				rte_cryptodev_add_enq_callback().
> > > + *
> > > + * @return
> > > + *   - 0: Success. Callback was removed.
> > > + *   - -EINVAL:  The dev_id or the qp_id is out of range, or the callback
> > > + *               is NULL or not found for the crypto device queue pair.
> > > + */
> > > +
> > > +__rte_experimental
> > > +int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> > > +				      uint16_t qp_id,
> > > +				      struct rte_cryptodev_cb *cb);
> > > +
> > > +#endif
> > > +
> > >  #ifdef __cplusplus
> > >  }
> > >  #endif
> > > diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map
> > > b/lib/librte_cryptodev/rte_cryptodev_version.map
> > > index 7e4360f..5d8d6b0 100644
> > > --- a/lib/librte_cryptodev/rte_cryptodev_version.map
> > > +++ b/lib/librte_cryptodev/rte_cryptodev_version.map
> > > @@ -101,6 +101,7 @@ EXPERIMENTAL {
> > >  	rte_cryptodev_get_qp_status;
> > >
> > >  	# added in 20.11
> > > +	rte_cryptodev_add_enq_callback;
> > >  	rte_cryptodev_configure_raw_dp_ctx;
> > >  	rte_cryptodev_get_raw_dp_ctx_size;
> > >  	rte_cryptodev_raw_dequeue;
> > > @@ -109,4 +110,5 @@ EXPERIMENTAL {
> > >  	rte_cryptodev_raw_enqueue;
> > >  	rte_cryptodev_raw_enqueue_burst;
> > >  	rte_cryptodev_raw_enqueue_done;
> > > +	rte_cryptodev_remove_enq_callback;
> > >  };
> > > --
> > > 1.9.1
Gujjar, Abhinandan S Oct. 27, 2020, 5:22 p.m. UTC | #4
Sure Konstantin.

Thanks
Abhinandan

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Tuesday, October 27, 2020 10:51 PM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org;
> Doherty, Declan <declan.doherty@intel.com>; akhil.goyal@nxp.com;
> Honnappa.Nagarahalli@arm.com
> Cc: Vangati, Narender <narender.vangati@intel.com>; jerinj@marvell.com
> Subject: RE: [v4 1/3] cryptodev: support enqueue callback functions
> 
> >
> > > -----Original Message-----
> > > From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> > > Sent: Tuesday, October 27, 2020 6:18 PM
> > > To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>;
> > > dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>;
> > > akhil.goyal@nxp.com; Honnappa.Nagarahalli@arm.com
> > > Cc: Vangati, Narender <narender.vangati@intel.com>;
> > > jerinj@marvell.com
> > > Subject: RE: [v4 1/3] cryptodev: support enqueue callback functions
> > >
> > >
> > > >
> > > > This patch adds APIs to add/remove callback functions. The
> > > > callback function will be called for each burst of crypto ops
> > > > received on a given crypto device queue pair.
> > > >
> > > > Signed-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
> > > > ---
> > > >  config/rte_config.h                            |   1 +
> > > >  lib/librte_cryptodev/meson.build               |   2 +-
> > > >  lib/librte_cryptodev/rte_cryptodev.c           | 230
> > > +++++++++++++++++++++++++
> > > >  lib/librte_cryptodev/rte_cryptodev.h           | 158 ++++++++++++++++-
> > > >  lib/librte_cryptodev/rte_cryptodev_version.map |   2 +
> > > >  5 files changed, 391 insertions(+), 2 deletions(-)
> > > >
> > > > diff --git a/config/rte_config.h b/config/rte_config.h index
> > > > 03d90d7..e999d93 100644
> > > > --- a/config/rte_config.h
> > > > +++ b/config/rte_config.h
> > > > @@ -61,6 +61,7 @@
> > > >  /* cryptodev defines */
> > > >  #define RTE_CRYPTO_MAX_DEVS 64
> > > >  #define RTE_CRYPTODEV_NAME_LEN 64
> > > > +#define RTE_CRYPTO_CALLBACKS 1
> > > >
> > > >  /* compressdev defines */
> > > >  #define RTE_COMPRESS_MAX_DEVS 64
> > > > diff --git a/lib/librte_cryptodev/meson.build
> > > > b/lib/librte_cryptodev/meson.build
> > > > index c4c6b3b..8c5493f 100644
> > > > --- a/lib/librte_cryptodev/meson.build
> > > > +++ b/lib/librte_cryptodev/meson.build
> > > > @@ -9,4 +9,4 @@ headers = files('rte_cryptodev.h',
> > > >  	'rte_crypto.h',
> > > >  	'rte_crypto_sym.h',
> > > >  	'rte_crypto_asym.h')
> > > > -deps += ['kvargs', 'mbuf']
> > > > +deps += ['kvargs', 'mbuf', 'rcu']
> > > > diff --git a/lib/librte_cryptodev/rte_cryptodev.c
> > > > b/lib/librte_cryptodev/rte_cryptodev.c
> > > > index 3d95ac6..0880d9b 100644
> > > > --- a/lib/librte_cryptodev/rte_cryptodev.c
> > > > +++ b/lib/librte_cryptodev/rte_cryptodev.c
> > > > @@ -448,6 +448,91 @@ struct
> > > rte_cryptodev_sym_session_pool_private_data {
> > > >  	return 0;
> > > >  }
> > > >
> > > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > > +/* spinlock for crypto device enq callbacks */ static
> > > > +rte_spinlock_t rte_cryptodev_callback_lock =
> > > > +RTE_SPINLOCK_INITIALIZER;
> > > > +
> > > > +static void
> > > > +cryptodev_cb_cleanup(struct rte_cryptodev *dev) {
> > > > +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > > +	uint16_t qp_id;
> > > > +
> > > > +	if (dev->enq_cbs == NULL)
> > > > +		return;
> > > > +
> > > > +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > > > +		list = &dev->enq_cbs[qp_id];
> > > > +		prev_cb = &list->next;
> > > > +
> > > > +		while (*prev_cb != NULL) {
> > > > +			curr_cb = *prev_cb;
> > > > +			/* Remove the user cb from the callback list. */
> > > > +			__atomic_store_n(prev_cb, curr_cb->next,
> > > > +				__ATOMIC_RELAXED);
> > > > +			rte_rcu_qsbr_synchronize(list->qsbr,
> > > > +				RTE_QSBR_THRID_INVALID);
> > >
> > > You call this function (cb_cleanup) only at dev_configure().
> > > At that moment DP threads can't do enqueue/dequeue anyway.
> > > So you can safely skip all this synchronization code here and just do:
> > >
> > > cb = list->next;
> > > while (cb != NULL) {
> > > 	next = cb->next;
> > > 	rte_free(cb);
> > > 	cb = next;
> > > }
> > >
> > Ok
> > >
> > > > +			rte_free(curr_cb);
> > >
> > > One thing that makes it sort of grey area:
> > > we do free() for cb itself, but user provided data will be sort of 'lost'.
> > > As it is not referenced from our cb struct anymore...
> > > I see two options here - first just document explicitly that
> > > callbacks wouldn't survive cryptodev_configure() and it is the user's
> > > responsibility to remove all installed callbacks before doing
> > > dev_configure() to avoid possible memory leakage.
> > Ok. I will update the documentation for this and send a new patch set.
> 
> Ok, please keep my ack on your new version.
> 
> > > Another option - add user provided cleanup() function pointer into
> > > struct rte_cryptodev_cb and call it here if provided:
> > > struct rte_cryptodev_cb {
> > > 	struct rte_cryptodev_cb *next;
> > > 	/** < Pointer to next callback */
> > > 	rte_cryptodev_callback_fn fn;
> > > 	/** < Pointer to callback function */
> > > 	void *arg;
> > > 	/** < Pointer to argument */
> > > 	void (*cleanup)(void *);
> > > };
> > >
> > > And here:
> > > 	If (curr_cb->cleanup != NULL)
> > > 		curr_cb->cleanup(curr_cb->arg);
> > >
> > >  	rte_free(curr_cb);
> > >
> > > Rest of the code - LGTM.
> > > So with that addressed:
> > > Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > >
> > > > +		}
> > > > +
> > > > +		rte_free(list->qsbr);
> > > > +	}
> > > > +
> > > > +	rte_free(dev->enq_cbs);
> > > > +	dev->enq_cbs = NULL;
> > > > +}
> > > > +
> > > > +static int
> > > > +cryptodev_cb_init(struct rte_cryptodev *dev) {
> > > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > > +	struct rte_rcu_qsbr *qsbr;
> > > > +	uint16_t qp_id;
> > > > +	size_t size;
> > > > +
> > > > +	/* Max thread set to 1, as one DP thread accessing a queue-pair */
> > > > +	const uint32_t max_threads = 1;
> > > > +
> > > > +	dev->enq_cbs = rte_zmalloc(NULL,
> > > > +				   sizeof(struct rte_cryptodev_enq_cb_rcu) *
> > > > +				   dev->data->nb_queue_pairs, 0);
> > > > +	if (dev->enq_cbs == NULL) {
> > > > +		CDEV_LOG_ERR("Failed to allocate memory for callbacks");
> > > > +		rte_errno = ENOMEM;
> > > > +		return -1;
> > > > +	}
> > > > +
> > > > +	/* Create RCU QSBR variable */
> > > > +	size = rte_rcu_qsbr_get_memsize(max_threads);
> > > > +
> > > > +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > > > +		list = &dev->enq_cbs[qp_id];
> > > > +		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
> > > > +		if (qsbr == NULL) {
> > > > +			CDEV_LOG_ERR("Failed to allocate memory for RCU
> > > on "
> > > > +				"queue_pair_id=%d", qp_id);
> > > > +			goto cb_init_err;
> > > > +		}
> > > > +
> > > > +		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
> > > > +			CDEV_LOG_ERR("Failed to initialize for RCU on "
> > > > +				"queue_pair_id=%d", qp_id);
> > > > +			goto cb_init_err;
> > > > +		}
> > > > +
> > > > +		list->qsbr = qsbr;
> > > > +	}
> > > > +
> > > > +	return 0;
> > > > +
> > > > +cb_init_err:
> > > > +	rte_errno = ENOMEM;
> > > > +	cryptodev_cb_cleanup(dev);
> > > > +	return -1;
> > > > +
> > > > +}
> > > > +#endif
> > > >
> > > >  const char *
> > > >  rte_cryptodev_get_feature_name(uint64_t flag) @@ -927,6 +1012,11
> > > > @@ struct rte_cryptodev *
> > > >
> > > >  	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -
> > > ENOTSUP);
> > > >
> > > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > > +	cryptodev_cb_cleanup(dev);
> > > > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > > +#endif
> > > >  	/* Setup new number of queue pairs and reconfigure device. */
> > > >  	diag = rte_cryptodev_queue_pairs_config(dev, config-
> > > >nb_queue_pairs,
> > > >  			config->socket_id);
> > > > @@ -936,6 +1026,15 @@ struct rte_cryptodev *
> > > >  		return diag;
> > > >  	}
> > > >
> > > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > > +	diag = cryptodev_cb_init(dev);
> > > > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > > +	if (diag) {
> > > > +		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
> > > > +		return -ENOMEM;
> > > > +	}
> > > > +#endif
> > > >  	rte_cryptodev_trace_configure(dev_id, config);
> > > >  	return (*dev->dev_ops->dev_configure)(dev, config);  } @@
> > > > -1136,6
> > > > +1235,137 @@ struct rte_cryptodev *
> > > >  			socket_id);
> > > >  }
> > > >
> > > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > > +struct rte_cryptodev_cb *
> > > > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > > > +			       uint16_t qp_id,
> > > > +			       rte_cryptodev_callback_fn cb_fn,
> > > > +			       void *cb_arg)
> > > > +{
> > > > +	struct rte_cryptodev *dev;
> > > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > > +	struct rte_cryptodev_cb *cb, *tail;
> > > > +
> > > > +	if (!cb_fn)
> > > > +		return NULL;
> > > > +
> > > > +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > > > +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > > > +		return NULL;
> > > > +	}
> > > > +
> > > > +	dev = &rte_crypto_devices[dev_id];
> > > > +	if (qp_id >= dev->data->nb_queue_pairs) {
> > > > +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > > > +		return NULL;
> > > > +	}
> > > > +
> > > > +	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
> > > > +	if (cb == NULL) {
> > > > +		CDEV_LOG_ERR("Failed to allocate memory for callback on "
> > > > +			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
> > > > +		rte_errno = ENOMEM;
> > > > +		return NULL;
> > > > +	}
> > > > +
> > > > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > > +
> > > > +	cb->fn = cb_fn;
> > > > +	cb->arg = cb_arg;
> > > > +
> > > > +	/* Add the callbacks in fifo order. */
> > > > +	list = &dev->enq_cbs[qp_id];
> > > > +	tail = list->next;
> > > > +
> > > > +	if (tail) {
> > > > +		while (tail->next)
> > > > +			tail = tail->next;
> > > > +		/* Stores to cb->fn and cb->param should complete before
> > > > +		 * cb is visible to data plane.
> > > > +		 */
> > > > +		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
> > > > +	} else {
> > > > +		/* Stores to cb->fn and cb->param should complete before
> > > > +		 * cb is visible to data plane.
> > > > +		 */
> > > > +		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
> > > > +	}
> > > > +
> > > > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > > +
> > > > +	return cb;
> > > > +}
> > > > +
> > > > +int
> > > > +rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> > > > +				  uint16_t qp_id,
> > > > +				  struct rte_cryptodev_cb *cb) {
> > > > +	struct rte_cryptodev *dev;
> > > > +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > > +	int ret;
> > > > +
> > > > +	ret = -EINVAL;
> > > > +
> > > > +	if (!cb) {
> > > > +		CDEV_LOG_ERR("cb is NULL");
> > > > +		return ret;
> > > > +	}
> > > > +
> > > > +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > > > +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > > > +		return ret;
> > > > +	}
> > > > +
> > > > +	dev = &rte_crypto_devices[dev_id];
> > > > +	if (qp_id >= dev->data->nb_queue_pairs) {
> > > > +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > > > +		return ret;
> > > > +	}
> > > > +
> > > > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > > +	if (dev->enq_cbs == NULL) {
> > > > +		CDEV_LOG_ERR("Callback not initialized");
> > > > +		goto cb_err;
> > > > +	}
> > > > +
> > > > +	list = &dev->enq_cbs[qp_id];
> > > > +	if (list == NULL) {
> > > > +		CDEV_LOG_ERR("Callback list is NULL");
> > > > +		goto cb_err;
> > > > +	}
> > > > +
> > > > +	if (list->qsbr == NULL) {
> > > > +		CDEV_LOG_ERR("Rcu qsbr is NULL");
> > > > +		goto cb_err;
> > > > +	}
> > > > +
> > > > +	prev_cb = &list->next;
> > > > +	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
> > > > +		curr_cb = *prev_cb;
> > > > +		if (curr_cb == cb) {
> > > > +			/* Remove the user cb from the callback list. */
> > > > +			__atomic_store_n(prev_cb, curr_cb->next,
> > > > +				__ATOMIC_RELAXED);
> > > > +			ret = 0;
> > > > +			break;
> > > > +		}
> > > > +	}
> > > > +
> > > > +	if (!ret) {
> > > > +		/* Call sync with invalid thread id as this is part of
> > > > +		 * control plane API
> > > > +		 */
> > > > +		rte_rcu_qsbr_synchronize(list->qsbr,
> > > RTE_QSBR_THRID_INVALID);
> > > > +		rte_free(cb);
> > > > +	}
> > > > +
> > > > +cb_err:
> > > > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > > +	return ret;
> > > > +}
> > > > +#endif
> > > >
> > > >  int
> > > >  rte_cryptodev_stats_get(uint8_t dev_id, struct
> > > > rte_cryptodev_stats
> > > > *stats) diff --git a/lib/librte_cryptodev/rte_cryptodev.h
> > > > b/lib/librte_cryptodev/rte_cryptodev.h
> > > > index 0935fd5..1b7d7ef 100644
> > > > --- a/lib/librte_cryptodev/rte_cryptodev.h
> > > > +++ b/lib/librte_cryptodev/rte_cryptodev.h
> > > > @@ -23,6 +23,7 @@
> > > >  #include "rte_dev.h"
> > > >  #include <rte_common.h>
> > > >  #include <rte_config.h>
> > > > +#include <rte_rcu_qsbr.h>
> > > >
> > > >  #include "rte_cryptodev_trace_fp.h"
> > > >
> > > > @@ -522,6 +523,34 @@ struct rte_cryptodev_qp_conf {
> > > >  	/**< The mempool for creating sess private data in sessionless
> > > > mode */  };
> > > >
> > > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > > +/**
> > > > + * Function type used for pre processing crypto ops when enqueue
> > > > +burst is
> > > > + * called.
> > > > + *
> > > > + * The callback function is called on enqueue burst immediately
> > > > + * before the crypto ops are put onto the hardware queue for processing.
> > > > + *
> > > > + * @param	dev_id		The identifier of the device.
> > > > + * @param	qp_id		The index of the queue pair in which
> ops are
> > > > + *				to be enqueued for processing. The
> value
> > > > + *				must be in the range [0,
> nb_queue_pairs - 1]
> > > > + *				previously supplied to
> > > > + *				*rte_cryptodev_configure*.
> > > > + * @param	ops		The address of an array of *nb_ops*
> pointers
> > > > + *				to *rte_crypto_op* structures which
> contain
> > > > + *				the crypto operations to be processed.
> > > > + * @param	nb_ops		The number of operations to process.
> > > > + * @param	user_param	The arbitrary user parameter passed
> in by the
> > > > + *				application when the callback was
> originally
> > > > + *				registered.
> > > > + * @return			The number of ops to be enqueued to
> the
> > > > + *				crypto device.
> > > > + */
> > > > +typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id,
> > > > +uint16_t
> > > qp_id,
> > > > +		struct rte_crypto_op **ops, uint16_t nb_ops, void
> > > *user_param);
> > > > +#endif
> > > > +
> > > >  /**
> > > >   * Typedef for application callback function to be registered by
> application
> > > >   * software for notification of device events @@ -822,7 +851,6 @@
> > > > struct rte_cryptodev_config {
> > > >  		enum rte_cryptodev_event_type event,
> > > >  		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
> > > >
> > > > -
> > > >  typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
> > > >  		struct rte_crypto_op **ops,	uint16_t nb_ops);
> > > >  /**< Dequeue processed packets from queue pair of a device. */ @@
> > > > -839,6 +867,33 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void
> > > > *qp,
> > > >  /** Structure to keep track of registered callbacks */
> > > > TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
> > > >
> > > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > > +/**
> > > > + * @internal
> > > > + * Structure used to hold information about the callbacks to be
> > > > +called for a
> > > > + * queue pair on enqueue.
> > > > + */
> > > > +struct rte_cryptodev_cb {
> > > > +	struct rte_cryptodev_cb *next;
> > > > +	/** < Pointer to next callback */
> > > > +	rte_cryptodev_callback_fn fn;
> > > > +	/** < Pointer to callback function */
> > > > +	void *arg;
> > > > +	/** < Pointer to argument */
> > > > +};
> > > > +
> > > > +/**
> > > > + * @internal
> > > > + * Structure used to hold information about the RCU for a queue pair.
> > > > + */
> > > > +struct rte_cryptodev_enq_cb_rcu {
> > > > +	struct rte_cryptodev_cb *next;
> > > > +	/** < Pointer to next callback */
> > > > +	struct rte_rcu_qsbr *qsbr;
> > > > +	/** < RCU QSBR variable per queue pair */ }; #endif
> > > > +
> > > >  /** The data structure associated with each crypto device. */
> > > > struct rte_cryptodev {
> > > >  	dequeue_pkt_burst_t dequeue_burst; @@ -867,6 +922,10 @@ struct
> > > > rte_cryptodev {
> > > >  	__extension__
> > > >  	uint8_t attached : 1;
> > > >  	/**< Flag indicating the device is attached */
> > > > +
> > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > +	/**< User application callback for pre enqueue processing */
> > > > +
> > > >  } __rte_cache_aligned;
> > > >
> > > >  void *
> > > > @@ -989,6 +1048,31 @@ struct rte_cryptodev_data {  {
> > > >  	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> > > >
> > > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > > +	if (unlikely(dev->enq_cbs != NULL)) {
> > > > +		struct rte_cryptodev_enq_cb_rcu *list;
> > > > +		struct rte_cryptodev_cb *cb;
> > > > +
> > > > +		/* __ATOMIC_RELEASE memory order was used when the
> > > > +		* call back was inserted into the list.
> > > > +		* Since there is a clear dependency between loading
> > > > +		* cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order
> > > is
> > > > +		* not required.
> > > > +		*/
> > > > +		list = &dev->enq_cbs[qp_id];
> > > > +		rte_rcu_qsbr_thread_online(list->qsbr, 0);
> > > > +		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> > > > +
> > > > +		while (cb != NULL) {
> > > > +			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> > > > +					cb->arg);
> > > > +			cb = cb->next;
> > > > +		};
> > > > +
> > > > +		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> > > > +	}
> > > > +#endif
> > > > +
> > > >  	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops,
> > > nb_ops);
> > > >  	return (*dev->enqueue_burst)(
> > > >  			dev->data->queue_pairs[qp_id], ops, nb_ops); @@ -
> > > 1730,6 +1814,78
> > > > @@ struct rte_crypto_raw_dp_ctx {
> > > > rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
> > > >  		uint32_t n);
> > > >
> > > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > > +/**
> > > > + * @warning
> > > > + * @b EXPERIMENTAL: this API may change without prior notice
> > > > + *
> > > > + * Add a user callback for a given crypto device and queue pair
> > > > +which will be
> > > > + * called on crypto ops enqueue.
> > > > + *
> > > > + * This API configures a function to be called for each burst of
> > > > +crypto ops
> > > > + * received on a given crypto device queue pair. The return value
> > > > +is a pointer
> > > > + * that can be used later to remove the callback using
> > > > + * rte_cryptodev_remove_enq_callback().
> > > > + *
> > > > + * Multiple functions are called in the order that they are added.
> > > > + *
> > > > + * @param	dev_id		The identifier of the device.
> > > > + * @param	qp_id		The index of the queue pair in which
> ops are
> > > > + *				to be enqueued for processing. The
> value
> > > > + *				must be in the range [0,
> nb_queue_pairs - 1]
> > > > + *				previously supplied to
> > > > + *				*rte_cryptodev_configure*.
> > > > + * @param	cb_fn		The callback function
> > > > + * @param	cb_arg		A generic pointer parameter which
> will be
> > > passed
> > > > + *				to each invocation of the callback
> function on
> > > > + *				this crypto device and queue pair.
> > > > + *
> > > > + * @return
> > > > + *   NULL on error.
> > > > + *   On success, a pointer value which can later be used to remove the
> > > callback.
> > > > + */
> > > > +
> > > > +__rte_experimental
> > > > +struct rte_cryptodev_cb *
> > > > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > > > +			       uint16_t qp_id,
> > > > +			       rte_cryptodev_callback_fn cb_fn,
> > > > +			       void *cb_arg);
> > > > +
> > > > +
> > > > +/**
> > > > + * @warning
> > > > + * @b EXPERIMENTAL: this API may change without prior notice
> > > > + *
> > > > + * Remove a user callback function for given crypto device and queue
> pair.
> > > > + *
> > > > + * This function is used to removed callbacks that were added to
> > > > +a crypto
> > > > + * device queue pair using rte_cryptodev_add_enq_callback().
> > > > + *
> > > > + *
> > > > + *
> > > > + * @param	dev_id		The identifier of the device.
> > > > + * @param	qp_id		The index of the queue pair in which
> ops are
> > > > + *				to be enqueued for processing. The
> value
> > > > + *				must be in the range [0,
> nb_queue_pairs - 1]
> > > > + *				previously supplied to
> > > > + *				*rte_cryptodev_configure*.
> > > > + * @param	cb		Pointer to user supplied callback
> created via
> > > > + *				rte_cryptodev_add_enq_callback().
> > > > + *
> > > > + * @return
> > > > + *   - 0: Success. Callback was removed.
> > > > + *   - -EINVAL:  The dev_id or the qp_id is out of range, or the callback
> > > > + *               is NULL or not found for the crypto device queue pair.
> > > > + */
> > > > +
> > > > +__rte_experimental
> > > > +int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> > > > +				      uint16_t qp_id,
> > > > +				      struct rte_cryptodev_cb *cb);
> > > > +
> > > > +#endif
> > > > +
> > > >  #ifdef __cplusplus
> > > >  }
> > > >  #endif
> > > > diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map
> > > > b/lib/librte_cryptodev/rte_cryptodev_version.map
> > > > index 7e4360f..5d8d6b0 100644
> > > > --- a/lib/librte_cryptodev/rte_cryptodev_version.map
> > > > +++ b/lib/librte_cryptodev/rte_cryptodev_version.map
> > > > @@ -101,6 +101,7 @@ EXPERIMENTAL {
> > > >  	rte_cryptodev_get_qp_status;
> > > >
> > > >  	# added in 20.11
> > > > +	rte_cryptodev_add_enq_callback;
> > > >  	rte_cryptodev_configure_raw_dp_ctx;
> > > >  	rte_cryptodev_get_raw_dp_ctx_size;
> > > >  	rte_cryptodev_raw_dequeue;
> > > > @@ -109,4 +110,5 @@ EXPERIMENTAL {
> > > >  	rte_cryptodev_raw_enqueue;
> > > >  	rte_cryptodev_raw_enqueue_burst;
> > > >  	rte_cryptodev_raw_enqueue_done;
> > > > +	rte_cryptodev_remove_enq_callback;
> > > >  };
> > > > --
> > > > 1.9.1
Akhil Goyal Oct. 27, 2020, 6:19 p.m. UTC | #5
Hi Abhinandan,
> Subject: [v4 1/3] cryptodev: support enqueue callback functions
> 
> This patch adds APIs to add/remove callback functions. The callback
> function will be called for each burst of crypto ops received on a
> given crypto device queue pair.
> 
> Signed-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
> ---
>  config/rte_config.h                            |   1 +
>  lib/librte_cryptodev/meson.build               |   2 +-
>  lib/librte_cryptodev/rte_cryptodev.c           | 230 +++++++++++++++++++++++++
>  lib/librte_cryptodev/rte_cryptodev.h           | 158 ++++++++++++++++-
>  lib/librte_cryptodev/rte_cryptodev_version.map |   2 +
>  5 files changed, 391 insertions(+), 2 deletions(-)
> 
> diff --git a/config/rte_config.h b/config/rte_config.h
> index 03d90d7..e999d93 100644
> --- a/config/rte_config.h
> +++ b/config/rte_config.h
> @@ -61,6 +61,7 @@
>  /* cryptodev defines */
>  #define RTE_CRYPTO_MAX_DEVS 64
>  #define RTE_CRYPTODEV_NAME_LEN 64
> +#define RTE_CRYPTO_CALLBACKS 1
> 
>  /* compressdev defines */
>  #define RTE_COMPRESS_MAX_DEVS 64
> diff --git a/lib/librte_cryptodev/meson.build b/lib/librte_cryptodev/meson.build
> index c4c6b3b..8c5493f 100644
> --- a/lib/librte_cryptodev/meson.build
> +++ b/lib/librte_cryptodev/meson.build
> @@ -9,4 +9,4 @@ headers = files('rte_cryptodev.h',
>  	'rte_crypto.h',
>  	'rte_crypto_sym.h',
>  	'rte_crypto_asym.h')
> -deps += ['kvargs', 'mbuf']
> +deps += ['kvargs', 'mbuf', 'rcu']
> diff --git a/lib/librte_cryptodev/rte_cryptodev.c
> b/lib/librte_cryptodev/rte_cryptodev.c
> index 3d95ac6..0880d9b 100644
> --- a/lib/librte_cryptodev/rte_cryptodev.c
> +++ b/lib/librte_cryptodev/rte_cryptodev.c
> @@ -448,6 +448,91 @@ struct rte_cryptodev_sym_session_pool_private_data
> {
>  	return 0;
>  }
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +/* spinlock for crypto device enq callbacks */
> +static rte_spinlock_t rte_cryptodev_callback_lock =
> RTE_SPINLOCK_INITIALIZER;
> +
> +static void
> +cryptodev_cb_cleanup(struct rte_cryptodev *dev)
> +{
> +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> +	struct rte_cryptodev_enq_cb_rcu *list;
> +	uint16_t qp_id;
> +
> +	if (dev->enq_cbs == NULL)
> +		return;
> +
> +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> +		list = &dev->enq_cbs[qp_id];
> +		prev_cb = &list->next;
> +
> +		while (*prev_cb != NULL) {
> +			curr_cb = *prev_cb;
> +			/* Remove the user cb from the callback list. */
> +			__atomic_store_n(prev_cb, curr_cb->next,
> +				__ATOMIC_RELAXED);
> +			rte_rcu_qsbr_synchronize(list->qsbr,
> +				RTE_QSBR_THRID_INVALID);
> +			rte_free(curr_cb);
> +		}
> +
> +		rte_free(list->qsbr);
> +	}
> +
> +	rte_free(dev->enq_cbs);
> +	dev->enq_cbs = NULL;
> +}
> +
> +static int
> +cryptodev_cb_init(struct rte_cryptodev *dev)
> +{
> +	struct rte_cryptodev_enq_cb_rcu *list;
> +	struct rte_rcu_qsbr *qsbr;
> +	uint16_t qp_id;
> +	size_t size;
> +
> +	/* Max thread set to 1, as one DP thread accessing a queue-pair */
> +	const uint32_t max_threads = 1;
> +
> +	dev->enq_cbs = rte_zmalloc(NULL,
> +				   sizeof(struct rte_cryptodev_enq_cb_rcu) *
> +				   dev->data->nb_queue_pairs, 0);
> +	if (dev->enq_cbs == NULL) {
> +		CDEV_LOG_ERR("Failed to allocate memory for callbacks");
> +		rte_errno = ENOMEM;
> +		return -1;
> +	}

Why not return ENOMEM here? You are not using rte_errno while returning
from this function, so setting it does not have any meaning.

> +
> +	/* Create RCU QSBR variable */
> +	size = rte_rcu_qsbr_get_memsize(max_threads);
> +
> +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> +		list = &dev->enq_cbs[qp_id];
> +		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
> +		if (qsbr == NULL) {
> +			CDEV_LOG_ERR("Failed to allocate memory for RCU on
> "
> +				"queue_pair_id=%d", qp_id);
> +			goto cb_init_err;
> +		}
> +
> +		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
> +			CDEV_LOG_ERR("Failed to initialize for RCU on "
> +				"queue_pair_id=%d", qp_id);
> +			goto cb_init_err;
> +		}
> +
> +		list->qsbr = qsbr;
> +	}
> +
> +	return 0;
> +
> +cb_init_err:
> +	rte_errno = ENOMEM;
> +	cryptodev_cb_cleanup(dev);
> +	return -1;
Same here, return -ENOMEM

> +
Extra line

> +}
> +#endif
> 
>  const char *
>  rte_cryptodev_get_feature_name(uint64_t flag)
> @@ -927,6 +1012,11 @@ struct rte_cryptodev *
> 
>  	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -
> ENOTSUP);
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> +	cryptodev_cb_cleanup(dev);
> +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> +#endif
>  	/* Setup new number of queue pairs and reconfigure device. */
>  	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
>  			config->socket_id);
> @@ -936,6 +1026,15 @@ struct rte_cryptodev *
>  		return diag;
>  	}
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> +	diag = cryptodev_cb_init(dev);
> +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> +	if (diag) {
> +		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
> +		return -ENOMEM;
> +	}
> +#endif
>  	rte_cryptodev_trace_configure(dev_id, config);
>  	return (*dev->dev_ops->dev_configure)(dev, config);
>  }
> @@ -1136,6 +1235,137 @@ struct rte_cryptodev *
>  			socket_id);
>  }
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +struct rte_cryptodev_cb *
> +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> +			       uint16_t qp_id,
> +			       rte_cryptodev_callback_fn cb_fn,
> +			       void *cb_arg)
> +{
> +	struct rte_cryptodev *dev;
> +	struct rte_cryptodev_enq_cb_rcu *list;
> +	struct rte_cryptodev_cb *cb, *tail;
> +
> +	if (!cb_fn)
> +		return NULL;
> +
> +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> +		return NULL;
> +	}
> +
> +	dev = &rte_crypto_devices[dev_id];
> +	if (qp_id >= dev->data->nb_queue_pairs) {
> +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> +		return NULL;
> +	}

rte_errno is not set before the above three returns.

> +
> +	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
> +	if (cb == NULL) {
> +		CDEV_LOG_ERR("Failed to allocate memory for callback on "
> +			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
> +		rte_errno = ENOMEM;
> +		return NULL;
> +	}
> +
> +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> +
> +	cb->fn = cb_fn;
> +	cb->arg = cb_arg;
> +
> +	/* Add the callbacks in fifo order. */
> +	list = &dev->enq_cbs[qp_id];
> +	tail = list->next;
> +
> +	if (tail) {
> +		while (tail->next)
> +			tail = tail->next;
> +		/* Stores to cb->fn and cb->param should complete before
> +		 * cb is visible to data plane.
> +		 */
> +		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
> +	} else {
> +		/* Stores to cb->fn and cb->param should complete before
> +		 * cb is visible to data plane.
> +		 */
> +		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
> +	}
> +
> +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> +
> +	return cb;
> +}
> +
> +int
> +rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> +				  uint16_t qp_id,
> +				  struct rte_cryptodev_cb *cb)
> +{
> +	struct rte_cryptodev *dev;
> +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> +	struct rte_cryptodev_enq_cb_rcu *list;
> +	int ret;
> +
> +	ret = -EINVAL;
No need to set EINVAL here. You are returning the same value everywhere.
The error numbers can be different at each exit point.

> +
> +	if (!cb) {
> +		CDEV_LOG_ERR("cb is NULL");
> +		return ret;
You should directly return -EINVAL here and in below cases as well.

> +	}
> +
> +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> +		return ret;
Here return value should be -ENODEV


> +	}
> +
> +	dev = &rte_crypto_devices[dev_id];
> +	if (qp_id >= dev->data->nb_queue_pairs) {
> +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> +		return ret;
> +	}
> +
> +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> +	if (dev->enq_cbs == NULL) {
> +		CDEV_LOG_ERR("Callback not initialized");
> +		goto cb_err;
> +	}
> +
> +	list = &dev->enq_cbs[qp_id];
> +	if (list == NULL) {
> +		CDEV_LOG_ERR("Callback list is NULL");
> +		goto cb_err;
> +	}
> +
> +	if (list->qsbr == NULL) {
> +		CDEV_LOG_ERR("Rcu qsbr is NULL");
> +		goto cb_err;
> +	}
> +
> +	prev_cb = &list->next;
> +	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
> +		curr_cb = *prev_cb;
> +		if (curr_cb == cb) {
> +			/* Remove the user cb from the callback list. */
> +			__atomic_store_n(prev_cb, curr_cb->next,
> +				__ATOMIC_RELAXED);
> +			ret = 0;
> +			break;
> +		}
> +	}
> +
> +	if (!ret) {
> +		/* Call sync with invalid thread id as this is part of
> +		 * control plane API
> +		 */
> +		rte_rcu_qsbr_synchronize(list->qsbr,
> RTE_QSBR_THRID_INVALID);
> +		rte_free(cb);
> +	}
> +
> +cb_err:
> +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> +	return ret;
> +}
> +#endif
> 
>  int
>  rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
> diff --git a/lib/librte_cryptodev/rte_cryptodev.h
> b/lib/librte_cryptodev/rte_cryptodev.h
> index 0935fd5..1b7d7ef 100644
> --- a/lib/librte_cryptodev/rte_cryptodev.h
> +++ b/lib/librte_cryptodev/rte_cryptodev.h
> @@ -23,6 +23,7 @@
>  #include "rte_dev.h"
>  #include <rte_common.h>
>  #include <rte_config.h>
> +#include <rte_rcu_qsbr.h>
> 
>  #include "rte_cryptodev_trace_fp.h"
> 
> @@ -522,6 +523,34 @@ struct rte_cryptodev_qp_conf {
>  	/**< The mempool for creating sess private data in sessionless mode */
>  };
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +/**
> + * Function type used for pre processing crypto ops when enqueue burst is
> + * called.
> + *
> + * The callback function is called on enqueue burst immediately
> + * before the crypto ops are put onto the hardware queue for processing.
> + *
> + * @param	dev_id		The identifier of the device.
> + * @param	qp_id		The index of the queue pair in which ops are
> + *				to be enqueued for processing. The value
> + *				must be in the range [0, nb_queue_pairs - 1]
> + *				previously supplied to
> + *				*rte_cryptodev_configure*.
> + * @param	ops		The address of an array of *nb_ops* pointers
> + *				to *rte_crypto_op* structures which contain
> + *				the crypto operations to be processed.
> + * @param	nb_ops		The number of operations to process.
> + * @param	user_param	The arbitrary user parameter passed in by the
> + *				application when the callback was originally
> + *				registered.
> + * @return			The number of ops to be enqueued to the
> + *				crypto device.
> + */
> +typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
> +		struct rte_crypto_op **ops, uint16_t nb_ops, void
> *user_param);
> +#endif
> +
>  /**
>   * Typedef for application callback function to be registered by application
>   * software for notification of device events
> @@ -822,7 +851,6 @@ struct rte_cryptodev_config {
>  		enum rte_cryptodev_event_type event,
>  		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
> 
> -
>  typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
>  		struct rte_crypto_op **ops,	uint16_t nb_ops);
>  /**< Dequeue processed packets from queue pair of a device. */
> @@ -839,6 +867,33 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
>  /** Structure to keep track of registered callbacks */
>  TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +/**
> + * @internal
> + * Structure used to hold information about the callbacks to be called for a
> + * queue pair on enqueue.
> + */
> +struct rte_cryptodev_cb {
> +	struct rte_cryptodev_cb *next;
> +	/** < Pointer to next callback */
> +	rte_cryptodev_callback_fn fn;
> +	/** < Pointer to callback function */
> +	void *arg;
> +	/** < Pointer to argument */
> +};
> +
> +/**
> + * @internal
> + * Structure used to hold information about the RCU for a queue pair.
> + */
> +struct rte_cryptodev_enq_cb_rcu {
> +	struct rte_cryptodev_cb *next;
> +	/** < Pointer to next callback */
> +	struct rte_rcu_qsbr *qsbr;
> +	/** < RCU QSBR variable per queue pair */
> +};
> +#endif
> +
>  /** The data structure associated with each crypto device. */
>  struct rte_cryptodev {
>  	dequeue_pkt_burst_t dequeue_burst;
> @@ -867,6 +922,10 @@ struct rte_cryptodev {
>  	__extension__
>  	uint8_t attached : 1;
>  	/**< Flag indicating the device is attached */
> +
> +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> +	/**< User application callback for pre enqueue processing */
> +
Extra line

We should add support for post-dequeue callbacks also. Since this is an LTS release
and we won't be very flexible in future quarterly releases, we should do all the changes
in one go.
I believe we should also double-check with the techboard whether this is an ABI breakage.
IMO, it is an ABI breakage — rte_cryptodevs is part of the stable APIs — but I am not sure.

>  } __rte_cache_aligned;
> 
>  void *
> @@ -989,6 +1048,31 @@ struct rte_cryptodev_data {
>  {
>  	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +	if (unlikely(dev->enq_cbs != NULL)) {
> +		struct rte_cryptodev_enq_cb_rcu *list;
> +		struct rte_cryptodev_cb *cb;
> +
> +		/* __ATOMIC_RELEASE memory order was used when the
> +		* call back was inserted into the list.
> +		* Since there is a clear dependency between loading
> +		* cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order
> is
> +		* not required.
> +		*/
> +		list = &dev->enq_cbs[qp_id];
> +		rte_rcu_qsbr_thread_online(list->qsbr, 0);
> +		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> +
> +		while (cb != NULL) {
> +			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> +					cb->arg);
> +			cb = cb->next;
> +		};
> +
> +		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> +	}
> +#endif
> +
>  	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops,
> nb_ops);
>  	return (*dev->enqueue_burst)(
>  			dev->data->queue_pairs[qp_id], ops, nb_ops);
> @@ -1730,6 +1814,78 @@ struct rte_crypto_raw_dp_ctx {
>  rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
>  		uint32_t n);
> 
> +#ifdef RTE_CRYPTO_CALLBACKS
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * Add a user callback for a given crypto device and queue pair which will be
> + * called on crypto ops enqueue.
> + *
> + * This API configures a function to be called for each burst of crypto ops
> + * received on a given crypto device queue pair. The return value is a pointer
> + * that can be used later to remove the callback using
> + * rte_cryptodev_remove_enq_callback().
> + *
> + * Multiple functions are called in the order that they are added.

Is there a limit on the number of callbacks that can be added? Better to add a
comment here documenting it.

> + *
> + * @param	dev_id		The identifier of the device.
> + * @param	qp_id		The index of the queue pair in which ops are
> + *				to be enqueued for processing. The value
> + *				must be in the range [0, nb_queue_pairs - 1]
> + *				previously supplied to
> + *				*rte_cryptodev_configure*.
> + * @param	cb_fn		The callback function
> + * @param	cb_arg		A generic pointer parameter which will be
> passed
> + *				to each invocation of the callback function on
> + *				this crypto device and queue pair.
> + *
> + * @return
> + *   NULL on error.
> + *   On success, a pointer value which can later be used to remove the callback.
> + */
> +
> +__rte_experimental
> +struct rte_cryptodev_cb *
> +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> +			       uint16_t qp_id,
> +			       rte_cryptodev_callback_fn cb_fn,
> +			       void *cb_arg);
> +
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * Remove a user callback function for given crypto device and queue pair.
> + *
> + * This function is used to removed callbacks that were added to a crypto
> + * device queue pair using rte_cryptodev_add_enq_callback().
> + *
> + *
> + *
> + * @param	dev_id		The identifier of the device.
> + * @param	qp_id		The index of the queue pair in which ops are
> + *				to be enqueued for processing. The value
> + *				must be in the range [0, nb_queue_pairs - 1]
> + *				previously supplied to
> + *				*rte_cryptodev_configure*.
> + * @param	cb		Pointer to user supplied callback created via
> + *				rte_cryptodev_add_enq_callback().
> + *
> + * @return
> + *   - 0: Success. Callback was removed.
> + *   - -EINVAL:  The dev_id or the qp_id is out of range, or the callback
> + *               is NULL or not found for the crypto device queue pair.
> + */
> +
> +__rte_experimental
> +int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> +				      uint16_t qp_id,
> +				      struct rte_cryptodev_cb *cb);
> +
> +#endif
> +
>  #ifdef __cplusplus
>  }
>  #endif
> diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map
> b/lib/librte_cryptodev/rte_cryptodev_version.map
> index 7e4360f..5d8d6b0 100644
> --- a/lib/librte_cryptodev/rte_cryptodev_version.map
> +++ b/lib/librte_cryptodev/rte_cryptodev_version.map
> @@ -101,6 +101,7 @@ EXPERIMENTAL {
>  	rte_cryptodev_get_qp_status;
> 
>  	# added in 20.11
> +	rte_cryptodev_add_enq_callback;
>  	rte_cryptodev_configure_raw_dp_ctx;
>  	rte_cryptodev_get_raw_dp_ctx_size;
>  	rte_cryptodev_raw_dequeue;
> @@ -109,4 +110,5 @@ EXPERIMENTAL {
>  	rte_cryptodev_raw_enqueue;
>  	rte_cryptodev_raw_enqueue_burst;
>  	rte_cryptodev_raw_enqueue_done;
> +	rte_cryptodev_remove_enq_callback;
>  };
> --
> 1.9.1
Akhil Goyal Oct. 27, 2020, 6:28 p.m. UTC | #6
Hi Tech board members,

I have a doubt about the ABI breakage in the below addition of a field.
Could you please comment?

>  /** The data structure associated with each crypto device. */
>  struct rte_cryptodev {
>  	dequeue_pkt_burst_t dequeue_burst;
> @@ -867,6 +922,10 @@ struct rte_cryptodev {
>  	__extension__
>  	uint8_t attached : 1;
>  	/**< Flag indicating the device is attached */
> +
> +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> +	/**< User application callback for pre enqueue processing */
> +
>  } __rte_cache_aligned;

Here rte_cryptodevs is defined in the stable API list in the map file; it is a pointer
to all rte_cryptodev structures, and the above change changes the size of the structure.
IMO, it seems like an ABI breakage, but I am not sure, so I wanted to double check.
Now, if it is an ABI breakage, can we allow it? There was no deprecation notice
prior to this release.

Also, I think if we are allowing the above change, then we should also add another
field for deq_cbs for post-crypto processing in this patch itself.

Regards,
Akhil
Gujjar, Abhinandan S Oct. 27, 2020, 7:16 p.m. UTC | #7
> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Tuesday, October 27, 2020 11:49 PM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org;
> Doherty, Declan <declan.doherty@intel.com>;
> Honnappa.Nagarahalli@arm.com; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: Vangati, Narender <narender.vangati@intel.com>; jerinj@marvell.com
> Subject: RE: [v4 1/3] cryptodev: support enqueue callback functions
> 
> Hi Abhinandan,
> > Subject: [v4 1/3] cryptodev: support enqueue callback functions
> >
> > This patch adds APIs to add/remove callback functions. The callback
> > function will be called for each burst of crypto ops received on a
> > given crypto device queue pair.
> >
> > Signed-off-by: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
> > ---
> >  config/rte_config.h                            |   1 +
> >  lib/librte_cryptodev/meson.build               |   2 +-
> >  lib/librte_cryptodev/rte_cryptodev.c           | 230
> +++++++++++++++++++++++++
> >  lib/librte_cryptodev/rte_cryptodev.h           | 158 ++++++++++++++++-
> >  lib/librte_cryptodev/rte_cryptodev_version.map |   2 +
> >  5 files changed, 391 insertions(+), 2 deletions(-)
> >
> > diff --git a/config/rte_config.h b/config/rte_config.h index
> > 03d90d7..e999d93 100644
> > --- a/config/rte_config.h
> > +++ b/config/rte_config.h
> > @@ -61,6 +61,7 @@
> >  /* cryptodev defines */
> >  #define RTE_CRYPTO_MAX_DEVS 64
> >  #define RTE_CRYPTODEV_NAME_LEN 64
> > +#define RTE_CRYPTO_CALLBACKS 1
> >
> >  /* compressdev defines */
> >  #define RTE_COMPRESS_MAX_DEVS 64
> > diff --git a/lib/librte_cryptodev/meson.build
> > b/lib/librte_cryptodev/meson.build
> > index c4c6b3b..8c5493f 100644
> > --- a/lib/librte_cryptodev/meson.build
> > +++ b/lib/librte_cryptodev/meson.build
> > @@ -9,4 +9,4 @@ headers = files('rte_cryptodev.h',
> >  	'rte_crypto.h',
> >  	'rte_crypto_sym.h',
> >  	'rte_crypto_asym.h')
> > -deps += ['kvargs', 'mbuf']
> > +deps += ['kvargs', 'mbuf', 'rcu']
> > diff --git a/lib/librte_cryptodev/rte_cryptodev.c
> > b/lib/librte_cryptodev/rte_cryptodev.c
> > index 3d95ac6..0880d9b 100644
> > --- a/lib/librte_cryptodev/rte_cryptodev.c
> > +++ b/lib/librte_cryptodev/rte_cryptodev.c
> > @@ -448,6 +448,91 @@ struct
> > rte_cryptodev_sym_session_pool_private_data
> > {
> >  	return 0;
> >  }
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +/* spinlock for crypto device enq callbacks */ static rte_spinlock_t
> > +rte_cryptodev_callback_lock =
> > RTE_SPINLOCK_INITIALIZER;
> > +
> > +static void
> > +cryptodev_cb_cleanup(struct rte_cryptodev *dev) {
> > +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > +	struct rte_cryptodev_enq_cb_rcu *list;
> > +	uint16_t qp_id;
> > +
> > +	if (dev->enq_cbs == NULL)
> > +		return;
> > +
> > +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > +		list = &dev->enq_cbs[qp_id];
> > +		prev_cb = &list->next;
> > +
> > +		while (*prev_cb != NULL) {
> > +			curr_cb = *prev_cb;
> > +			/* Remove the user cb from the callback list. */
> > +			__atomic_store_n(prev_cb, curr_cb->next,
> > +				__ATOMIC_RELAXED);
> > +			rte_rcu_qsbr_synchronize(list->qsbr,
> > +				RTE_QSBR_THRID_INVALID);
> > +			rte_free(curr_cb);
> > +		}
> > +
> > +		rte_free(list->qsbr);
> > +	}
> > +
> > +	rte_free(dev->enq_cbs);
> > +	dev->enq_cbs = NULL;
> > +}
> > +
> > +static int
> > +cryptodev_cb_init(struct rte_cryptodev *dev) {
> > +	struct rte_cryptodev_enq_cb_rcu *list;
> > +	struct rte_rcu_qsbr *qsbr;
> > +	uint16_t qp_id;
> > +	size_t size;
> > +
> > +	/* Max thread set to 1, as one DP thread accessing a queue-pair */
> > +	const uint32_t max_threads = 1;
> > +
> > +	dev->enq_cbs = rte_zmalloc(NULL,
> > +				   sizeof(struct rte_cryptodev_enq_cb_rcu) *
> > +				   dev->data->nb_queue_pairs, 0);
> > +	if (dev->enq_cbs == NULL) {
> > +		CDEV_LOG_ERR("Failed to allocate memory for callbacks");
> > +		rte_errno = ENOMEM;
> > +		return -1;
> > +	}
> 
> Why not return ENOMEM here? You are not using rte_errno while returning
> from this function, so setting it does not have any meaning.
This is an internal function. The caller is returning ENOMEM.
> 
> > +
> > +	/* Create RCU QSBR variable */
> > +	size = rte_rcu_qsbr_get_memsize(max_threads);
> > +
> > +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > +		list = &dev->enq_cbs[qp_id];
> > +		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
> > +		if (qsbr == NULL) {
> > +			CDEV_LOG_ERR("Failed to allocate memory for RCU
> on
> > "
> > +				"queue_pair_id=%d", qp_id);
> > +			goto cb_init_err;
> > +		}
> > +
> > +		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
> > +			CDEV_LOG_ERR("Failed to initialize for RCU on "
> > +				"queue_pair_id=%d", qp_id);
> > +			goto cb_init_err;
> > +		}
> > +
> > +		list->qsbr = qsbr;
> > +	}
> > +
> > +	return 0;
> > +
> > +cb_init_err:
> > +	rte_errno = ENOMEM;
> > +	cryptodev_cb_cleanup(dev);
> > +	return -1;
> Same here, return -ENOMEM
Same as above
> 
> > +
> Extra line
ok
> 
> > +}
> > +#endif
> >
> >  const char *
> >  rte_cryptodev_get_feature_name(uint64_t flag) @@ -927,6 +1012,11 @@
> > struct rte_cryptodev *
> >
> >  	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -
> ENOTSUP);
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > +	cryptodev_cb_cleanup(dev);
> > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > +#endif
> >  	/* Setup new number of queue pairs and reconfigure device. */
> >  	diag = rte_cryptodev_queue_pairs_config(dev, config-
> >nb_queue_pairs,
> >  			config->socket_id);
> > @@ -936,6 +1026,15 @@ struct rte_cryptodev *
> >  		return diag;
> >  	}
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > +	diag = cryptodev_cb_init(dev);
> > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > +	if (diag) {
> > +		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
> > +		return -ENOMEM;
> > +	}
> > +#endif
> >  	rte_cryptodev_trace_configure(dev_id, config);
> >  	return (*dev->dev_ops->dev_configure)(dev, config);  } @@ -1136,6
> > +1235,137 @@ struct rte_cryptodev *
> >  			socket_id);
> >  }
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +struct rte_cryptodev_cb *
> > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > +			       uint16_t qp_id,
> > +			       rte_cryptodev_callback_fn cb_fn,
> > +			       void *cb_arg)
> > +{
> > +	struct rte_cryptodev *dev;
> > +	struct rte_cryptodev_enq_cb_rcu *list;
> > +	struct rte_cryptodev_cb *cb, *tail;
> > +
> > +	if (!cb_fn)
> > +		return NULL;
> > +
> > +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > +		return NULL;
> > +	}
> > +
> > +	dev = &rte_crypto_devices[dev_id];
> > +	if (qp_id >= dev->data->nb_queue_pairs) {
> > +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > +		return NULL;
> > +	}
> 
> Errno is not set before above three returns.
I will update it in the next version of the patch.
> 
> > +
> > +	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
> > +	if (cb == NULL) {
> > +		CDEV_LOG_ERR("Failed to allocate memory for callback on "
> > +			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
> > +		rte_errno = ENOMEM;
> > +		return NULL;
> > +	}
> > +
> > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > +
> > +	cb->fn = cb_fn;
> > +	cb->arg = cb_arg;
> > +
> > +	/* Add the callbacks in fifo order. */
> > +	list = &dev->enq_cbs[qp_id];
> > +	tail = list->next;
> > +
> > +	if (tail) {
> > +		while (tail->next)
> > +			tail = tail->next;
> > +		/* Stores to cb->fn and cb->param should complete before
> > +		 * cb is visible to data plane.
> > +		 */
> > +		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
> > +	} else {
> > +		/* Stores to cb->fn and cb->param should complete before
> > +		 * cb is visible to data plane.
> > +		 */
> > +		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
> > +	}
> > +
> > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > +
> > +	return cb;
> > +}
> > +
> > +int
> > +rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> > +				  uint16_t qp_id,
> > +				  struct rte_cryptodev_cb *cb)
> > +{
> > +	struct rte_cryptodev *dev;
> > +	struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > +	struct rte_cryptodev_enq_cb_rcu *list;
> > +	int ret;
> > +
> > +	ret = -EINVAL;
> No need to set EINVAL here. You are returning same value everywhere.
> The error numbers can be different at each exit.
Sure. I will take care returning different error numbers.
The initialization of 'ret' is required because it is checked below, just before
calling RCU sync.
> 
> > +
> > +	if (!cb) {
> > +		CDEV_LOG_ERR("cb is NULL");
> > +		return ret;
> You should directly return -EINVAL here and in below cases as well.
> 
> > +	}
> > +
> > +	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > +		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > +		return ret;
> Here return value should be -ENODEV
> 
> 
> > +	}
> > +
> > +	dev = &rte_crypto_devices[dev_id];
> > +	if (qp_id >= dev->data->nb_queue_pairs) {
> > +		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > +		return ret;
> > +	}
> > +
> > +	rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > +	if (dev->enq_cbs == NULL) {
> > +		CDEV_LOG_ERR("Callback not initialized");
> > +		goto cb_err;
> > +	}
> > +
> > +	list = &dev->enq_cbs[qp_id];
> > +	if (list == NULL) {
> > +		CDEV_LOG_ERR("Callback list is NULL");
> > +		goto cb_err;
> > +	}
> > +
> > +	if (list->qsbr == NULL) {
> > +		CDEV_LOG_ERR("Rcu qsbr is NULL");
> > +		goto cb_err;
> > +	}
> > +
> > +	prev_cb = &list->next;
> > +	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
> > +		curr_cb = *prev_cb;
> > +		if (curr_cb == cb) {
> > +			/* Remove the user cb from the callback list. */
> > +			__atomic_store_n(prev_cb, curr_cb->next,
> > +				__ATOMIC_RELAXED);
> > +			ret = 0;
> > +			break;
> > +		}
> > +	}
> > +
> > +	if (!ret) {
> > +		/* Call sync with invalid thread id as this is part of
> > +		 * control plane API
> > +		 */
> > +		rte_rcu_qsbr_synchronize(list->qsbr,
> > RTE_QSBR_THRID_INVALID);
> > +		rte_free(cb);
> > +	}
> > +
> > +cb_err:
> > +	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > +	return ret;
> > +}
> > +#endif
> >
> >  int
> >  rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats
> > *stats) diff --git a/lib/librte_cryptodev/rte_cryptodev.h
> > b/lib/librte_cryptodev/rte_cryptodev.h
> > index 0935fd5..1b7d7ef 100644
> > --- a/lib/librte_cryptodev/rte_cryptodev.h
> > +++ b/lib/librte_cryptodev/rte_cryptodev.h
> > @@ -23,6 +23,7 @@
> >  #include "rte_dev.h"
> >  #include <rte_common.h>
> >  #include <rte_config.h>
> > +#include <rte_rcu_qsbr.h>
> >
> >  #include "rte_cryptodev_trace_fp.h"
> >
> > @@ -522,6 +523,34 @@ struct rte_cryptodev_qp_conf {
> >  	/**< The mempool for creating sess private data in sessionless mode
> > */  };
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +/**
> > + * Function type used for pre processing crypto ops when enqueue
> > +burst is
> > + * called.
> > + *
> > + * The callback function is called on enqueue burst immediately
> > + * before the crypto ops are put onto the hardware queue for processing.
> > + *
> > + * @param	dev_id		The identifier of the device.
> > + * @param	qp_id		The index of the queue pair in which ops are
> > + *				to be enqueued for processing. The value
> > + *				must be in the range [0, nb_queue_pairs - 1]
> > + *				previously supplied to
> > + *				*rte_cryptodev_configure*.
> > + * @param	ops		The address of an array of *nb_ops* pointers
> > + *				to *rte_crypto_op* structures which contain
> > + *				the crypto operations to be processed.
> > + * @param	nb_ops		The number of operations to process.
> > + * @param	user_param	The arbitrary user parameter passed in by the
> > + *				application when the callback was originally
> > + *				registered.
> > + * @return			The number of ops to be enqueued to the
> > + *				crypto device.
> > + */
> > +typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t
> qp_id,
> > +		struct rte_crypto_op **ops, uint16_t nb_ops, void
> > *user_param);
> > +#endif
> > +
> >  /**
> >   * Typedef for application callback function to be registered by application
> >   * software for notification of device events @@ -822,7 +851,6 @@
> > struct rte_cryptodev_config {
> >  		enum rte_cryptodev_event_type event,
> >  		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
> >
> > -
> >  typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
> >  		struct rte_crypto_op **ops,	uint16_t nb_ops);
> >  /**< Dequeue processed packets from queue pair of a device. */ @@
> > -839,6 +867,33 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
> >  /** Structure to keep track of registered callbacks */
> > TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +/**
> > + * @internal
> > + * Structure used to hold information about the callbacks to be
> > +called for a
> > + * queue pair on enqueue.
> > + */
> > +struct rte_cryptodev_cb {
> > +	struct rte_cryptodev_cb *next;
> > +	/** < Pointer to next callback */
> > +	rte_cryptodev_callback_fn fn;
> > +	/** < Pointer to callback function */
> > +	void *arg;
> > +	/** < Pointer to argument */
> > +};
> > +
> > +/**
> > + * @internal
> > + * Structure used to hold information about the RCU for a queue pair.
> > + */
> > +struct rte_cryptodev_enq_cb_rcu {
> > +	struct rte_cryptodev_cb *next;
> > +	/** < Pointer to next callback */
> > +	struct rte_rcu_qsbr *qsbr;
> > +	/** < RCU QSBR variable per queue pair */ }; #endif
> > +
> >  /** The data structure associated with each crypto device. */  struct
> > rte_cryptodev {
> >  	dequeue_pkt_burst_t dequeue_burst;
> > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> >  	__extension__
> >  	uint8_t attached : 1;
> >  	/**< Flag indicating the device is attached */
> > +
> > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > +	/**< User application callback for pre enqueue processing */
> > +
> Extra line
ok
> 
> We should add support for post dequeue callbacks also. Since this is an LTS
> release And we wont be very flexible in future quarterly release, we should do
> all the changes In one go.
This patch set is driven by requirements. Recently, we got a requirement to have a
callback for dequeue as well. Looking at the code freeze date, I am not sure we can
target that as well. Let this patch go in and I will send a separate patch for the
dequeue callback.

> I believe we should also double check with techboard if this is an ABI breakage.
> IMO, it is ABI breakage, rte_cryprodevs is part of stable APIs, but not sure.
> 
> >  } __rte_cache_aligned;
> >
> >  void *
> > @@ -989,6 +1048,31 @@ struct rte_cryptodev_data {  {
> >  	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +	if (unlikely(dev->enq_cbs != NULL)) {
> > +		struct rte_cryptodev_enq_cb_rcu *list;
> > +		struct rte_cryptodev_cb *cb;
> > +
> > +		/* __ATOMIC_RELEASE memory order was used when the
> > +		* call back was inserted into the list.
> > +		* Since there is a clear dependency between loading
> > +		* cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order
> > is
> > +		* not required.
> > +		*/
> > +		list = &dev->enq_cbs[qp_id];
> > +		rte_rcu_qsbr_thread_online(list->qsbr, 0);
> > +		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> > +
> > +		while (cb != NULL) {
> > +			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> > +					cb->arg);
> > +			cb = cb->next;
> > +		};
> > +
> > +		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> > +	}
> > +#endif
> > +
> >  	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops,
> > nb_ops);
> >  	return (*dev->enqueue_burst)(
> >  			dev->data->queue_pairs[qp_id], ops, nb_ops); @@ -
> 1730,6 +1814,78
> > @@ struct rte_crypto_raw_dp_ctx {
> > rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
> >  		uint32_t n);
> >
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice
> > + *
> > + * Add a user callback for a given crypto device and queue pair which
> > +will be
> > + * called on crypto ops enqueue.
> > + *
> > + * This API configures a function to be called for each burst of
> > +crypto ops
> > + * received on a given crypto device queue pair. The return value is
> > +a pointer
> > + * that can be used later to remove the callback using
> > + * rte_cryptodev_remove_enq_callback().
> > + *
> > + * Multiple functions are called in the order that they are added.
> 
> Is there a limit for the number of cbs that can be added? Better to add a
> Comment here.
> 
> > + *
> > + * @param	dev_id		The identifier of the device.
> > + * @param	qp_id		The index of the queue pair in which ops are
> > + *				to be enqueued for processing. The value
> > + *				must be in the range [0, nb_queue_pairs - 1]
> > + *				previously supplied to
> > + *				*rte_cryptodev_configure*.
> > + * @param	cb_fn		The callback function
> > + * @param	cb_arg		A generic pointer parameter which will be
> > passed
> > + *				to each invocation of the callback function on
> > + *				this crypto device and queue pair.
> > + *
> > + * @return
> > + *   NULL on error.
> > + *   On success, a pointer value which can later be used to remove the
> callback.
> > + */
> > +
> > +__rte_experimental
> > +struct rte_cryptodev_cb *
> > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > +			       uint16_t qp_id,
> > +			       rte_cryptodev_callback_fn cb_fn,
> > +			       void *cb_arg);
> > +
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice
> > + *
> > + * Remove a user callback function for given crypto device and queue pair.
> > + *
> > + * This function is used to removed callbacks that were added to a
> > +crypto
> > + * device queue pair using rte_cryptodev_add_enq_callback().
> > + *
> > + *
> > + *
> > + * @param	dev_id		The identifier of the device.
> > + * @param	qp_id		The index of the queue pair in which ops are
> > + *				to be enqueued for processing. The value
> > + *				must be in the range [0, nb_queue_pairs - 1]
> > + *				previously supplied to
> > + *				*rte_cryptodev_configure*.
> > + * @param	cb		Pointer to user supplied callback created via
> > + *				rte_cryptodev_add_enq_callback().
> > + *
> > + * @return
> > + *   - 0: Success. Callback was removed.
> > + *   - -EINVAL:  The dev_id or the qp_id is out of range, or the callback
> > + *               is NULL or not found for the crypto device queue pair.
> > + */
> > +
> > +__rte_experimental
> > +int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> > +				      uint16_t qp_id,
> > +				      struct rte_cryptodev_cb *cb);
> > +
> > +#endif
> > +
> >  #ifdef __cplusplus
> >  }
> >  #endif
> > diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map
> > b/lib/librte_cryptodev/rte_cryptodev_version.map
> > index 7e4360f..5d8d6b0 100644
> > --- a/lib/librte_cryptodev/rte_cryptodev_version.map
> > +++ b/lib/librte_cryptodev/rte_cryptodev_version.map
> > @@ -101,6 +101,7 @@ EXPERIMENTAL {
> >  	rte_cryptodev_get_qp_status;
> >
> >  	# added in 20.11
> > +	rte_cryptodev_add_enq_callback;
> >  	rte_cryptodev_configure_raw_dp_ctx;
> >  	rte_cryptodev_get_raw_dp_ctx_size;
> >  	rte_cryptodev_raw_dequeue;
> > @@ -109,4 +110,5 @@ EXPERIMENTAL {
> >  	rte_cryptodev_raw_enqueue;
> >  	rte_cryptodev_raw_enqueue_burst;
> >  	rte_cryptodev_raw_enqueue_done;
> > +	rte_cryptodev_remove_enq_callback;
> >  };
> > --
> > 1.9.1
Akhil Goyal Oct. 27, 2020, 7:26 p.m. UTC | #8
Hi Abhinandan,

> > > +static int
> > > +cryptodev_cb_init(struct rte_cryptodev *dev) {
> > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > +	struct rte_rcu_qsbr *qsbr;
> > > +	uint16_t qp_id;
> > > +	size_t size;
> > > +
> > > +	/* Max thread set to 1, as one DP thread accessing a queue-pair */
> > > +	const uint32_t max_threads = 1;
> > > +
> > > +	dev->enq_cbs = rte_zmalloc(NULL,
> > > +				   sizeof(struct rte_cryptodev_enq_cb_rcu) *
> > > +				   dev->data->nb_queue_pairs, 0);
> > > +	if (dev->enq_cbs == NULL) {
> > > +		CDEV_LOG_ERR("Failed to allocate memory for callbacks");
> > > +		rte_errno = ENOMEM;
> > > +		return -1;
> > > +	}
> >
> > Why not return ENOMEM here? You are not using rte_errno while returning
> > from this function, so setting it does not have any meaning.
> This is a internal function. The caller is returning ENOMEM.

The caller can return the value returned from cryptodev_cb_init, instead of explicitly
returning ENOMEM.
There is no point in setting rte_errno here.


> > >  /** The data structure associated with each crypto device. */  struct
> > > rte_cryptodev {
> > >  	dequeue_pkt_burst_t dequeue_burst;
> > > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> > >  	__extension__
> > >  	uint8_t attached : 1;
> > >  	/**< Flag indicating the device is attached */
> > > +
> > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > +	/**< User application callback for pre enqueue processing */
> > > +
> > Extra line
> ok
> >
> > We should add support for post dequeue callbacks also. Since this is an LTS
> > release And we wont be very flexible in future quarterly release, we should do
> > all the changes In one go.
> This patch set is driven by requirement. Recently, we have a requirement to have
> callback for dequeue as well. Looking at code freeze date, I am not sure we can
> target that as well. Let this patch go in and I will send a separate patch for
> dequeue callback.
> 

We may not be able to change the rte_cryptodev structure so frequently.
It may only be allowed to change in the 21.11 release, which is too far away.
I think at least the cryptodev changes can go in RC2, and the test app for deq cbs
can go in RC3 if not RC2.

> > I believe we should also double check with techboard if this is an ABI breakage.
> > IMO, it is ABI breakage, rte_cryprodevs is part of stable APIs, but not sure.
> >
> > >  } __rte_cache_aligned;
> > >



> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice
> > > + *
> > > + * Add a user callback for a given crypto device and queue pair which
> > > +will be
> > > + * called on crypto ops enqueue.
> > > + *
> > > + * This API configures a function to be called for each burst of
> > > +crypto ops
> > > + * received on a given crypto device queue pair. The return value is
> > > +a pointer
> > > + * that can be used later to remove the callback using
> > > + * rte_cryptodev_remove_enq_callback().
> > > + *
> > > + * Multiple functions are called in the order that they are added.
> >
> > Is there a limit for the number of cbs that can be added? Better to add a
> > Comment here.

I think you missed this comment.
Gujjar, Abhinandan S Oct. 27, 2020, 7:41 p.m. UTC | #9
Hi Akhil,

> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Wednesday, October 28, 2020 12:56 AM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org;
> Doherty, Declan <declan.doherty@intel.com>;
> Honnappa.Nagarahalli@arm.com; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: Vangati, Narender <narender.vangati@intel.com>; jerinj@marvell.com
> Subject: RE: [v4 1/3] cryptodev: support enqueue callback functions
> 
> Hi Abhinandan,
> 
> > > > +static int
> > > > +cryptodev_cb_init(struct rte_cryptodev *dev) {
> > > > +	struct rte_cryptodev_enq_cb_rcu *list;
> > > > +	struct rte_rcu_qsbr *qsbr;
> > > > +	uint16_t qp_id;
> > > > +	size_t size;
> > > > +
> > > > +	/* Max thread set to 1, as one DP thread accessing a queue-pair */
> > > > +	const uint32_t max_threads = 1;
> > > > +
> > > > +	dev->enq_cbs = rte_zmalloc(NULL,
> > > > +				   sizeof(struct rte_cryptodev_enq_cb_rcu) *
> > > > +				   dev->data->nb_queue_pairs, 0);
> > > > +	if (dev->enq_cbs == NULL) {
> > > > +		CDEV_LOG_ERR("Failed to allocate memory for callbacks");
> > > > +		rte_errno = ENOMEM;
> > > > +		return -1;
> > > > +	}
> > >
> > > Why not return ENOMEM here? You are not using rte_errno while
> > > returning from this function, so setting it does not have any meaning.
> > This is a internal function. The caller is returning ENOMEM.
> 
> The caller can return the returned value from cryptodev_cb_init, instead of
> explicitly Returning ENOMEM.
> There is no point of setting rte_errno here.
Ok. I will update the patch.
> 
> 
> > > >  /** The data structure associated with each crypto device. */
> > > > struct rte_cryptodev {
> > > >  	dequeue_pkt_burst_t dequeue_burst; @@ -867,6 +922,10 @@ struct
> > > > rte_cryptodev {
> > > >  	__extension__
> > > >  	uint8_t attached : 1;
> > > >  	/**< Flag indicating the device is attached */
> > > > +
> > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > +	/**< User application callback for pre enqueue processing */
> > > > +
> > > Extra line
> > ok
> > >
> > > We should add support for post dequeue callbacks also. Since this is
> > > an LTS release And we wont be very flexible in future quarterly
> > > release, we should do all the changes In one go.
> > This patch set is driven by requirement. Recently, we have a
> > requirement to have callback for dequeue as well. Looking at code
> > freeze date, I am not sure we can target that as well. Let this patch
> > go in and I will send a separate patch for dequeue callback.
> >
> 
> We may not be able to change the rte_cryptodev structure so frequently.
> It may be allowed to change it 21.11 release. Which is too far.
> I think atleast the cryptodev changes can go in RC2 and test app for deq cbs
> Can go in RC3 if not RC2.
" cryptodev changes " -> Is it rte_cryptodev structure changes alone or supporting
dequeue callback as well in RC2? And then have test app changes in RC3?
If it is about adding dequeue callback support in RC2, I will try.
If it does not work, I hope we can still get the enqueue callback + rte_cryptodev structure
changes to support dequeue callbacks in the next patch set.
> 
> > > I believe we should also double check with techboard if this is an ABI
> breakage.
> > > IMO, it is ABI breakage, rte_cryprodevs is part of stable APIs, but not sure.
> > >
> > > >  } __rte_cache_aligned;
> > > >
> 
> 
> 
> > > >
> > > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > > +/**
> > > > + * @warning
> > > > + * @b EXPERIMENTAL: this API may change without prior notice
> > > > + *
> > > > + * Add a user callback for a given crypto device and queue pair
> > > > +which will be
> > > > + * called on crypto ops enqueue.
> > > > + *
> > > > + * This API configures a function to be called for each burst of
> > > > +crypto ops
> > > > + * received on a given crypto device queue pair. The return value
> > > > +is a pointer
> > > > + * that can be used later to remove the callback using
> > > > + * rte_cryptodev_remove_enq_callback().
> > > > + *
> > > > + * Multiple functions are called in the order that they are added.
> > >
> > > Is there a limit for the number of cbs that can be added? Better to
> > > add a Comment here.
> 
> I think you missed this comment.
There is no limitation as of now. I will add a comment about the same.
>
Gujjar, Abhinandan S Oct. 28, 2020, 8:20 a.m. UTC | #10
Hi Tech board members,

Could you please clarify the concern?
The latest patch (https://patches.dpdk.org/patch/82536/) supports both enqueue and dequeue callback functionality.

Thanks
Abhinandan

> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Tuesday, October 27, 2020 11:59 PM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org;
> Doherty, Declan <declan.doherty@intel.com>;
> Honnappa.Nagarahalli@arm.com; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; techboard@dpdk.org
> Cc: Vangati, Narender <narender.vangati@intel.com>; jerinj@marvell.com
> Subject: RE: [v4 1/3] cryptodev: support enqueue callback functions
> 
> Hi Tech board members,
> 
> I have a doubt about the ABI breakage in below addition of field.
> Could you please comment.
> 
> >  /** The data structure associated with each crypto device. */  struct
> > rte_cryptodev {
> >  	dequeue_pkt_burst_t dequeue_burst;
> > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> >  	__extension__
> >  	uint8_t attached : 1;
> >  	/**< Flag indicating the device is attached */
> > +
> > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > +	/**< User application callback for pre enqueue processing */
> > +
> >  } __rte_cache_aligned;
> 
> Here rte_cryptodevs is defined in stable API list in map file which is a pointer
> To all rte_cryptodev and the above change is changing the size of the structure.
> IMO, it seems an ABI breakage, but not sure. So wanted to double check.
> Now if it is an ABI breakage, then can we allow it? There was no deprecation
> notice Prior to this release.
> 
> Also I think if we are allowing the above change, then we should also add
> another Field for deq_cbs also for post crypto processing in this patch only.
> 
> Regards,
> Akhil
Ananyev, Konstantin Oct. 28, 2020, 12:55 p.m. UTC | #11
> 
> Hi Tech board members,
> 
> Could you please clarify the concern?
> The latest patch (https://patches.dpdk.org/patch/82536/) supports both enqueue and dequeue callback functionality.
> 
> Thanks
> Abhinandan
> 
> > -----Original Message-----
> > From: Akhil Goyal <akhil.goyal@nxp.com>
> > Sent: Tuesday, October 27, 2020 11:59 PM
> > To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; dev@dpdk.org;
> > Doherty, Declan <declan.doherty@intel.com>;
> > Honnappa.Nagarahalli@arm.com; Ananyev, Konstantin
> > <konstantin.ananyev@intel.com>; techboard@dpdk.org
> > Cc: Vangati, Narender <narender.vangati@intel.com>; jerinj@marvell.com
> > Subject: RE: [v4 1/3] cryptodev: support enqueue callback functions
> >
> > Hi Tech board members,
> >
> > I have a doubt about the ABI breakage in below addition of field.
> > Could you please comment.
> >
> > >  /** The data structure associated with each crypto device. */  struct
> > > rte_cryptodev {
> > >  	dequeue_pkt_burst_t dequeue_burst;
> > > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> > >  	__extension__
> > >  	uint8_t attached : 1;
> > >  	/**< Flag indicating the device is attached */
> > > +
> > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > +	/**< User application callback for pre enqueue processing */
> > > +
> > >  } __rte_cache_aligned;
> >
> > Here rte_cryptodevs is defined in stable API list in map file which is a pointer
> > To all rte_cryptodev and the above change is changing the size of the structure.

While this patch adds new fields into rte_cryptodev structure,
it doesn't change the size of it.
struct rte_cryptodev is cache line aligned, so it's current size:
128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
So for 64-bit we have 47B implicitly reserved, and for 32-bit we have 19B reserved.
That's enough to add two pointers without changing size of this struct. 

> > IMO, it seems an ABI breakage, but not sure. So wanted to double check.
> > Now if it is an ABI breakage, then can we allow it? There was no deprecation
> > notice Prior to this release.

Yes, there was no deprecation note in advance.
Though I think the risk is minimal - size of the struct will remain unchanged (see above).
My vote to let it in for 20.11.

> > Also I think if we are allowing the above change, then we should also add
> > another Field for deq_cbs also for post crypto processing in this patch only.

+1 for this.
I think it was already addressed in v5.

Konstantin
Akhil Goyal Oct. 28, 2020, 2:28 p.m. UTC | #12
Hi Konstantin,

> > > Hi Tech board members,
> > >
> > > I have a doubt about the ABI breakage in below addition of field.
> > > Could you please comment.
> > >
> > > >  /** The data structure associated with each crypto device. */  struct
> > > > rte_cryptodev {
> > > >  	dequeue_pkt_burst_t dequeue_burst;
> > > > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> > > >  	__extension__
> > > >  	uint8_t attached : 1;
> > > >  	/**< Flag indicating the device is attached */
> > > > +
> > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > +	/**< User application callback for pre enqueue processing */
> > > > +
> > > >  } __rte_cache_aligned;
> > >
> > > Here rte_cryptodevs is defined in stable API list in map file which is a pointer
> > > To all rte_cryptodev and the above change is changing the size of the
> structure.
> 
> While this patch adds new fields into rte_cryptodev structure,
> it doesn't change the size of it.
> struct rte_cryptodev is cache line aligned, so it's current size:
> 128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
> So for 64-bit we have 47B implicitly reserved, and for 32-bit we have 19B
> reserved.
> That's enough to add two pointers without changing size of this struct.
> 

The structure is cache aligned, and if the cache line size is 32 bytes and the compilation
is done on a 64-bit machine, then we will be left with 15 bytes, which is not sufficient for 2
pointers.
Do we have such systems? Am I missing something?

The reason I brought this to the techboard is to have a consensus on such a change,
as rte_cryptodev is a very popular and stable structure. Any changes to it may
have impacts which one person cannot judge across all use cases.

> > > IMO, it seems an ABI breakage, but not sure. So wanted to double check.
> > > Now if it is an ABI breakage, then can we allow it? There was no deprecation
> > > notice Prior to this release.
> 
> Yes, there was no deprecation note in advance.
> Though I think the risk is minimal - size of the struct will remain unchanged (see
> above).
> My vote to let it in for 20.11.
> 
> > > Also I think if we are allowing the above change, then we should also add
> > > another Field for deq_cbs also for post crypto processing in this patch only.
> 
> +1 for this.
> I think it was already addressed in v5.
> 
> Konstantin
Ananyev, Konstantin Oct. 28, 2020, 2:52 p.m. UTC | #13
Hi Akhil,
 
> Hi Konstantin,
> 
> > > > Hi Tech board members,
> > > >
> > > > I have a doubt about the ABI breakage in below addition of field.
> > > > Could you please comment.
> > > >
> > > > >  /** The data structure associated with each crypto device. */  struct
> > > > > rte_cryptodev {
> > > > >  	dequeue_pkt_burst_t dequeue_burst;
> > > > > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> > > > >  	__extension__
> > > > >  	uint8_t attached : 1;
> > > > >  	/**< Flag indicating the device is attached */
> > > > > +
> > > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > > +	/**< User application callback for pre enqueue processing */
> > > > > +
> > > > >  } __rte_cache_aligned;
> > > >
> > > > Here rte_cryptodevs is defined in stable API list in map file which is a pointer
> > > > To all rte_cryptodev and the above change is changing the size of the
> > structure.
> >
> > While this patch adds new fields into rte_cryptodev structure,
> > it doesn't change the size of it.
> > struct rte_cryptodev is cache line aligned, so it's current size:
> > 128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
> > So for 64-bit we have 47B implicitly reserved, and for 32-bit we have 19B
> > reserved.
> > That's enough to add two pointers without changing size of this struct.
> >
> 
> The structure is cache aligned, and if the cache line size in 32Byte and the compilation
> is done on 64bit machine, then we will be left with 15Bytes which is not sufficient for 2
> pointers.
> Do we have such systems? 

AFAIK - no, minimal supported cache-line size: 64B:
lib/librte_eal/include/rte_common.h:#define RTE_CACHE_LINE_MIN_SIZE 64

> Am I missing something?

> The reason I brought this into techboard is to have a consensus on such change
> As rte_cryptodev is a very popular and stable structure. Any changes to it may
> Have impacts which one person cannot judge all use cases.

+1 here.
I also think it would be good to get other TB members opinion about proposed changes.
 
> > > > IMO, it seems an ABI breakage, but not sure. So wanted to double check.
> > > > Now if it is an ABI breakage, then can we allow it? There was no deprecation
> > > > notice Prior to this release.
> >
> > Yes, there was no deprecation note in advance.
> > Though I think the risk is minimal - size of the struct will remain unchanged (see
> > above).
> > My vote to let it in for 20.11.
> >
> > > > Also I think if we are allowing the above change, then we should also add
> > > > another Field for deq_cbs also for post crypto processing in this patch only.
> >
> > +1 for this.
> > I think it was already addressed in v5.
> >
> > Konstantin
Bruce Richardson Oct. 28, 2020, 3:11 p.m. UTC | #14
On Wed, Oct 28, 2020 at 02:28:43PM +0000, Akhil Goyal wrote:
> 
> Hi Konstantin,
> 
> > > > Hi Tech board members,
> > > >
> > > > I have a doubt about the ABI breakage in below addition of field.
> > > > Could you please comment.
> > > >
> > > > >  /** The data structure associated with each crypto device. */  struct
> > > > > rte_cryptodev {
> > > > >  	dequeue_pkt_burst_t dequeue_burst;
> > > > > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> > > > >  	__extension__
> > > > >  	uint8_t attached : 1;
> > > > >  	/**< Flag indicating the device is attached */
> > > > > +
> > > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > > +	/**< User application callback for pre enqueue processing */
> > > > > +
> > > > >  } __rte_cache_aligned;
> > > >
> > > > Here rte_cryptodevs is defined in stable API list in map file which is a pointer
> > > > To all rte_cryptodev and the above change is changing the size of the
> > structure.
> > 
> > While this patch adds new fields into rte_cryptodev structure,
> > it doesn't change the size of it.
> > struct rte_cryptodev is cache line aligned, so it's current size:
> > 128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
> > So for 64-bit we have 47B implicitly reserved, and for 32-bit we have 19B
> > reserved.
> > That's enough to add two pointers without changing size of this struct.
> > 
> 
> The structure is cache aligned, and if the cache line size in 32Byte and the compilation
> is done on 64bit machine, then we will be left with 15Bytes which is not sufficient for 2
> pointers.
> Do we have such systems? Am I missing something?
> 

I don't think we support any such systems, so unless someone can point out
a specific case where we need to support 32-byte CLs, I'd tend towards
ignoring this as a non-issue.

> The reason I brought this into techboard is to have a consensus on such change
> As rte_cryptodev is a very popular and stable structure. Any changes to it may
> Have impacts which one person cannot judge all use cases.
>

Haven't been tracking this discussion much, but from what I read here, this
doesn't look like an ABI break and should be ok.

Regards,
/Bruce
Honnappa Nagarahalli Oct. 28, 2020, 3:22 p.m. UTC | #15
+ Ray for ABI

<snip>

> 
> On Wed, Oct 28, 2020 at 02:28:43PM +0000, Akhil Goyal wrote:
> >
> > Hi Konstantin,
> >
> > > > > Hi Tech board members,
> > > > >
> > > > > I have a doubt about the ABI breakage in below addition of field.
> > > > > Could you please comment.
> > > > >
> > > > > >  /** The data structure associated with each crypto device. */
> > > > > > struct rte_cryptodev {
> > > > > >  	dequeue_pkt_burst_t dequeue_burst; @@ -867,6 +922,10
> @@
> > > > > > struct rte_cryptodev {
> > > > > >  	__extension__
> > > > > >  	uint8_t attached : 1;
> > > > > >  	/**< Flag indicating the device is attached */
> > > > > > +
> > > > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > > > +	/**< User application callback for pre enqueue processing */
> > > > > > +
> > > > > >  } __rte_cache_aligned;
> > > > >
> > > > > Here rte_cryptodevs is defined in stable API list in map file
> > > > > which is a pointer To all rte_cryptodev and the above change is
> > > > > changing the size of the
> > > structure.
> > >
> > > While this patch adds new fields into rte_cryptodev structure, it
> > > doesn't change the size of it.
> > > struct rte_cryptodev is cache line aligned, so it's current size:
> > > 128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
> > > So for 64-bit we have 47B implicitly reserved, and for 32-bit we
> > > have 19B reserved.
> > > That's enough to add two pointers without changing size of this struct.
> > >
> >
> > The structure is cache aligned, and if the cache line size in 32Byte
> > and the compilation is done on 64bit machine, then we will be left
> > with 15Bytes which is not sufficient for 2 pointers.
> > Do we have such systems? Am I missing something?
> >
> 
> I don't think we support any such systems, so unless someone can point out
> a specific case where we need to support 32-byte CLs, I'd tend towards
> ignoring this as a non-issue.
Agree. I have not come across 32B cache line.

> 
> > The reason I brought this into techboard is to have a consensus on
> > such change As rte_cryptodev is a very popular and stable structure.
> > Any changes to it may Have impacts which one person cannot judge all use
> cases.
> >
> 
> Haven't been tracking this discussion much, but from what I read here, this
> doesn't look like an ABI break and should be ok.
If we are filling the holes in the cache line with new fields, it should not be an ABI break.

> 
> Regards,
> /Bruce
Gujjar, Abhinandan S Oct. 29, 2020, 1:52 p.m. UTC | #16
Hi Akhil,

Any updates on this?

Thanks
Abhinandan

> -----Original Message-----
> From: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
> Sent: Wednesday, October 28, 2020 8:52 PM
> To: Richardson, Bruce <bruce.richardson@intel.com>; Akhil.goyal@nxp.com;
> Ray Kinsella <mdr@ashroe.eu>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Gujjar, Abhinandan S
> <abhinandan.gujjar@intel.com>; dev@dpdk.org; Doherty, Declan
> <declan.doherty@intel.com>; techboard@dpdk.org; Vangati, Narender
> <narender.vangati@intel.com>; jerinj@marvell.com; nd <nd@arm.com>
> Subject: RE: [dpdk-techboard] [v4 1/3] cryptodev: support enqueue callback
> functions
> 
> + Ray for ABI
> 
> <snip>
> 
> >
> > On Wed, Oct 28, 2020 at 02:28:43PM +0000, Akhil Goyal wrote:
> > >
> > > Hi Konstantin,
> > >
> > > > > > Hi Tech board members,
> > > > > >
> > > > > > I have a doubt about the ABI breakage in below addition of field.
> > > > > > Could you please comment.
> > > > > >
> > > > > > >  /** The data structure associated with each crypto device.
> > > > > > > */ struct rte_cryptodev {
> > > > > > >  	dequeue_pkt_burst_t dequeue_burst; @@ -867,6 +922,10
> > @@
> > > > > > > struct rte_cryptodev {
> > > > > > >  	__extension__
> > > > > > >  	uint8_t attached : 1;
> > > > > > >  	/**< Flag indicating the device is attached */
> > > > > > > +
> > > > > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > > > > +	/**< User application callback for pre enqueue processing
> > > > > > > +*/
> > > > > > > +
> > > > > > >  } __rte_cache_aligned;
> > > > > >
> > > > > > Here rte_cryptodevs is defined in stable API list in map file
> > > > > > which is a pointer To all rte_cryptodev and the above change
> > > > > > is changing the size of the
> > > > structure.
> > > >
> > > > While this patch adds new fields into rte_cryptodev structure, it
> > > > doesn't change the size of it.
> > > > struct rte_cryptodev is cache line aligned, so it's current size:
> > > > 128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
> > > > So for 64-bit we have 47B implicitly reserved, and for 32-bit we
> > > > have 19B reserved.
> > > > That's enough to add two pointers without changing size of this struct.
> > > >
> > >
> > > The structure is cache aligned, and if the cache line size in 32Byte
> > > and the compilation is done on 64bit machine, then we will be left
> > > with 15Bytes which is not sufficient for 2 pointers.
> > > Do we have such systems? Am I missing something?
> > >
> >
> > I don't think we support any such systems, so unless someone can point
> > out a specific case where we need to support 32-byte CLs, I'd tend
> > towards ignoring this as a non-issue.
> Agree. I have not come across 32B cache line.
> 
> >
> > > The reason I brought this into techboard is to have a consensus on
> > > such change As rte_cryptodev is a very popular and stable structure.
> > > Any changes to it may Have impacts which one person cannot judge all
> > > use
> > cases.
> > >
> >
> > Haven't been tracking this discussion much, but from what I read here,
> > this doesn't look like an ABI break and should be ok.
> If we are filling the holes in the cache line with new fields, it should not be an
> ABI break.
> 
> >
> > Regards,
> > /Bruce
Akhil Goyal Oct. 29, 2020, 2 p.m. UTC | #17
> 
> Hi Akhil,
> 
> Any updates on this?
> 
There have been no objections to this patch from the techboard.

@Thomas Monjalon: could you please review the release notes.
I believe there should be a bullet for API changes to add 2 new fields in rte_cryptodev.
What do you suggest?

@Gujjar, Abhinandan S
Please send a new version for comments on errno.
If possible add cases for deq_cbs as well. If not, send it by next week.

Regards,
Akhil
> > + Ray for ABI
> >
> > <snip>
> >
> > >
> > > On Wed, Oct 28, 2020 at 02:28:43PM +0000, Akhil Goyal wrote:
> > > >
> > > > Hi Konstantin,
> > > >
> > > > > > > Hi Tech board members,
> > > > > > >
> > > > > > > I have a doubt about the ABI breakage in below addition of field.
> > > > > > > Could you please comment.
> > > > > > >
> > > > > > > >  /** The data structure associated with each crypto device.
> > > > > > > > */ struct rte_cryptodev {
> > > > > > > >  	dequeue_pkt_burst_t dequeue_burst; @@ -867,6 +922,10
> > > @@
> > > > > > > > struct rte_cryptodev {
> > > > > > > >  	__extension__
> > > > > > > >  	uint8_t attached : 1;
> > > > > > > >  	/**< Flag indicating the device is attached */
> > > > > > > > +
> > > > > > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > > > > > +	/**< User application callback for pre enqueue processing
> > > > > > > > +*/
> > > > > > > > +
> > > > > > > >  } __rte_cache_aligned;
> > > > > > >
> > > > > > > Here rte_cryptodevs is defined in stable API list in map file
> > > > > > > which is a pointer To all rte_cryptodev and the above change
> > > > > > > is changing the size of the
> > > > > structure.
> > > > >
> > > > > While this patch adds new fields into rte_cryptodev structure, it
> > > > > doesn't change the size of it.
> > > > > struct rte_cryptodev is cache line aligned, so it's current size:
> > > > > 128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
> > > > > So for 64-bit we have 47B implicitly reserved, and for 32-bit we
> > > > > have 19B reserved.
> > > > > That's enough to add two pointers without changing size of this struct.
> > > > >
> > > >
> > > > The structure is cache aligned, and if the cache line size in 32Byte
> > > > and the compilation is done on 64bit machine, then we will be left
> > > > with 15Bytes which is not sufficient for 2 pointers.
> > > > Do we have such systems? Am I missing something?
> > > >
> > >
> > > I don't think we support any such systems, so unless someone can point
> > > out a specific case where we need to support 32-byte CLs, I'd tend
> > > towards ignoring this as a non-issue.
> > Agree. I have not come across 32B cache line.
> >
> > >
> > > > The reason I brought this into techboard is to have a consensus on
> > > > such change As rte_cryptodev is a very popular and stable structure.
> > > > Any changes to it may Have impacts which one person cannot judge all
> > > > use
> > > cases.
> > > >
> > >
> > > Haven't been tracking this discussion much, but from what I read here,
> > > this doesn't look like an ABI break and should be ok.
> > If we are filling the holes in the cache line with new fields, it should not be an
> > ABI break.
> >
> > >
> > > Regards,
> > > /Bruce
Kinsella, Ray Oct. 29, 2020, 2:26 p.m. UTC | #18
On 28/10/2020 15:22, Honnappa Nagarahalli wrote:
> + Ray for ABI
> 
> <snip>
> 
>>
>> On Wed, Oct 28, 2020 at 02:28:43PM +0000, Akhil Goyal wrote:
>>>
>>> Hi Konstantin,
>>>
>>>>>> Hi Tech board members,
>>>>>>
>>>>>> I have a doubt about the ABI breakage in below addition of field.
>>>>>> Could you please comment.
>>>>>>
>>>>>>>  /** The data structure associated with each crypto device. */
>>>>>>> struct rte_cryptodev {
>>>>>>>  	dequeue_pkt_burst_t dequeue_burst; @@ -867,6 +922,10
>> @@
>>>>>>> struct rte_cryptodev {
>>>>>>>  	__extension__
>>>>>>>  	uint8_t attached : 1;
>>>>>>>  	/**< Flag indicating the device is attached */
>>>>>>> +
>>>>>>> +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
>>>>>>> +	/**< User application callback for pre enqueue processing */
>>>>>>> +
>>>>>>>  } __rte_cache_aligned;
>>>>>>
>>>>>> Here rte_cryptodevs is defined in stable API list in map file
>>>>>> which is a pointer To all rte_cryptodev and the above change is
>>>>>> changing the size of the
>>>> structure.
>>>>
>>>> While this patch adds new fields into rte_cryptodev structure, it
>>>> doesn't change the size of it.
>>>> struct rte_cryptodev is cache line aligned, so it's current size:
>>>> 128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
>>>> So for 64-bit we have 47B implicitly reserved, and for 32-bit we
>>>> have 19B reserved.
>>>> That's enough to add two pointers without changing size of this struct.
>>>>
>>>
>>> The structure is cache aligned, and if the cache line size in 32Byte
>>> and the compilation is done on 64bit machine, then we will be left
>>> with 15Bytes which is not sufficient for 2 pointers.
>>> Do we have such systems? Am I missing something?
>>>
>>
>> I don't think we support any such systems, so unless someone can point out
>> a specific case where we need to support 32-byte CLs, I'd tend towards
>> ignoring this as a non-issue.
> Agree. I have not come across 32B cache line.
> 
>>
>>> The reason I brought this into techboard is to have a consensus on
>>> such change As rte_cryptodev is a very popular and stable structure.
>>> Any changes to it may Have impacts which one person cannot judge all use
>> cases.
>>>
>>
>> Haven't been tracking this discussion much, but from what I read here, this
>> doesn't look like an ABI break and should be ok.
> If we are filling the holes in the cache line with new fields, it should not be an ABI break.

Agreed, the risk seems minimal ... it is an ABI breakage window in any case.
 
>>
>> Regards,
>> /Bruce
Gujjar, Abhinandan S Oct. 30, 2020, 4:24 a.m. UTC | #19
Thanks Tech board & Akhil for clarifying the concern.
Sure. I will send the new version of the patch.

Regards
Abhinandan

> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Thursday, October 29, 2020 7:31 PM
> To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; Honnappa
> Nagarahalli <Honnappa.Nagarahalli@arm.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Ray Kinsella <mdr@ashroe.eu>; Thomas
> Monjalon <thomas@monjalon.net>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org;
> Doherty, Declan <declan.doherty@intel.com>; techboard@dpdk.org; Vangati,
> Narender <narender.vangati@intel.com>; jerinj@marvell.com; nd
> <nd@arm.com>
> Subject: RE: [dpdk-techboard] [v4 1/3] cryptodev: support enqueue callback
> functions
> 
> >
> > Hi Akhil,
> >
> > Any updates on this?
> >
> There has been no objections for this patch from techboard.
> 
> @Thomas Monjalon: could you please review the release notes.
> I believe there should be a bullet for API changes to add 2 new fields in
> rte_cryptodev.
> What do you suggest?
> 
> @Gujjar, Abhinandan S
> Please send a new version for comments on errno.
> If possible add cases for deq_cbs as well. If not, send it by next week.

> 
> Regards,
> Akhil
> > > + Ray for ABI
> > >
> > > <snip>
> > >
> > > >
> > > > On Wed, Oct 28, 2020 at 02:28:43PM +0000, Akhil Goyal wrote:
> > > > >
> > > > > Hi Konstantin,
> > > > >
> > > > > > > > Hi Tech board members,
> > > > > > > >
> > > > > > > > I have a doubt about the ABI breakage in below addition of field.
> > > > > > > > Could you please comment.
> > > > > > > >
> > > > > > > > >  /** The data structure associated with each crypto device.
> > > > > > > > > */ struct rte_cryptodev {
> > > > > > > > >  	dequeue_pkt_burst_t dequeue_burst; @@ -867,6 +922,10
> > > > @@
> > > > > > > > > struct rte_cryptodev {
> > > > > > > > >  	__extension__
> > > > > > > > >  	uint8_t attached : 1;
> > > > > > > > >  	/**< Flag indicating the device is attached */
> > > > > > > > > +
> > > > > > > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > > > > > > +	/**< User application callback for pre enqueue
> > > > > > > > > +processing */
> > > > > > > > > +
> > > > > > > > >  } __rte_cache_aligned;
> > > > > > > >
> > > > > > > > Here rte_cryptodevs is defined in stable API list in map
> > > > > > > > file which is a pointer To all rte_cryptodev and the above
> > > > > > > > change is changing the size of the
> > > > > > structure.
> > > > > >
> > > > > > While this patch adds new fields into rte_cryptodev structure,
> > > > > > it doesn't change the size of it.
> > > > > > struct rte_cryptodev is cache line aligned, so it's current size:
> > > > > > 128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
> > > > > > So for 64-bit we have 47B implicitly reserved, and for 32-bit
> > > > > > we have 19B reserved.
> > > > > > That's enough to add two pointers without changing size of this struct.
> > > > > >
> > > > >
> > > > > The structure is cache aligned, and if the cache line size in
> > > > > 32Byte and the compilation is done on 64bit machine, then we
> > > > > will be left with 15Bytes which is not sufficient for 2 pointers.
> > > > > Do we have such systems? Am I missing something?
> > > > >
> > > >
> > > > I don't think we support any such systems, so unless someone can
> > > > point out a specific case where we need to support 32-byte CLs,
> > > > I'd tend towards ignoring this as a non-issue.
> > > Agree. I have not come across 32B cache line.
> > >
> > > >
> > > > > The reason I brought this into techboard is to have a consensus
> > > > > on such change As rte_cryptodev is a very popular and stable structure.
> > > > > Any changes to it may Have impacts which one person cannot judge
> > > > > all use
> > > > cases.
> > > > >
> > > >
> > > > Haven't been tracking this discussion much, but from what I read
> > > > here, this doesn't look like an ABI break and should be ok.
> > > If we are filling the holes in the cache line with new fields, it
> > > should not be an ABI break.
> > >
> > > >
> > > > Regards,
> > > > /Bruce
Gujjar, Abhinandan S Oct. 30, 2020, 5:18 p.m. UTC | #20
Hi Akhil,

I have sent the v6 patch for RC2.
As discussed, I will get the test app updated for dequeue callback for RC3.

Thanks
Abhinandan

> -----Original Message-----
> From: Gujjar, Abhinandan S
> Sent: Friday, October 30, 2020 9:54 AM
> To: Akhil Goyal <akhil.goyal@nxp.com>; Honnappa Nagarahalli
> <Honnappa.Nagarahalli@arm.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Ray Kinsella <mdr@ashroe.eu>; Thomas
> Monjalon <thomas@monjalon.net>
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org;
> Doherty, Declan <declan.doherty@intel.com>; techboard@dpdk.org; Vangati,
> Narender <narender.vangati@intel.com>; jerinj@marvell.com; nd
> <nd@arm.com>
> Subject: RE: [dpdk-techboard] [v4 1/3] cryptodev: support enqueue callback
> functions
> 
> 
> Thanks Tech board & Akhil for clarifying the concern.
> Sure. I will send the new version of the patch.
> 
> Regards
> Abhinandan
> 
> > -----Original Message-----
> > From: Akhil Goyal <akhil.goyal@nxp.com>
> > Sent: Thursday, October 29, 2020 7:31 PM
> > To: Gujjar, Abhinandan S <abhinandan.gujjar@intel.com>; Honnappa
> > Nagarahalli <Honnappa.Nagarahalli@arm.com>; Richardson, Bruce
> > <bruce.richardson@intel.com>; Ray Kinsella <mdr@ashroe.eu>; Thomas
> > Monjalon <thomas@monjalon.net>
> > Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org;
> > Doherty, Declan <declan.doherty@intel.com>; techboard@dpdk.org;
> > Vangati, Narender <narender.vangati@intel.com>; jerinj@marvell.com; nd
> > <nd@arm.com>
> > Subject: RE: [dpdk-techboard] [v4 1/3] cryptodev: support enqueue
> > callback functions
> >
> > >
> > > Hi Akhil,
> > >
> > > Any updates on this?
> > >
> > There has been no objections for this patch from techboard.
> >
> > @Thomas Monjalon: could you please review the release notes.
> > I believe there should be a bullet for API changes to add 2 new fields
> > in rte_cryptodev.
> > What do you suggest?
> >
> > @Gujjar, Abhinandan S
> > Please send a new version for comments on errno.
> > If possible add cases for deq_cbs as well. If not, send it by next week.
> 
> >
> > Regards,
> > Akhil
> > > > + Ray for ABI
> > > >
> > > > <snip>
> > > >
> > > > >
> > > > > On Wed, Oct 28, 2020 at 02:28:43PM +0000, Akhil Goyal wrote:
> > > > > >
> > > > > > Hi Konstantin,
> > > > > >
> > > > > > > > > Hi Tech board members,
> > > > > > > > >
> > > > > > > > > I have a doubt about the ABI breakage in below addition of field.
> > > > > > > > > Could you please comment.
> > > > > > > > >
> > > > > > > > > >  /** The data structure associated with each crypto device.
> > > > > > > > > > */ struct rte_cryptodev {
> > > > > > > > > >  	dequeue_pkt_burst_t dequeue_burst; @@ -867,6
> +922,10
> > > > > @@
> > > > > > > > > > struct rte_cryptodev {
> > > > > > > > > >  	__extension__
> > > > > > > > > >  	uint8_t attached : 1;
> > > > > > > > > >  	/**< Flag indicating the device is attached */
> > > > > > > > > > +
> > > > > > > > > > +	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > > > > > > > > +	/**< User application callback for pre enqueue
> > > > > > > > > > +processing */
> > > > > > > > > > +
> > > > > > > > > >  } __rte_cache_aligned;
> > > > > > > > >
> > > > > > > > > Here rte_cryptodevs is defined in stable API list in map
> > > > > > > > > file which is a pointer To all rte_cryptodev and the
> > > > > > > > > above change is changing the size of the
> > > > > > > structure.
> > > > > > >
> > > > > > > While this patch adds new fields into rte_cryptodev
> > > > > > > structure, it doesn't change the size of it.
> > > > > > > struct rte_cryptodev is cache line aligned, so it's current size:
> > > > > > > 128B for 64-bit systems, and 64B(/128B) for 32-bit systems.
> > > > > > > So for 64-bit we have 47B implicitly reserved, and for
> > > > > > > 32-bit we have 19B reserved.
> > > > > > > That's enough to add two pointers without changing size of this
> struct.
> > > > > > >
> > > > > >
> > > > > > The structure is cache aligned, and if the cache line size in
> > > > > > 32Byte and the compilation is done on 64bit machine, then we
> > > > > > will be left with 15Bytes which is not sufficient for 2 pointers.
> > > > > > Do we have such systems? Am I missing something?
> > > > > >
> > > > >
> > > > > I don't think we support any such systems, so unless someone can
> > > > > point out a specific case where we need to support 32-byte CLs,
> > > > > I'd tend towards ignoring this as a non-issue.
> > > > Agree. I have not come across 32B cache line.
> > > >
> > > > >
> > > > > > The reason I brought this into techboard is to have a
> > > > > > consensus on such change As rte_cryptodev is a very popular and
> stable structure.
> > > > > > Any changes to it may Have impacts which one person cannot
> > > > > > judge all use
> > > > > cases.
> > > > > >
> > > > >
> > > > > Haven't been tracking this discussion much, but from what I read
> > > > > here, this doesn't look like an ABI break and should be ok.
> > > > If we are filling the holes in the cache line with new fields, it
> > > > should not be an ABI break.
> > > >
> > > > >
> > > > > Regards,
> > > > > /Bruce

Patch
diff mbox series

diff --git a/config/rte_config.h b/config/rte_config.h
index 03d90d7..e999d93 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -61,6 +61,7 @@ 
 /* cryptodev defines */
 #define RTE_CRYPTO_MAX_DEVS 64
 #define RTE_CRYPTODEV_NAME_LEN 64
+#define RTE_CRYPTO_CALLBACKS 1
 
 /* compressdev defines */
 #define RTE_COMPRESS_MAX_DEVS 64
diff --git a/lib/librte_cryptodev/meson.build b/lib/librte_cryptodev/meson.build
index c4c6b3b..8c5493f 100644
--- a/lib/librte_cryptodev/meson.build
+++ b/lib/librte_cryptodev/meson.build
@@ -9,4 +9,4 @@  headers = files('rte_cryptodev.h',
 	'rte_crypto.h',
 	'rte_crypto_sym.h',
 	'rte_crypto_asym.h')
-deps += ['kvargs', 'mbuf']
+deps += ['kvargs', 'mbuf', 'rcu']
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 3d95ac6..0880d9b 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -448,6 +448,91 @@  struct rte_cryptodev_sym_session_pool_private_data {
 	return 0;
 }
 
+#ifdef RTE_CRYPTO_CALLBACKS
+/* spinlock for crypto device enq callbacks */
+static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
+
+static void
+cryptodev_cb_cleanup(struct rte_cryptodev *dev)
+{
+	struct rte_cryptodev_cb **prev_cb, *curr_cb;
+	struct rte_cryptodev_enq_cb_rcu *list;
+	uint16_t qp_id;
+
+	if (dev->enq_cbs == NULL)
+		return;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		list = &dev->enq_cbs[qp_id];
+		prev_cb = &list->next;
+
+		while (*prev_cb != NULL) {
+			curr_cb = *prev_cb;
+			/* Remove the user cb from the callback list. */
+			__atomic_store_n(prev_cb, curr_cb->next,
+				__ATOMIC_RELAXED);
+			rte_rcu_qsbr_synchronize(list->qsbr,
+				RTE_QSBR_THRID_INVALID);
+			rte_free(curr_cb);
+		}
+
+		rte_free(list->qsbr);
+	}
+
+	rte_free(dev->enq_cbs);
+	dev->enq_cbs = NULL;
+}
+
+static int
+cryptodev_cb_init(struct rte_cryptodev *dev)
+{
+	struct rte_cryptodev_enq_cb_rcu *list;
+	struct rte_rcu_qsbr *qsbr;
+	uint16_t qp_id;
+	size_t size;
+
+	/* Max thread set to 1, as one DP thread accessing a queue-pair */
+	const uint32_t max_threads = 1;
+
+	dev->enq_cbs = rte_zmalloc(NULL,
+				   sizeof(struct rte_cryptodev_enq_cb_rcu) *
+				   dev->data->nb_queue_pairs, 0);
+	if (dev->enq_cbs == NULL) {
+		CDEV_LOG_ERR("Failed to allocate memory for callbacks");
+		rte_errno = ENOMEM;
+		return -1;
+	}
+
+	/* Create RCU QSBR variable */
+	size = rte_rcu_qsbr_get_memsize(max_threads);
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		list = &dev->enq_cbs[qp_id];
+		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+		if (qsbr == NULL) {
+			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
+				"queue_pair_id=%d", qp_id);
+			goto cb_init_err;
+		}
+
+		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
+			CDEV_LOG_ERR("Failed to initialize for RCU on "
+				"queue_pair_id=%d", qp_id);
+			goto cb_init_err;
+		}
+
+		list->qsbr = qsbr;
+	}
+
+	return 0;
+
+cb_init_err:
+	rte_errno = ENOMEM;
+	cryptodev_cb_cleanup(dev);
+	return -1;
+
+}
+#endif
 
 const char *
 rte_cryptodev_get_feature_name(uint64_t flag)
@@ -927,6 +1012,11 @@  struct rte_cryptodev *
 
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
 
+#ifdef RTE_CRYPTO_CALLBACKS
+	rte_spinlock_lock(&rte_cryptodev_callback_lock);
+	cryptodev_cb_cleanup(dev);
+	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+#endif
 	/* Setup new number of queue pairs and reconfigure device. */
 	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
 			config->socket_id);
@@ -936,6 +1026,15 @@  struct rte_cryptodev *
 		return diag;
 	}
 
+#ifdef RTE_CRYPTO_CALLBACKS
+	rte_spinlock_lock(&rte_cryptodev_callback_lock);
+	diag = cryptodev_cb_init(dev);
+	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+	if (diag) {
+		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
+		return -ENOMEM;
+	}
+#endif
 	rte_cryptodev_trace_configure(dev_id, config);
 	return (*dev->dev_ops->dev_configure)(dev, config);
 }
@@ -1136,6 +1235,137 @@  struct rte_cryptodev *
 			socket_id);
 }
 
+#ifdef RTE_CRYPTO_CALLBACKS
+/*
+ * Register @cb_fn (with user argument @cb_arg) to be called on each
+ * enqueue burst for (dev_id, qp_id). Callbacks run in FIFO order of
+ * registration. Returns a handle for later removal, or NULL on error
+ * with rte_errno set.
+ */
+struct rte_cryptodev_cb *
+rte_cryptodev_add_enq_callback(uint8_t dev_id,
+			       uint16_t qp_id,
+			       rte_cryptodev_callback_fn cb_fn,
+			       void *cb_arg)
+{
+	struct rte_cryptodev *dev;
+	struct rte_cryptodev_enq_cb_rcu *list;
+	struct rte_cryptodev_cb *cb, *tail;
+
+	if (cb_fn == NULL) {
+		CDEV_LOG_ERR("Callback function is NULL");
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		rte_errno = ENODEV;
+		return NULL;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+	if (qp_id >= dev->data->nb_queue_pairs) {
+		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
+		rte_errno = ENODEV;
+		return NULL;
+	}
+
+	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
+	if (cb == NULL) {
+		CDEV_LOG_ERR("Failed to allocate memory for callback on "
+			     "dev=%d, queue_pair_id=%d", dev_id, qp_id);
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	rte_spinlock_lock(&rte_cryptodev_callback_lock);
+
+	/* The per-queue-pair lists are created by rte_cryptodev_configure();
+	 * bail out instead of dereferencing a NULL enq_cbs array, and do
+	 * not leak the callback just allocated.
+	 */
+	if (dev->enq_cbs == NULL) {
+		CDEV_LOG_ERR("Callback not initialized");
+		rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+		rte_free(cb);
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	cb->fn = cb_fn;
+	cb->arg = cb_arg;
+
+	/* Add the callbacks in fifo order. */
+	list = &dev->enq_cbs[qp_id];
+	tail = list->next;
+
+	if (tail) {
+		while (tail->next)
+			tail = tail->next;
+		/* Stores to cb->fn and cb->param should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+	} else {
+		/* Stores to cb->fn and cb->param should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
+	}
+
+	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+
+	return cb;
+}
+
+/*
+ * Remove a callback previously registered with
+ * rte_cryptodev_add_enq_callback() from (dev_id, qp_id), waiting for
+ * data-plane readers to quiesce before freeing it.
+ * Returns 0 on success, -EINVAL on bad arguments or if @cb is not found.
+ */
+int
+rte_cryptodev_remove_enq_callback(uint8_t dev_id,
+				  uint16_t qp_id,
+				  struct rte_cryptodev_cb *cb)
+{
+	struct rte_cryptodev *dev;
+	struct rte_cryptodev_cb **prev_cb, *curr_cb;
+	struct rte_cryptodev_enq_cb_rcu *list;
+	int ret;
+
+	ret = -EINVAL;
+
+	if (cb == NULL) {
+		CDEV_LOG_ERR("cb is NULL");
+		return ret;
+	}
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return ret;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+	if (qp_id >= dev->data->nb_queue_pairs) {
+		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
+		return ret;
+	}
+
+	rte_spinlock_lock(&rte_cryptodev_callback_lock);
+	if (dev->enq_cbs == NULL) {
+		CDEV_LOG_ERR("Callback not initialized");
+		goto cb_err;
+	}
+
+	/* list is the address of a valid array slot (qp_id range-checked
+	 * above), so no NULL check is needed on it.
+	 */
+	list = &dev->enq_cbs[qp_id];
+	if (list->qsbr == NULL) {
+		CDEV_LOG_ERR("Rcu qsbr is NULL");
+		goto cb_err;
+	}
+
+	/* Unlink cb from the singly linked per-queue-pair list. */
+	prev_cb = &list->next;
+	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
+		curr_cb = *prev_cb;
+		if (curr_cb == cb) {
+			/* Remove the user cb from the callback list. */
+			__atomic_store_n(prev_cb, curr_cb->next,
+				__ATOMIC_RELAXED);
+			ret = 0;
+			break;
+		}
+	}
+
+	if (ret == 0) {
+		/* Call sync with invalid thread id as this is part of
+		 * control plane API
+		 */
+		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
+		rte_free(cb);
+	}
+
+cb_err:
+	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
+	return ret;
+}
+#endif
 
 int
 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 0935fd5..1b7d7ef 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -23,6 +23,7 @@ 
 #include "rte_dev.h"
 #include <rte_common.h>
 #include <rte_config.h>
+#include <rte_rcu_qsbr.h>
 
 #include "rte_cryptodev_trace_fp.h"
 
@@ -522,6 +523,34 @@  struct rte_cryptodev_qp_conf {
 	/**< The mempool for creating sess private data in sessionless mode */
 };
 
+#ifdef RTE_CRYPTO_CALLBACKS
+/**
+ * Function type used for pre processing crypto ops when enqueue burst is
+ * called.
+ *
+ * The callback function is called on enqueue burst immediately
+ * before the crypto ops are put onto the hardware queue for processing.
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	qp_id		The index of the queue pair in which ops are
+ *				to be enqueued for processing. The value
+ *				must be in the range [0, nb_queue_pairs - 1]
+ *				previously supplied to
+ *				*rte_cryptodev_configure*.
+ * @param	ops		The address of an array of *nb_ops* pointers
+ *				to *rte_crypto_op* structures which contain
+ *				the crypto operations to be processed.
+ * @param	nb_ops		The number of operations to process.
+ * @param	user_param	The arbitrary user parameter passed in by the
+ *				application when the callback was originally
+ *				registered.
+ * @return			The number of ops to be enqueued to the
+ *				crypto device.
+ */
+typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
+		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
+#endif
+
 /**
  * Typedef for application callback function to be registered by application
  * software for notification of device events
@@ -822,7 +851,6 @@  struct rte_cryptodev_config {
 		enum rte_cryptodev_event_type event,
 		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
 
-
 typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
 		struct rte_crypto_op **ops,	uint16_t nb_ops);
 /**< Dequeue processed packets from queue pair of a device. */
@@ -839,6 +867,33 @@  typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
 /** Structure to keep track of registered callbacks */
 TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
 
+#ifdef RTE_CRYPTO_CALLBACKS
+/**
+ * @internal
+ * Structure used to hold information about the callbacks to be called for a
+ * queue pair on enqueue.
+ */
+struct rte_cryptodev_cb {
+	struct rte_cryptodev_cb *next;
+	/**< Pointer to next callback */
+	rte_cryptodev_callback_fn fn;
+	/**< Pointer to callback function */
+	void *arg;
+	/**< Pointer to argument */
+};
+
+/**
+ * @internal
+ * Structure used to hold information about the RCU for a queue pair.
+ */
+struct rte_cryptodev_enq_cb_rcu {
+	struct rte_cryptodev_cb *next;
+	/**< Pointer to the first user callback in the list (NULL if empty) */
+	struct rte_rcu_qsbr *qsbr;
+	/**< RCU QSBR variable per queue pair */
+};
+#endif
+
 /** The data structure associated with each crypto device. */
 struct rte_cryptodev {
 	dequeue_pkt_burst_t dequeue_burst;
@@ -867,6 +922,10 @@  struct rte_cryptodev {
 	__extension__
 	uint8_t attached : 1;
 	/**< Flag indicating the device is attached */
+
+	struct rte_cryptodev_enq_cb_rcu *enq_cbs;
+	/**< User application callback for pre enqueue processing */
+
 } __rte_cache_aligned;
 
 void *
@@ -989,6 +1048,31 @@  struct rte_cryptodev_data {
 {
 	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
 
+#ifdef RTE_CRYPTO_CALLBACKS
+	if (unlikely(dev->enq_cbs != NULL)) {
+		struct rte_cryptodev_enq_cb_rcu *list;
+		struct rte_cryptodev_cb *cb;
+
+		/* __ATOMIC_RELEASE memory order was used when the
+		 * callback was inserted into the list.
+		 * Since there is a clear dependency between loading
+		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+		 * not required.
+		 */
+		list = &dev->enq_cbs[qp_id];
+		rte_rcu_qsbr_thread_online(list->qsbr, 0);
+		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
+
+		while (cb != NULL) {
+			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
+					cb->arg);
+			cb = cb->next;
+		};
+
+		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
+	}
+#endif
+
 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
 	return (*dev->enqueue_burst)(
 			dev->data->queue_pairs[qp_id], ops, nb_ops);
@@ -1730,6 +1814,78 @@  struct rte_crypto_raw_dp_ctx {
 rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
 		uint32_t n);
 
+#ifdef RTE_CRYPTO_CALLBACKS
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Add a user callback for a given crypto device and queue pair which will be
+ * called on crypto ops enqueue.
+ *
+ * This API configures a function to be called for each burst of crypto ops
+ * received on a given crypto device queue pair. The return value is a pointer
+ * that can be used later to remove the callback using
+ * rte_cryptodev_remove_enq_callback().
+ *
+ * Multiple functions are called in the order that they are added.
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	qp_id		The index of the queue pair in which ops are
+ *				to be enqueued for processing. The value
+ *				must be in the range [0, nb_queue_pairs - 1]
+ *				previously supplied to
+ *				*rte_cryptodev_configure*.
+ * @param	cb_fn		The callback function
+ * @param	cb_arg		A generic pointer parameter which will be passed
+ *				to each invocation of the callback function on
+ *				this crypto device and queue pair.
+ *
+ * @return
+ *   NULL on error.
+ *   On success, a pointer value which can later be used to remove the callback.
+ */
+
+__rte_experimental
+struct rte_cryptodev_cb *
+rte_cryptodev_add_enq_callback(uint8_t dev_id,
+			       uint16_t qp_id,
+			       rte_cryptodev_callback_fn cb_fn,
+			       void *cb_arg);
+
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Remove a user callback function for given crypto device and queue pair.
+ *
+ * This function is used to remove callbacks that were added to a crypto
+ * device queue pair using rte_cryptodev_add_enq_callback().
+ *
+ *
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	qp_id		The index of the queue pair in which ops are
+ *				to be enqueued for processing. The value
+ *				must be in the range [0, nb_queue_pairs - 1]
+ *				previously supplied to
+ *				*rte_cryptodev_configure*.
+ * @param	cb		Pointer to user supplied callback created via
+ *				rte_cryptodev_add_enq_callback().
+ *
+ * @return
+ *   - 0: Success. Callback was removed.
+ *   - -EINVAL:  The dev_id or the qp_id is out of range, or the callback
+ *               is NULL or not found for the crypto device queue pair.
+ */
+
+__rte_experimental
+int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
+				      uint16_t qp_id,
+				      struct rte_cryptodev_cb *cb);
+
+#endif
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index 7e4360f..5d8d6b0 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -101,6 +101,7 @@  EXPERIMENTAL {
 	rte_cryptodev_get_qp_status;
 
 	# added in 20.11
+	rte_cryptodev_add_enq_callback;
 	rte_cryptodev_configure_raw_dp_ctx;
 	rte_cryptodev_get_raw_dp_ctx_size;
 	rte_cryptodev_raw_dequeue;
@@ -109,4 +110,5 @@  EXPERIMENTAL {
 	rte_cryptodev_raw_enqueue;
 	rte_cryptodev_raw_enqueue_burst;
 	rte_cryptodev_raw_enqueue_done;
+	rte_cryptodev_remove_enq_callback;
 };