[3/8] cryptodev: add helper functions for new datapath interface

Message ID 20210829125139.2173235-4-gakhil@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Headers
Series cryptodev: hide internal structures |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Akhil Goyal Aug. 29, 2021, 12:51 p.m. UTC
  Add helper functions and macros to help drivers
transition to the new datapath interface.

Signed-off-by: Akhil Goyal <gakhil@marvell.com>
---
 lib/cryptodev/cryptodev_pmd.h | 246 ++++++++++++++++++++++++++++++++++
 lib/cryptodev/rte_cryptodev.c |  40 +++++-
 lib/cryptodev/version.map     |   4 +
 3 files changed, 289 insertions(+), 1 deletion(-)
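
For illustration, a minimal sketch of how a PMD could adopt these helpers. The
"foo" driver name, its burst functions and the dev_start hook below are
hypothetical; only the _RTE_CRYPTO_ENQ/DEQ_DEF and _RTE_CRYPTO_ENQ/DEQ_FUNC
macros and the rte_crypto_set_enq/deq_burst_fn() setters come from this patch:

#include <rte_common.h>
#include <rte_crypto.h>
#include "cryptodev_pmd.h"

/* Hypothetical PMD burst functions operating directly on a queue pair. */
static uint16_t
foo_enqueue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	RTE_SET_USED(qp);
	RTE_SET_USED(ops);
	return nb_ops;	/* hardware submission omitted in this sketch */
}

static uint16_t
foo_dequeue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	RTE_SET_USED(qp);
	RTE_SET_USED(ops);
	RTE_SET_USED(nb_ops);
	return 0;	/* completion handling omitted in this sketch */
}

/* Generate (dev_id, qp_id) based wrappers around the queue-pair functions. */
_RTE_CRYPTO_ENQ_DEF(foo_enqueue_burst)
_RTE_CRYPTO_DEQ_DEF(foo_dequeue_burst)

/* Register the wrappers with the library, e.g. when the device is started. */
static int
foo_dev_start(struct rte_cryptodev *dev)
{
	rte_crypto_set_enq_burst_fn(dev->data->dev_id,
			_RTE_CRYPTO_ENQ_FUNC(foo_enqueue_burst));
	rte_crypto_set_deq_burst_fn(dev->data->dev_id,
			_RTE_CRYPTO_DEQ_FUNC(foo_dequeue_burst));
	return 0;
}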
  

Comments

Fan Zhang Aug. 30, 2021, 8:07 p.m. UTC | #1
Hi Akhil,

> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Sunday, August 29, 2021 1:52 PM
> To: dev@dpdk.org
> Cc: anoobj@marvell.com; Nicolau, Radu <radu.nicolau@intel.com>; Doherty,
> Declan <declan.doherty@intel.com>; hemant.agrawal@nxp.com;
> matan@nvidia.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> thomas@monjalon.net; Zhang, Roy Fan <roy.fan.zhang@intel.com>;
> asomalap@amd.com; ruifeng.wang@arm.com;
> ajit.khaparde@broadcom.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Trahe, Fiona <fiona.trahe@intel.com>;
> adwivedi@marvell.com; michaelsh@marvell.com;
> rnagadheeraj@marvell.com; jianjay.zhou@huawei.com; jerinj@marvell.com;
> Akhil Goyal <gakhil@marvell.com>
> Subject: [PATCH 3/8] cryptodev: add helper functions for new datapath
> interface
> 
> Add helper functions and macros to help drivers to
> transition to new datapath interface.
> 
> Signed-off-by: Akhil Goyal <gakhil@marvell.com>
> ---
>  lib/cryptodev/cryptodev_pmd.h | 246 ++++++++++++++++++++++++++++++++++
>  lib/cryptodev/rte_cryptodev.c |  40 +++++-
>  lib/cryptodev/version.map     |   4 +
>  3 files changed, 289 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/cryptodev/cryptodev_pmd.h b/lib/cryptodev/cryptodev_pmd.h
> index eeaea13a23..d40e5cee94 100644
> --- a/lib/cryptodev/cryptodev_pmd.h
> +++ b/lib/cryptodev/cryptodev_pmd.h
> @@ -70,6 +70,13 @@ struct cryptodev_driver {
>  	const struct rte_driver *driver;
>  	uint8_t id;
>  };
> +/**
> + * @internal
> + * The pool of *rte_cryptodev* structures. The size of the pool
> + * is configured at compile-time in the <rte_cryptodev.c> file.
> + */
> +extern struct rte_cryptodev rte_crypto_devices[];
> +
> 
>  /**
>   * Get the rte_cryptodev structure device pointer for the device. Assumes a
> @@ -529,6 +536,245 @@ __rte_internal
>  void
>  rte_cryptodev_api_reset(struct rte_cryptodev_api *api);
> 
> +/**
> + * @internal
> + * Helper routine for cryptodev_dequeue_burst.
> + * Should be called as first thing on entrance to the PMD's
> + * rte_cryptodev_dequeue_burst implementation.
> + * Does necessary checks and returns pointer to cryptodev identifier.
> + *
> + * @param dev_id
> + *  The device identifier of the crypto device.
> + * @param qp_id
> + *  The index of the queue pair from which processed crypto ops will
> + *  be dequeued.
> + *
> + * @return
> + *  Pointer to device queue pair on success or NULL otherwise.
> + */
> +__rte_internal
> +static inline void *
> +_rte_cryptodev_dequeue_prolog(uint8_t dev_id, uint8_t qp_id)
> +{
> +	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> +
> +	return dev->data->queue_pairs[qp_id];
> +}
 
[Fan: the function name looks unclear to me - maybe 
rte_cryptodev_dequeue_prepare?
Also the function didn't do any check as the description suggested - the 
qp is later checked in _RTE_CRYPTO_DEQ_DEF, maybe remove the
description?]

> +
> +/**
> + * @internal
> + * Helper routine for crypto driver dequeue API.
> + * Should be called at exit from PMD's rte_cryptodev_dequeue_burst
> + * implementation.
> + * Does necessary post-processing - invokes RX callbacks if any, tracing, etc.
> + *
> + * @param dev_id
> + *  The device identifier of the Crypto device.
> + * @param qp_id
> + *  The index of the queue pair from which to retrieve input crypto_ops.
> + * @param ops
> + *   The address of an array of pointers to *rte_crypto_op* structures that
> + *   have been retrieved from the device.
> + * @param nb_ops
> + *   The number of ops that were retrieved from the device.
> + *
> + * @return
> + *  The number of crypto ops effectively supplied to the *ops* array.
> + */
> +__rte_internal
> +static inline uint16_t
> +_rte_cryptodev_dequeue_epilog(uint16_t dev_id, uint16_t qp_id,
> +	struct rte_crypto_op **ops, uint16_t nb_ops)
> +{
> +#ifdef RTE_CRYPTO_CALLBACKS
> +	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> +
> +	if (unlikely(dev->deq_cbs != NULL)) {
> +		struct rte_cryptodev_cb_rcu *list;
> +		struct rte_cryptodev_cb *cb;
> +
> +		/* __ATOMIC_RELEASE memory order was used when the
> +		 * call back was inserted into the list.
> +		 * Since there is a clear dependency between loading
> +		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
> +		 * not required.
> +		 */
> +		list = &dev->deq_cbs[qp_id];
> +		rte_rcu_qsbr_thread_online(list->qsbr, 0);
> +		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> +
> +		while (cb != NULL) {
> +			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> +					cb->arg);
> +			cb = cb->next;
> +		};
> +
> +		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> +	}
> +#endif
> +
> +	return nb_ops;
> +}

[Fan: same naming issue - maybe rte_cryptodev_dequeue_post, and likewise
for the enqueue part]

> +#define _RTE_CRYPTO_DEQ_FUNC(fn)	_rte_crypto_deq_##fn
> +
> +/**
> + * @internal
> + * Helper macro to create new API wrappers for existing PMD dequeue functions.
> + */
> +#define _RTE_CRYPTO_DEQ_PROTO(fn) \
> +	uint16_t _RTE_CRYPTO_DEQ_FUNC(fn)(uint8_t dev_id, uint8_t qp_id, \
> +			struct rte_crypto_op **ops, uint16_t nb_ops)
> +
> +/**
> + * @internal
> + * Helper macro to create new API wrappers for existing PMD dequeue functions.
> + */
> +#define _RTE_CRYPTO_DEQ_DEF(fn) \
> +_RTE_CRYPTO_DEQ_PROTO(fn) \
> +{ \
> +	void *qp = _rte_cryptodev_dequeue_prolog(dev_id, qp_id); \
> +	if (qp == NULL) \
[Fan: suggest to add "unlikely" above] 
> +		return 0; \
> +	nb_ops = fn(qp, ops, nb_ops); \
> +	return _rte_cryptodev_dequeue_epilog(dev_id, qp_id, ops, nb_ops); \
> +}
> +
> +/**
> + * @internal
> + * Helper routine for cryptodev_enqueue_burst.
> + * Should be called as first thing on entrance to the PMD's
> + * rte_cryptodev_enqueue_burst implementation.
> + * Does necessary checks and returns pointer to cryptodev queue pair.
> + *
> + * @param dev_id
> + *  The device identifier of the crypto device.
> + * @param qp_id
> + *  The index of the queue pair in which packets will be enqueued.
> + * @param ops
> + *   The address of an array of pointers to *rte_crypto_op* structures that
> + *   will be enqueued to the device.
> + * @param nb_ops
> + *   The number of ops that will be sent to the device.
> + *
> + * @return
> + *  Pointer to device queue pair on success or NULL otherwise.
> + */
> +__rte_internal
> +static inline void *
> +_rte_cryptodev_enqueue_prolog(uint8_t dev_id, uint8_t qp_id,
> +		struct rte_crypto_op **ops, uint16_t nb_ops)
> +{
> +	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> +
> +#ifdef RTE_CRYPTO_CALLBACKS
> +	if (unlikely(dev->enq_cbs != NULL)) {
> +		struct rte_cryptodev_cb_rcu *list;
> +		struct rte_cryptodev_cb *cb;
> +
> +		/* __ATOMIC_RELEASE memory order was used when the
> +		 * call back was inserted into the list.
> +		 * Since there is a clear dependency between loading
> +		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
> +		 * not required.
> +		 */
> +		list = &dev->enq_cbs[qp_id];
> +		rte_rcu_qsbr_thread_online(list->qsbr, 0);
> +		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> +
> +		while (cb != NULL) {
> +			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> +					cb->arg);
> +			cb = cb->next;
> +		};
> +
> +		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> +	}
> +#endif
> +	return dev->data->queue_pairs[qp_id];
> +}
> +
> +#define _RTE_CRYPTO_ENQ_FUNC(fn)	_rte_crypto_enq_##fn
> +
> +/**
> + * @internal
> + * Helper macro to create new API wrappers for existing PMD enqueue functions.
> + */
> +#define _RTE_CRYPTO_ENQ_PROTO(fn) \
> +	uint16_t _RTE_CRYPTO_ENQ_FUNC(fn)(uint8_t dev_id, uint8_t qp_id, \
> +			struct rte_crypto_op **ops, uint16_t nb_ops)
> +
> +/**
> + * @internal
> + * Helper macro to create new API wrappers for existing PMD enqueue functions.
> + */
> +#define _RTE_CRYPTO_ENQ_DEF(fn) \
> +_RTE_CRYPTO_ENQ_PROTO(fn) \
> +{ \
> +	void *qp = _rte_cryptodev_enqueue_prolog(dev_id, qp_id, ops, nb_ops); \
> +	if (qp == NULL) \
> +		return 0; \
> +	return fn(qp, ops, nb_ops); \
> +}
> +
> +/**
> + * @internal
> + * Helper routine to get enqueue burst function of a given device.
> + *
> + * @param dev_id
> + *  The device identifier of the Crypto device.
> + *
> + * @return
> + *  The function if valid else NULL
> + */
> +__rte_internal
> +rte_crypto_enqueue_burst_t
> +rte_crypto_get_enq_burst_fn(uint8_t dev_id);
> +
> +/**
> + * @internal
> + * Helper routine to get dequeue burst function of a given device.
> + *
> + * @param dev_id
> + *  The device identifier of the Crypto device.
> + *
> + * @return
> + *  The function if valid else NULL
> + */
> +__rte_internal
> +rte_crypto_dequeue_burst_t
> +rte_crypto_get_deq_burst_fn(uint8_t dev_id);
> +
> +/**
> + * @internal
> + * Helper routine to set enqueue burst function of a given device.
> + *
> + * @param dev_id
> + *  The device identifier of the Crypto device.
> + *
> + * @return
> + *  0		Success.
> + *  -EINVAL	Failure if dev_id or fn are in-valid.
> + */
> +__rte_internal
> +int
> +rte_crypto_set_enq_burst_fn(uint8_t dev_id, rte_crypto_enqueue_burst_t fn);
> +
> +/**
> + * @internal
> + * Helper routine to set dequeue burst function of a given device.
> + *
> + * @param dev_id
> + *  The device identifier of the Crypto device.
> + *
> + * @return
> + *  0		Success.
> + *  -EINVAL	Failure if dev_id or fn are in-valid.
> + */
> +__rte_internal
> +int
> +rte_crypto_set_deq_burst_fn(uint8_t dev_id, rte_crypto_dequeue_burst_t fn);
> +
> +
>  static inline void *
>  get_sym_session_private_data(const struct rte_cryptodev_sym_session *sess,
>  		uint8_t driver_id) {
> diff --git a/lib/cryptodev/rte_cryptodev.c b/lib/cryptodev/rte_cryptodev.c
> index 26f8390668..4ab82d21d0 100644
> --- a/lib/cryptodev/rte_cryptodev.c
> +++ b/lib/cryptodev/rte_cryptodev.c
> @@ -44,7 +44,7 @@
> 
>  static uint8_t nb_drivers;
> 
> -static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
> +struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
> 
>  struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
> 
> @@ -1270,6 +1270,44 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
>  			socket_id);
>  }
> 
> +rte_crypto_enqueue_burst_t
> +rte_crypto_get_enq_burst_fn(uint8_t dev_id)
> +{
> +	if (dev_id >= RTE_CRYPTO_MAX_DEVS) {
> +		rte_errno = EINVAL;
> +		return NULL;
> +	}
> +	return rte_cryptodev_api[dev_id].enqueue_burst;
> +}
> +
> +rte_crypto_dequeue_burst_t
> +rte_crypto_get_deq_burst_fn(uint8_t dev_id)
> +{
> +	if (dev_id >= RTE_CRYPTO_MAX_DEVS) {
> +		rte_errno = EINVAL;
> +		return NULL;
> +	}
> +	return rte_cryptodev_api[dev_id].dequeue_burst;
> +}
> +
> +int
> +rte_crypto_set_enq_burst_fn(uint8_t dev_id, rte_crypto_enqueue_burst_t fn)
> +{
> +	if (dev_id >= RTE_CRYPTO_MAX_DEVS || fn == NULL)
> +		return -EINVAL;
> +	rte_cryptodev_api[dev_id].enqueue_burst = fn;
> +	return 0;
> +}
> +
> +int
> +rte_crypto_set_deq_burst_fn(uint8_t dev_id, rte_crypto_dequeue_burst_t fn)
> +{
> +	if (dev_id >= RTE_CRYPTO_MAX_DEVS || fn == NULL)
> +		return -EINVAL;
> +	rte_cryptodev_api[dev_id].dequeue_burst = fn;
> +	return 0;
> +}
> +
>  struct rte_cryptodev_cb *
>  rte_cryptodev_add_enq_callback(uint8_t dev_id,
>  			       uint16_t qp_id,
> diff --git a/lib/cryptodev/version.map b/lib/cryptodev/version.map
> index 050089ae55..b64384cc05 100644
> --- a/lib/cryptodev/version.map
> +++ b/lib/cryptodev/version.map
> @@ -116,6 +116,10 @@ EXPERIMENTAL {
>  INTERNAL {
>  	global:
> 
> +	rte_crypto_get_deq_burst_fn;
> +	rte_crypto_get_enq_burst_fn;
> +	rte_crypto_set_deq_burst_fn;
> +	rte_crypto_set_enq_burst_fn;
>  	rte_cryptodev_allocate_driver;
>  	rte_cryptodev_api_reset;
>  	rte_cryptodev_pmd_allocate;
> --
> 2.25.1

Regards,
Fan
  
Akhil Goyal Aug. 31, 2021, 6:14 a.m. UTC | #2
Hi Fan,
> Hi Akhil,
> 
> > +__rte_internal
> > +static inline void *
> > +_rte_cryptodev_dequeue_prolog(uint8_t dev_id, uint8_t qp_id)
> > +{
> > +	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> > +
> > +	return dev->data->queue_pairs[qp_id];
> > +}
> 
> [Fan: the function name looks unclear to me - maybe
> rte_cryptodev_dequeue_prepare?

The naming convention is the same as the one Konstantin used for ethdev and
subsequently Pavan used for eventdev. I think it is better to keep all of them
aligned with similar naming.

> Also the function didn't do any check as the description suggested - the
> qp is later checked in _RTE_CRYPTO_DEQ_DEF, maybe remove the
> description?]
> 

OK, will update the description in the next version.

> > +_rte_cryptodev_dequeue_epilog(uint16_t dev_id, uint16_t qp_id,
> > +	struct rte_crypto_op **ops, uint16_t nb_ops)
> > +{
> > +#ifdef RTE_CRYPTO_CALLBACKS
> > +	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> > +
> > +	if (unlikely(dev->deq_cbs != NULL)) {
> > +		struct rte_cryptodev_cb_rcu *list;
> > +		struct rte_cryptodev_cb *cb;
> > +
> > +		/* __ATOMIC_RELEASE memory order was used when the
> > +		 * call back was inserted into the list.
> > +		 * Since there is a clear dependency between loading
> > +		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
> > +		 * not required.
> > +		 */
> > +		list = &dev->deq_cbs[qp_id];
> > +		rte_rcu_qsbr_thread_online(list->qsbr, 0);
> > +		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> > +
> > +		while (cb != NULL) {
> > +			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> > +					cb->arg);
> > +			cb = cb->next;
> > +		};
> > +
> > +		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> > +	}
> > +#endif
> > +
> > +	return nb_ops;
> > +}
> 
> [Fan: same naming issue - maybe rte_cryptodev_dequeue_post, and likewise
> for the enqueue part]

Same comment as above. Aligned with ethdev and eventdev changes.

> 
> > +#define _RTE_CRYPTO_DEQ_FUNC(fn)	_rte_crypto_deq_##fn
> > +
> > +/**
> > + * @internal
> > + * Helper macro to create new API wrappers for existing PMD dequeue functions.
> > + */
> > +#define _RTE_CRYPTO_DEQ_PROTO(fn) \
> > +	uint16_t _RTE_CRYPTO_DEQ_FUNC(fn)(uint8_t dev_id, uint8_t qp_id, \
> > +			struct rte_crypto_op **ops, uint16_t nb_ops)
> > +
> > +/**
> > + * @internal
> > + * Helper macro to create new API wrappers for existing PMD dequeue functions.
> > + */
> > +#define _RTE_CRYPTO_DEQ_DEF(fn) \
> > +_RTE_CRYPTO_DEQ_PROTO(fn) \
> > +{ \
> > +	void *qp = _rte_cryptodev_dequeue_prolog(dev_id, qp_id); \
> > +	if (qp == NULL) \
> [Fan: suggest to add "unlikely" above]

Ok
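
For reference, a sketch of how the macro could look in the next revision with
the agreed unlikely() hint added (illustration only, not the posted code):

#define _RTE_CRYPTO_DEQ_DEF(fn) \
_RTE_CRYPTO_DEQ_PROTO(fn) \
{ \
	void *qp = _rte_cryptodev_dequeue_prolog(dev_id, qp_id); \
	if (unlikely(qp == NULL)) \
		return 0; \
	nb_ops = fn(qp, ops, nb_ops); \
	return _rte_cryptodev_dequeue_epilog(dev_id, qp_id, ops, nb_ops); \
}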
  
Fan Zhang Sept. 13, 2021, 2:20 p.m. UTC | #3
> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Sunday, August 29, 2021 1:52 PM
> To: dev@dpdk.org
> Cc: anoobj@marvell.com; Nicolau, Radu <radu.nicolau@intel.com>; Doherty,
> Declan <declan.doherty@intel.com>; hemant.agrawal@nxp.com;
> matan@nvidia.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> thomas@monjalon.net; Zhang, Roy Fan <roy.fan.zhang@intel.com>;
> asomalap@amd.com; ruifeng.wang@arm.com;
> ajit.khaparde@broadcom.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Trahe, Fiona <fiona.trahe@intel.com>;
> adwivedi@marvell.com; michaelsh@marvell.com;
> rnagadheeraj@marvell.com; jianjay.zhou@huawei.com; jerinj@marvell.com;
> Akhil Goyal <gakhil@marvell.com>
> Subject: [PATCH 3/8] cryptodev: add helper functions for new datapath
> interface
> 
> Add helper functions and macros to help drivers to
> transition to new datapath interface.
> 
> Signed-off-by: Akhil Goyal <gakhil@marvell.com>
> ---

Tested-by: Rebecca Troy <rebecca.troy@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
  

Patch

diff --git a/lib/cryptodev/cryptodev_pmd.h b/lib/cryptodev/cryptodev_pmd.h
index eeaea13a23..d40e5cee94 100644
--- a/lib/cryptodev/cryptodev_pmd.h
+++ b/lib/cryptodev/cryptodev_pmd.h
@@ -70,6 +70,13 @@  struct cryptodev_driver {
 	const struct rte_driver *driver;
 	uint8_t id;
 };
+/**
+ * @internal
+ * The pool of *rte_cryptodev* structures. The size of the pool
+ * is configured at compile-time in the <rte_cryptodev.c> file.
+ */
+extern struct rte_cryptodev rte_crypto_devices[];
+
 
 /**
  * Get the rte_cryptodev structure device pointer for the device. Assumes a
@@ -529,6 +536,245 @@  __rte_internal
 void
 rte_cryptodev_api_reset(struct rte_cryptodev_api *api);
 
+/**
+ * @internal
+ * Helper routine for cryptodev_dequeue_burst.
+ * Should be called as first thing on entrance to the PMD's
+ * rte_cryptodev_dequeue_burst implementation.
+ * Does necessary checks and returns pointer to cryptodev identifier.
+ *
+ * @param dev_id
+ *  The device identifier of the crypto device.
+ * @param qp_id
+ *  The index of the queue pair from which processed crypto ops will
+ *  be dequeued.
+ *
+ * @return
+ *  Pointer to device queue pair on success or NULL otherwise.
+ */
+__rte_internal
+static inline void *
+_rte_cryptodev_dequeue_prolog(uint8_t dev_id, uint8_t qp_id)
+{
+	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+	return dev->data->queue_pairs[qp_id];
+}
+
+/**
+ * @internal
+ * Helper routine for crypto driver dequeue API.
+ * Should be called at exit from PMD's rte_cryptodev_dequeue_burst
+ * implementation.
+ * Does necessary post-processing - invokes RX callbacks if any, tracing, etc.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ * @param qp_id
+ *  The index of the queue pair from which to retrieve input crypto_ops.
+ * @param ops
+ *   The address of an array of pointers to *rte_crypto_op* structures that
+ *   have been retrieved from the device.
+ * @param nb_ops
+ *   The number of ops that were retrieved from the device.
+ *
+ * @return
+ *  The number of crypto ops effectively supplied to the *ops* array.
+ */
+__rte_internal
+static inline uint16_t
+_rte_cryptodev_dequeue_epilog(uint16_t dev_id, uint16_t qp_id,
+	struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+#ifdef RTE_CRYPTO_CALLBACKS
+	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+	if (unlikely(dev->deq_cbs != NULL)) {
+		struct rte_cryptodev_cb_rcu *list;
+		struct rte_cryptodev_cb *cb;
+
+		/* __ATOMIC_RELEASE memory order was used when the
+		 * call back was inserted into the list.
+		 * Since there is a clear dependency between loading
+		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+		 * not required.
+		 */
+		list = &dev->deq_cbs[qp_id];
+		rte_rcu_qsbr_thread_online(list->qsbr, 0);
+		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
+
+		while (cb != NULL) {
+			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
+					cb->arg);
+			cb = cb->next;
+		};
+
+		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
+	}
+#endif
+
+	return nb_ops;
+}
+#define _RTE_CRYPTO_DEQ_FUNC(fn)	_rte_crypto_deq_##fn
+
+/**
+ * @internal
+ * Helper macro to create new API wrappers for existing PMD dequeue functions.
+ */
+#define _RTE_CRYPTO_DEQ_PROTO(fn) \
+	uint16_t _RTE_CRYPTO_DEQ_FUNC(fn)(uint8_t dev_id, uint8_t qp_id, \
+			struct rte_crypto_op **ops, uint16_t nb_ops)
+
+/**
+ * @internal
+ * Helper macro to create new API wrappers for existing PMD dequeue functions.
+ */
+#define _RTE_CRYPTO_DEQ_DEF(fn) \
+_RTE_CRYPTO_DEQ_PROTO(fn) \
+{ \
+	void *qp = _rte_cryptodev_dequeue_prolog(dev_id, qp_id); \
+	if (qp == NULL) \
+		return 0; \
+	nb_ops = fn(qp, ops, nb_ops); \
+	return _rte_cryptodev_dequeue_epilog(dev_id, qp_id, ops, nb_ops); \
+}
+
+/**
+ * @internal
+ * Helper routine for cryptodev_enqueue_burst.
+ * Should be called as first thing on entrance to the PMD's
+ * rte_cryptodev_enqueue_burst implementation.
+ * Does necessary checks and returns pointer to cryptodev queue pair.
+ *
+ * @param dev_id
+ *  The device identifier of the crypto device.
+ * @param qp_id
+ *  The index of the queue pair in which packets will be enqueued.
+ * @param ops
+ *   The address of an array of pointers to *rte_crypto_op* structures that
+ *   will be enqueued to the device.
+ * @param nb_ops
+ *   The number of ops that will be sent to the device.
+ *
+ * @return
+ *  Pointer to device queue pair on success or NULL otherwise.
+ */
+__rte_internal
+static inline void *
+_rte_cryptodev_enqueue_prolog(uint8_t dev_id, uint8_t qp_id,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+#ifdef RTE_CRYPTO_CALLBACKS
+	if (unlikely(dev->enq_cbs != NULL)) {
+		struct rte_cryptodev_cb_rcu *list;
+		struct rte_cryptodev_cb *cb;
+
+		/* __ATOMIC_RELEASE memory order was used when the
+		 * call back was inserted into the list.
+		 * Since there is a clear dependency between loading
+		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+		 * not required.
+		 */
+		list = &dev->enq_cbs[qp_id];
+		rte_rcu_qsbr_thread_online(list->qsbr, 0);
+		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
+
+		while (cb != NULL) {
+			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
+					cb->arg);
+			cb = cb->next;
+		};
+
+		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
+	}
+#endif
+	return dev->data->queue_pairs[qp_id];
+}
+
+#define _RTE_CRYPTO_ENQ_FUNC(fn)	_rte_crypto_enq_##fn
+
+/**
+ * @internal
+ * Helper macro to create new API wrappers for existing PMD enqueue functions.
+ */
+#define _RTE_CRYPTO_ENQ_PROTO(fn) \
+	uint16_t _RTE_CRYPTO_ENQ_FUNC(fn)(uint8_t dev_id, uint8_t qp_id, \
+			struct rte_crypto_op **ops, uint16_t nb_ops)
+
+/**
+ * @internal
+ * Helper macro to create new API wrappers for existing PMD enqueue functions.
+ */
+#define _RTE_CRYPTO_ENQ_DEF(fn) \
+_RTE_CRYPTO_ENQ_PROTO(fn) \
+{ \
+	void *qp = _rte_cryptodev_enqueue_prolog(dev_id, qp_id, ops, nb_ops); \
+	if (qp == NULL) \
+		return 0; \
+	return fn(qp, ops, nb_ops); \
+}
+
+/**
+ * @internal
+ * Helper routine to get enqueue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_crypto_enqueue_burst_t
+rte_crypto_get_enq_burst_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to get dequeue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ *
+ * @return
+ *  The function if valid else NULL
+ */
+__rte_internal
+rte_crypto_dequeue_burst_t
+rte_crypto_get_deq_burst_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to set enqueue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are in-valid.
+ */
+__rte_internal
+int
+rte_crypto_set_enq_burst_fn(uint8_t dev_id, rte_crypto_enqueue_burst_t fn);
+
+/**
+ * @internal
+ * Helper routine to set dequeue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn are in-valid.
+ */
+__rte_internal
+int
+rte_crypto_set_deq_burst_fn(uint8_t dev_id, rte_crypto_dequeue_burst_t fn);
+
+
 static inline void *
 get_sym_session_private_data(const struct rte_cryptodev_sym_session *sess,
 		uint8_t driver_id) {
diff --git a/lib/cryptodev/rte_cryptodev.c b/lib/cryptodev/rte_cryptodev.c
index 26f8390668..4ab82d21d0 100644
--- a/lib/cryptodev/rte_cryptodev.c
+++ b/lib/cryptodev/rte_cryptodev.c
@@ -44,7 +44,7 @@ 
 
 static uint8_t nb_drivers;
 
-static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
+struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
 
 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
 
@@ -1270,6 +1270,44 @@  rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
 			socket_id);
 }
 
+rte_crypto_enqueue_burst_t
+rte_crypto_get_enq_burst_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_CRYPTO_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_cryptodev_api[dev_id].enqueue_burst;
+}
+
+rte_crypto_dequeue_burst_t
+rte_crypto_get_deq_burst_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_CRYPTO_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_cryptodev_api[dev_id].dequeue_burst;
+}
+
+int
+rte_crypto_set_enq_burst_fn(uint8_t dev_id, rte_crypto_enqueue_burst_t fn)
+{
+	if (dev_id >= RTE_CRYPTO_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_cryptodev_api[dev_id].enqueue_burst = fn;
+	return 0;
+}
+
+int
+rte_crypto_set_deq_burst_fn(uint8_t dev_id, rte_crypto_dequeue_burst_t fn)
+{
+	if (dev_id >= RTE_CRYPTO_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_cryptodev_api[dev_id].dequeue_burst = fn;
+	return 0;
+}
+
 struct rte_cryptodev_cb *
 rte_cryptodev_add_enq_callback(uint8_t dev_id,
 			       uint16_t qp_id,
diff --git a/lib/cryptodev/version.map b/lib/cryptodev/version.map
index 050089ae55..b64384cc05 100644
--- a/lib/cryptodev/version.map
+++ b/lib/cryptodev/version.map
@@ -116,6 +116,10 @@  EXPERIMENTAL {
 INTERNAL {
 	global:
 
+	rte_crypto_get_deq_burst_fn;
+	rte_crypto_get_enq_burst_fn;
+	rte_crypto_set_deq_burst_fn;
+	rte_crypto_set_enq_burst_fn;
 	rte_cryptodev_allocate_driver;
 	rte_cryptodev_api_reset;
 	rte_cryptodev_pmd_allocate;
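
For context, a rough sketch of how a flat enqueue entry point could dispatch
through the per-device table that rte_crypto_set_enq_burst_fn() populates. The
rte_crypto_enqueue_burst_t signature is inferred from _RTE_CRYPTO_ENQ_PROTO();
the actual wrapper used elsewhere in this series may differ:

/* Sketch only: dispatch through the function registered by the PMD. */
static inline uint16_t
crypto_enqueue_burst_sketch(uint8_t dev_id, uint8_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_crypto_enqueue_burst_t fn = rte_crypto_get_enq_burst_fn(dev_id);

	if (unlikely(fn == NULL))
		return 0;
	return fn(dev_id, qp_id, ops, nb_ops);
}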