[v3,1/2] crypto/scheduler: support DOCSIS security protocol

Message ID 20230914152207.19794-2-david.coyle@intel.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Headers
Series crypto/scheduler: add support for DOCSIS security protocol |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Coyle, David Sept. 14, 2023, 3:22 p.m. UTC
  Add support to the cryptodev scheduler PMD for the DOCSIS security
protocol. This includes adding the following to the scheduler:
- synchronization of workers' security capabilities
- retrieval of the scheduler's synchronized security capabilities
- retrieval of the security session size, i.e. the maximum session size
  across all workers
- creation of security sessions on each worker
- deletion of security sessions on each worker

Signed-off-by: David Coyle <david.coyle@intel.com>
Signed-off-by: Kevin O'Sullivan <kevin.osullivan@intel.com>
---
 doc/guides/rel_notes/release_23_11.rst        |   4 +
 drivers/crypto/scheduler/meson.build          |   2 +-
 .../scheduler/rte_cryptodev_scheduler.c       | 221 +++++++++-
 drivers/crypto/scheduler/scheduler_failover.c |  12 +-
 .../crypto/scheduler/scheduler_multicore.c    |  10 +-
 .../scheduler/scheduler_pkt_size_distr.c      |  54 +--
 drivers/crypto/scheduler/scheduler_pmd.c      |  33 ++
 drivers/crypto/scheduler/scheduler_pmd_ops.c  | 381 +++++++++++++-----
 .../crypto/scheduler/scheduler_pmd_private.h  | 159 +++++---
 .../crypto/scheduler/scheduler_roundrobin.c   |   6 +-
 10 files changed, 653 insertions(+), 229 deletions(-)
  

Comments

Anoob Joseph Sept. 18, 2023, 11:02 a.m. UTC | #1
Hi David,

Thanks for updating the patches based on the comments provided on the previous version. Please see inline for some comments on the code.

Thanks,
Anoob

> -----Original Message-----
> From: David Coyle <david.coyle@intel.com>
> Sent: Thursday, September 14, 2023 8:52 PM
> To: dev@dpdk.org
> Cc: kai.ji@intel.com; Anoob Joseph <anoobj@marvell.com>;
> kevin.osullivan@intel.com; David Coyle <david.coyle@intel.com>
> Subject: [EXT] [PATCH v3 1/2] crypto/scheduler: support DOCSIS security
> protocol
> 
> External Email
> 
> ----------------------------------------------------------------------
> Add support to the cryptodev scheduler PMD for the DOCSIS security
> protocol. This includes adding the following to the scheduler:
> - synchronization of worker's security capabilities
> - retrieval of the scheduler's synchronized security capabilities
> - retrieval of the security session size i.e. maximum session size
>   across all workers
> - creation of security sessions on each worker
> - deletion of security sessions on each worker
> 
> Signed-off-by: David Coyle <david.coyle@intel.com>
> Signed-off-by: Kevin O'Sullivan <kevin.osullivan@intel.com>
> ---
>  doc/guides/rel_notes/release_23_11.rst        |   4 +
>  drivers/crypto/scheduler/meson.build          |   2 +-
>  .../scheduler/rte_cryptodev_scheduler.c       | 221 +++++++++-
>  drivers/crypto/scheduler/scheduler_failover.c |  12 +-
>  .../crypto/scheduler/scheduler_multicore.c    |  10 +-
>  .../scheduler/scheduler_pkt_size_distr.c      |  54 +--
>  drivers/crypto/scheduler/scheduler_pmd.c      |  33 ++
>  drivers/crypto/scheduler/scheduler_pmd_ops.c  | 381 +++++++++++++-----
> .../crypto/scheduler/scheduler_pmd_private.h  | 159 +++++---
>  .../crypto/scheduler/scheduler_roundrobin.c   |   6 +-
>  10 files changed, 653 insertions(+), 229 deletions(-)
> 

<snip>

> diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
> b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
> index 258d6f8c43..e8b905af2f 100644
> --- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
> +++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
> @@ -5,11 +5,14 @@
>  #include <rte_reorder.h>
>  #include <rte_cryptodev.h>
>  #include <cryptodev_pmd.h>
> +#include <rte_security_driver.h>
>  #include <rte_malloc.h>
> 
>  #include "rte_cryptodev_scheduler.h"
>  #include "scheduler_pmd_private.h"
> 
> +#define MAX_CAPS 256
> +
>  /** update the scheduler pmd's capability with attaching device's
>   *  capability.
>   *  For each device to be attached, the scheduler's capability should be @@ -
> 59,7 +62,6 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
>  					cap->sym.auth.digest_size.max ?
>  					s_cap->sym.auth.digest_size.max :
>  					cap->sym.auth.digest_size.max;
> -
>  			}
> 
>  			if (s_cap->sym.xform_type ==
> @@ -81,25 +83,176 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
> 
>  		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
>  		sync_nb_caps--;
> +		i--;
>  	}
> 
>  	return sync_nb_caps;
>  }
> 
>  static int
> -update_scheduler_capability(struct scheduler_ctx *sched_ctx)
> +check_sec_cap_equal(const struct rte_security_capability *sec_cap1,
> +		struct rte_security_capability *sec_cap2) {
> +	if (sec_cap1->action != sec_cap2->action ||
> +			sec_cap1->protocol != sec_cap2->protocol ||
> +			sec_cap1->ol_flags != sec_cap2->ol_flags)
> +		return 0;
> +
> +	if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
> +		return !memcmp(&sec_cap1->docsis, &sec_cap2->docsis,
> +				sizeof(sec_cap1->docsis));
> +	else
> +		return 0;
> +}
> +
> +static void
> +copy_sec_cap(struct rte_security_capability *dst_sec_cap,
> +		struct rte_security_capability *src_sec_cap) {
> +	dst_sec_cap->action = src_sec_cap->action;
> +	dst_sec_cap->protocol = src_sec_cap->protocol;
> +	if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
> +		dst_sec_cap->docsis = src_sec_cap->docsis;
> +	dst_sec_cap->ol_flags = src_sec_cap->ol_flags; }
> +
> +static uint32_t
> +sync_sec_crypto_caps(struct rte_cryptodev_capabilities
> *tmp_sec_crypto_caps,
> +		const struct rte_cryptodev_capabilities *sec_crypto_caps,
> +		const struct rte_cryptodev_capabilities
> *worker_sec_crypto_caps) {
> +	uint8_t nb_caps = 0;
> +
> +	nb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps,
> sec_crypto_caps);
> +	sync_caps(tmp_sec_crypto_caps, nb_caps,
> worker_sec_crypto_caps);
> +
> +	return nb_caps;
> +}
> +
> +/** update the scheduler pmd's security capability with attaching
> +device's
> + *  security capability.
> + *  For each device to be attached, the scheduler's security capability
> +should
> + *  be the common capability set of all workers  **/ static uint32_t
> +sync_sec_caps(uint32_t worker_idx,
> +		struct rte_security_capability *sec_caps,
> +		struct rte_cryptodev_capabilities
> sec_crypto_caps[][MAX_CAPS],
> +		uint32_t nb_sec_caps,
> +		const struct rte_security_capability *worker_sec_caps)
>  {
> -	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
> -	uint32_t nb_caps = 0, i;
> +	uint32_t nb_worker_sec_caps = 0, i;
> +
> +	if (worker_sec_caps == NULL)
> +		return 0;
> +
> +	while (worker_sec_caps[nb_worker_sec_caps].action !=
> +
> 	RTE_SECURITY_ACTION_TYPE_NONE)
> +		nb_worker_sec_caps++;
> +
> +	/* Handle first worker */
> +	if (worker_idx == 0) {
> +		uint32_t nb_worker_sec_crypto_caps = 0;
> +		uint32_t nb_worker_supp_sec_caps = 0;
> +
> +		for (i = 0; i < nb_worker_sec_caps; i++) {
> +			/* Check for supported security protocols */
> +			if
> (!scheduler_check_sec_proto_supp(worker_sec_caps[i].action,
> +					worker_sec_caps[i].protocol))
> +				continue;
> 
> -	if (sched_ctx->capabilities) {
> -		rte_free(sched_ctx->capabilities);
> -		sched_ctx->capabilities = NULL;
> +			sec_caps[nb_worker_supp_sec_caps] =
> worker_sec_caps[i];
> +
> +			while (worker_sec_caps[i].crypto_capabilities[
> +					nb_worker_sec_crypto_caps].op !=
> +
> 	RTE_CRYPTO_OP_TYPE_UNDEFINED)
> +				nb_worker_sec_crypto_caps++;
> +
> +
> 	rte_memcpy(&sec_crypto_caps[nb_worker_supp_sec_caps][0],
> +				&worker_sec_caps[i].crypto_capabilities[0],

[Anoob] Isn't it possible to have 2 different security devices which may differ in crypto capabilities? My understanding is that the code assumes the crypto capabilities of both devices would match. It's okay to document it as a known limitation if it is too difficult to solve.

> +
> 	sizeof(sec_crypto_caps[nb_worker_supp_sec_caps][0]) *
> +					nb_worker_sec_crypto_caps);
> +
> +			nb_worker_supp_sec_caps++;
> +		}
> +		return nb_worker_supp_sec_caps;
>  	}
> 

<snip>

> diff --git a/drivers/crypto/scheduler/scheduler_pmd.c
> b/drivers/crypto/scheduler/scheduler_pmd.c
> index 4e8bbf0e09..6dad9bc3dd 100644
> --- a/drivers/crypto/scheduler/scheduler_pmd.c
> +++ b/drivers/crypto/scheduler/scheduler_pmd.c
> @@ -8,6 +8,7 @@
>  #include <rte_hexdump.h>
>  #include <rte_cryptodev.h>
>  #include <cryptodev_pmd.h>
> +#include <rte_security_driver.h>
>  #include <bus_vdev_driver.h>
>  #include <rte_malloc.h>
>  #include <rte_cpuflags.h>
> @@ -233,6 +234,35 @@ cryptodev_scheduler_create(const char *name,
>  		return -ENOMEM;
>  	}
> 
> +	struct rte_security_ctx *security_instance;
> +	security_instance = rte_zmalloc_socket(NULL,
> +					sizeof(struct rte_security_ctx),
> +					RTE_CACHE_LINE_SIZE,
> SOCKET_ID_ANY);
> +	if (security_instance == NULL) {
> +		CR_SCHED_LOG(ERR, "rte_security_ctx memory alloc
> failed");
> +		return -ENOMEM;

[Anoob] The lines above this adds regular cryptodev capabilities. Don't we need to free that as well? 

> +	}
> +
> +	security_instance->device = (void *)dev;
> +	security_instance->ops = rte_crypto_scheduler_pmd_sec_ops;
> +	security_instance->sess_cnt = 0;
> +	dev->security_ctx = security_instance;
> +
> +	/*
> +	 * Initialize security capabilities structure as an empty structure,
> +	 * in case device information is requested when no workers are
> attached
> +	 */
> +	sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,
> +					sizeof(struct rte_security_capability),
> +					0, SOCKET_ID_ANY);
> +
> +	if (!sched_ctx->sec_capabilities) {
> +		rte_free(security_instance);
> +		CR_SCHED_LOG(ERR, "Not enough memory for security
> capability "
> +				"information");
> +		return -ENOMEM;
> +	}
> +
>  	rte_cryptodev_pmd_probing_finish(dev);
> 
>  	return 0;
> @@ -263,6 +293,9 @@ cryptodev_scheduler_remove(struct
> rte_vdev_device *vdev)
>  					sched_ctx->workers[i].dev_id);
>  	}
> 
> +	rte_free(dev->security_ctx);
> +	dev->security_ctx = NULL;
> +
>  	return rte_cryptodev_pmd_destroy(dev);  }
> 
> diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c
> b/drivers/crypto/scheduler/scheduler_pmd_ops.c
> index 294aab4452..34d20ee2de 100644
> --- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
> +++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
> @@ -8,11 +8,212 @@
>  #include <dev_driver.h>
>  #include <rte_cryptodev.h>
>  #include <cryptodev_pmd.h>
> +#include <rte_security_driver.h>
>  #include <rte_reorder.h>
>  #include <rte_errno.h>
> 
>  #include "scheduler_pmd_private.h"
> 
> +struct scheduler_configured_sess_info {
> +	uint8_t dev_id;
> +	uint8_t driver_id;
> +	union {
> +		struct rte_cryptodev_sym_session *sess;
> +		struct {
> +			struct rte_security_session *sec_sess;
> +			struct rte_security_ctx *sec_ctx;
> +		};
> +	};
> +};
> +
> +static int
> +scheduler_session_create(void *sess, void *sess_params,
> +		struct scheduler_ctx *sched_ctx,
> +		enum rte_crypto_op_sess_type session_type) {
> +	struct rte_mempool *mp = rte_mempool_from_obj(sess);
> +	struct scheduler_session_ctx *sess_ctx;
> +	struct scheduler_configured_sess_info configured_sess[
> +			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
> = {{0}};
> +	uint32_t i, j, n_configured_sess = 0;
> +	int ret = 0;
> +
> +	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
> +		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(
> +				(struct rte_cryptodev_sym_session *)sess);
> +	else
> +		sess_ctx = SECURITY_GET_SESS_PRIV(
> +				(struct rte_security_session *)sess);
> +
> +	if (mp == NULL)
> +		return -EINVAL;
> +
> +	for (i = 0; i < sched_ctx->nb_workers; i++) {
> +		struct scheduler_worker *worker = &sched_ctx->workers[i];
> +		struct rte_cryptodev *dev = &rte_cryptodevs[worker-
> >dev_id];
> +		uint8_t next_worker = 0;
> +
> +		for (j = 0; j < n_configured_sess; j++) {
> +			if (configured_sess[j].driver_id == worker-
> >driver_id) {
> +				if (session_type ==
> RTE_CRYPTO_OP_WITH_SESSION)
> +					sess_ctx->worker_sess[i] =
> +						configured_sess[j].sess;
> +				else
> +					sess_ctx->worker_sec_sess[i] =
> +						configured_sess[j].sec_sess;
> +
> +				next_worker = 1;
> +				break;
> +			}
> +		}
> +		if (next_worker)
> +			continue;
> +
> +		if (rte_mempool_avail_count(mp) == 0) {
> +			ret = -ENOMEM;
> +			goto error_exit;
> +		}
> +
> +		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			struct rte_cryptodev_sym_session *worker_sess =
> +				rte_cryptodev_sym_session_create(worker-
> >dev_id,
> +						(struct
> rte_crypto_sym_xform *)

[Anoob] Is this cast required?

> +						sess_params, mp);
> +
> +			if (worker_sess == NULL) {
> +				ret = -rte_errno;
> +				goto error_exit;
> +			}
> +
> +			worker_sess->opaque_data = (uint64_t)sess;
> +			sess_ctx->worker_sess[i] = worker_sess;
> +			configured_sess[n_configured_sess].sess =
> worker_sess;
> +		} else {
> +			struct rte_security_session *worker_sess =
> +				rte_security_session_create(dev-
> >security_ctx,
> +					(struct rte_security_session_conf *)
> +					sess_params, mp);
> +
> +			if (worker_sess == NULL) {
> +				ret = -rte_errno;
> +				goto error_exit;
> +			}
> +
> +			worker_sess->opaque_data = (uint64_t)sess;
> +			sess_ctx->worker_sec_sess[i] = worker_sess;
> +			configured_sess[n_configured_sess].sec_sess =
> +							worker_sess;
> +			configured_sess[n_configured_sess].sec_ctx =
> +							dev->security_ctx;
> +		}
> +
> +		configured_sess[n_configured_sess].driver_id =
> +							worker->driver_id;
> +		configured_sess[n_configured_sess].dev_id = worker-
> >dev_id;
> +		n_configured_sess++;
> +	}
> +
> +	return 0;
> +
> +error_exit:
> +	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
> +	for (i = 0; i < n_configured_sess; i++) {
> +		if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
> +			rte_cryptodev_sym_session_free(
> +						configured_sess[i].dev_id,
> +						configured_sess[i].sess);
> +		else
> +			rte_security_session_destroy(
> +						configured_sess[i].sec_ctx,
> +						configured_sess[i].sec_sess);
> +	}
> +
> +	return ret;
> +}
> +
> +static void
> +scheduler_session_destroy(void *sess, struct scheduler_ctx *sched_ctx,
> +		uint8_t session_type)
> +{
> +	struct scheduler_session_ctx *sess_ctx;
> +	struct scheduler_configured_sess_info deleted_sess[
> +			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
> = {{0}};
> +	uint32_t i, j, n_deleted_sess = 0;
> +
> +	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
> +		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(
> +				(struct rte_cryptodev_sym_session *)sess);
> +	else
> +		sess_ctx = SECURITY_GET_SESS_PRIV(
> +				(struct rte_security_session *)sess);
> +
> +	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
> +		CR_SCHED_LOG(WARNING,
> +			"Worker updated between session
> creation/deletion. "
> +			"The session may not be freed fully.");
> +	}
> +
> +	for (i = 0; i < sched_ctx->nb_workers; i++) {
> +		struct scheduler_worker *worker = &sched_ctx->workers[i];
> +		struct rte_cryptodev *dev = &rte_cryptodevs[worker-
> >dev_id];
> +		uint8_t next_worker = 0;
> +
> +		for (j = 0; j < n_deleted_sess; j++) {
> +			if (deleted_sess[j].driver_id == worker->driver_id) {
> +				if (session_type ==
> RTE_CRYPTO_OP_WITH_SESSION)
> +					sess_ctx->worker_sess[i] = NULL;
> +				else
> +					sess_ctx->worker_sec_sess[i] =
> NULL;
> +
> +				next_worker = 1;
> +				break;
> +			}
> +		}
> +		if (next_worker)
> +			continue;
> +
> +		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			rte_cryptodev_sym_session_free(worker->dev_id,
> +						sess_ctx->worker_sess[i]);
> +			sess_ctx->worker_sess[i] = NULL;
> +		} else {
> +			rte_security_session_destroy(dev->security_ctx,
> +						sess_ctx-
> >worker_sec_sess[i]);
> +			sess_ctx->worker_sec_sess[i] = NULL;
> +		}
> +
> +		deleted_sess[n_deleted_sess++].driver_id = worker-
> >driver_id;
> +	}
> +}
> +
> +static unsigned int
> +scheduler_session_size_get(struct scheduler_ctx *sched_ctx,
> +		uint8_t session_type)
> +{
> +	uint8_t i = 0;
> +	uint32_t max_priv_sess_size = 0;
> +
> +	/* Check what is the maximum private session size for all workers */
> +	for (i = 0; i < sched_ctx->nb_workers; i++) {
> +		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
> +		struct rte_cryptodev *dev =
> &rte_cryptodevs[worker_dev_id];
> +		struct rte_security_ctx *sec_ctx = dev->security_ctx;
> +		uint32_t priv_sess_size = 0;
> +
> +		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +			priv_sess_size =
> +				(*dev->dev_ops-
> >sym_session_get_size)(dev);
> +		} else {
> +			priv_sess_size = (*sec_ctx->ops-
> >session_get_size)(dev);
> +		}
> +
> +		if (max_priv_sess_size < priv_sess_size)
> +			max_priv_sess_size = priv_sess_size;

[Anoob] Should we use RTE_MAX?

> +	}
> +
> +	return max_priv_sess_size;
> +}
> +
>  /** attaching the workers predefined by scheduler's EAL options */  static
> int  scheduler_attach_init_worker(struct rte_cryptodev *dev) @@ -265,10
> +466,7 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
>  		sched_ctx->private_ctx = NULL;
>  	}
> 
> -	if (sched_ctx->capabilities) {
> -		rte_free(sched_ctx->capabilities);
> -		sched_ctx->capabilities = NULL;
> -	}
> +	scheduler_free_capabilities(sched_ctx);
> 
>  	return 0;
>  }
> @@ -451,92 +649,22 @@ scheduler_pmd_qp_setup(struct rte_cryptodev
> *dev, uint16_t qp_id,  }
> 
>  static uint32_t
> -scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev
> __rte_unused)
> +scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
>  {
>  	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
> -	uint8_t i = 0;
> -	uint32_t max_priv_sess_size = 0;
> -
> -	/* Check what is the maximum private session size for all workers */
> -	for (i = 0; i < sched_ctx->nb_workers; i++) {
> -		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
> -		struct rte_cryptodev *dev =
> &rte_cryptodevs[worker_dev_id];
> -		uint32_t priv_sess_size = (*dev->dev_ops-
> >sym_session_get_size)(dev);
> 
> -		if (max_priv_sess_size < priv_sess_size)
> -			max_priv_sess_size = priv_sess_size;
> -	}
> -
> -	return max_priv_sess_size;
> +	return scheduler_session_size_get(sched_ctx,
> +RTE_CRYPTO_OP_WITH_SESSION);
>  }
> 
> -struct scheduler_configured_sess_info {
> -	uint8_t dev_id;
> -	uint8_t driver_id;
> -	struct rte_cryptodev_sym_session *sess;
> -};
> -
>  static int
>  scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
>  	struct rte_crypto_sym_xform *xform,
>  	struct rte_cryptodev_sym_session *sess)  {
>  	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
> -	struct rte_mempool *mp = rte_mempool_from_obj(sess);
> -	struct scheduler_session_ctx *sess_ctx =
> CRYPTODEV_GET_SYM_SESS_PRIV(sess);
> -	struct scheduler_configured_sess_info configured_sess[
> -			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
> = {{0}};
> -	uint32_t i, j, n_configured_sess = 0;
> -	int ret = 0;
> -
> -	if (mp == NULL)
> -		return -EINVAL;
> 
> -	for (i = 0; i < sched_ctx->nb_workers; i++) {
> -		struct scheduler_worker *worker = &sched_ctx->workers[i];
> -		struct rte_cryptodev_sym_session *worker_sess;
> -		uint8_t next_worker = 0;
> -
> -		for (j = 0; j < n_configured_sess; j++) {
> -			if (configured_sess[j].driver_id ==
> -					worker->driver_id) {
> -				sess_ctx->worker_sess[i] =
> -					configured_sess[j].sess;
> -				next_worker = 1;
> -				break;
> -			}
> -		}
> -		if (next_worker)
> -			continue;
> -
> -		if (rte_mempool_avail_count(mp) == 0) {
> -			ret = -ENOMEM;
> -			goto error_exit;
> -		}
> -
> -		worker_sess = rte_cryptodev_sym_session_create(worker-
> >dev_id,
> -			xform, mp);
> -		if (worker_sess == NULL) {
> -			ret = -rte_errno;
> -			goto error_exit;
> -		}
> -
> -		worker_sess->opaque_data = (uint64_t)sess;
> -		sess_ctx->worker_sess[i] = worker_sess;
> -		configured_sess[n_configured_sess].driver_id =
> -			worker->driver_id;
> -		configured_sess[n_configured_sess].dev_id = worker-
> >dev_id;
> -		configured_sess[n_configured_sess].sess = worker_sess;
> -		n_configured_sess++;
> -	}
> -
> -	return 0;
> -error_exit:
> -	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
> -	for (i = 0; i < n_configured_sess; i++)
> -
> 	rte_cryptodev_sym_session_free(configured_sess[i].dev_id,
> -			configured_sess[i].sess);
> -	return ret;
> +	return scheduler_session_create((void *)sess, (void *)xform,
> sched_ctx,
> +				RTE_CRYPTO_OP_WITH_SESSION);
>  }
> 
>  /** Clear the memory of session so it doesn't leave key material behind */
> @@ -545,37 +673,9 @@ scheduler_pmd_sym_session_clear(struct
> rte_cryptodev *dev,
>  		struct rte_cryptodev_sym_session *sess)  {
>  	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
> -	struct scheduler_session_ctx *sess_ctx =
> CRYPTODEV_GET_SYM_SESS_PRIV(sess);
> -	struct scheduler_configured_sess_info deleted_sess[
> -			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
> = {{0}};
> -	uint32_t i, j, n_deleted_sess = 0;
> -
> -	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
> -		CR_SCHED_LOG(WARNING,
> -			"Worker updated between session
> creation/deletion. "
> -			"The session may not be freed fully.");
> -	}
> -
> -	for (i = 0; i < sched_ctx->nb_workers; i++) {
> -		struct scheduler_worker *worker = &sched_ctx->workers[i];
> -		uint8_t next_worker = 0;
> 
> -		for (j = 0; j < n_deleted_sess; j++) {
> -			if (deleted_sess[j].driver_id == worker->driver_id) {
> -				sess_ctx->worker_sess[i] = NULL;
> -				next_worker = 1;
> -				break;
> -			}
> -		}
> -		if (next_worker)
> -			continue;
> -
> -		rte_cryptodev_sym_session_free(worker->dev_id,
> -			sess_ctx->worker_sess[i]);
> -
> -		deleted_sess[n_deleted_sess++].driver_id = worker-
> >driver_id;
> -		sess_ctx->worker_sess[i] = NULL;
> -	}
> +	scheduler_session_destroy((void *)sess, sched_ctx,
> +				RTE_CRYPTO_OP_WITH_SESSION);
>  }
> 
>  static struct rte_cryptodev_ops scheduler_pmd_ops = { @@ -598,3 +698,68
> @@ static struct rte_cryptodev_ops scheduler_pmd_ops = {  };
> 
>  struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops =
> &scheduler_pmd_ops;
> +
> +/** Configure a scheduler session from a security session configuration
> +*/ static int scheduler_pmd_sec_sess_create(void *dev, struct
> +rte_security_session_conf *conf,
> +			struct rte_security_session *sess)
> +{
> +	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;

[Anoob] Is this cast required?

> +	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
> +
> +	/* Check for supported security protocols */
> +	if (!scheduler_check_sec_proto_supp(conf->action_type, conf-
> >protocol)) {
> +		CR_SCHED_LOG(ERR, "Unsupported security protocol");
> +		return -ENOTSUP;
> +	}
> +
> +	return scheduler_session_create((void *)sess, (void *)conf,
> sched_ctx,
> +				RTE_CRYPTO_OP_SECURITY_SESSION);
> +}
> +
> +/** Clear the memory of session so it doesn't leave key material behind
> +*/ static int scheduler_pmd_sec_sess_destroy(void *dev,
> +			       struct rte_security_session *sess) {
> +	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;

[Anoob] Is this cast required?

> +	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
> +
> +	scheduler_session_destroy((void *)sess, sched_ctx,
> +				RTE_CRYPTO_OP_SECURITY_SESSION);
> +
> +	return 0;
> +}
> +
> +/** Get sync security capabilities for scheduler pmds */ static const
> +struct rte_security_capability * scheduler_pmd_sec_capa_get(void *dev)
> +{
> +	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;

[Anoob] Is this cast required?

> +	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
> +
> +	return sched_ctx->sec_capabilities;
> +}
> +
> +static unsigned int
> +scheduler_pmd_sec_sess_size_get(void *dev) {
> +	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;

[Anoob] Is this cast required?

> +	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
> +
> +	return scheduler_session_size_get(sched_ctx,
> +				RTE_CRYPTO_OP_SECURITY_SESSION);
> +}
> +
> +static struct rte_security_ops scheduler_pmd_sec_ops = {
> +		.session_create = scheduler_pmd_sec_sess_create,
> +		.session_update = NULL,
> +		.session_get_size = scheduler_pmd_sec_sess_size_get,
> +		.session_stats_get = NULL,
> +		.session_destroy = scheduler_pmd_sec_sess_destroy,
> +		.set_pkt_metadata = NULL,
> +		.capabilities_get = scheduler_pmd_sec_capa_get };
> +
> +struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops =
> +
> 	&scheduler_pmd_sec_ops;
> diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h
> b/drivers/crypto/scheduler/scheduler_pmd_private.h
> index 36d0bb6307..ff1e7a83e8 100644
> --- a/drivers/crypto/scheduler/scheduler_pmd_private.h
> +++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
> @@ -5,6 +5,8 @@
>  #ifndef _SCHEDULER_PMD_PRIVATE_H
>  #define _SCHEDULER_PMD_PRIVATE_H
> 
> +#include <rte_security_driver.h>
> +
>  #include "rte_cryptodev_scheduler.h"
> 
>  #define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
> @@ -30,7 +32,8 @@ struct scheduler_ctx {
>  	/**< private scheduler context pointer */
> 
>  	struct rte_cryptodev_capabilities *capabilities;
> -	uint32_t nb_capabilities;
> +	struct rte_security_capability *sec_capabilities;
> +	struct rte_cryptodev_capabilities **sec_crypto_capabilities;
> 
>  	uint32_t max_nb_queue_pairs;
> 
> @@ -64,8 +67,12 @@ struct scheduler_qp_ctx {
> 
>  struct scheduler_session_ctx {
>  	uint32_t ref_cnt;
> -	struct rte_cryptodev_sym_session *worker_sess[
> -		RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
> +	union {
> +		struct rte_cryptodev_sym_session *worker_sess[
> +			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
> +		struct rte_security_session *worker_sec_sess[
> +			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
> +	};
>  };
> 
>  extern uint8_t cryptodev_scheduler_driver_id; @@ -108,7 +115,22 @@
> scheduler_order_drain(struct rte_ring *order_ring,  }
> 
>  static __rte_always_inline void
> -scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t
> nb_ops,
> +scheduler_set_single_worker_session(struct rte_crypto_op *op,
> +		uint8_t worker_idx)
> +{
> +	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +		struct scheduler_session_ctx *sess_ctx =
> +				CRYPTODEV_GET_SYM_SESS_PRIV(op->sym-
> >session);
> +		op->sym->session = sess_ctx->worker_sess[worker_idx];
> +	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
> +		struct scheduler_session_ctx *sess_ctx =
> +				SECURITY_GET_SESS_PRIV(op->sym-
> >session);
> +		op->sym->session = sess_ctx-
> >worker_sec_sess[worker_idx];
> +	}
> +}
> +
> +static __rte_always_inline void
> +scheduler_set_worker_sessions(struct rte_crypto_op **ops, uint16_t
> +nb_ops,
>  		uint8_t worker_index)
>  {
>  	struct rte_crypto_op **op = ops;
> @@ -129,52 +151,34 @@ scheduler_set_worker_session(struct
> rte_crypto_op **ops, uint16_t nb_ops,
>  			rte_prefetch0(op[7]->sym->session);
>  		}
> 
> -		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> -			struct scheduler_session_ctx *sess_ctx =
> -				CRYPTODEV_GET_SYM_SESS_PRIV(op[0]-
> >sym->session);
> -			op[0]->sym->session =
> -				sess_ctx->worker_sess[worker_index];
> -		}
> -
> -		if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> -			struct scheduler_session_ctx *sess_ctx =
> -				CRYPTODEV_GET_SYM_SESS_PRIV(op[1]-
> >sym->session);
> -			op[1]->sym->session =
> -				sess_ctx->worker_sess[worker_index];
> -		}
> -
> -		if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> -			struct scheduler_session_ctx *sess_ctx =
> -				CRYPTODEV_GET_SYM_SESS_PRIV(op[2]-
> >sym->session);
> -			op[2]->sym->session =
> -				sess_ctx->worker_sess[worker_index];
> -		}
> -
> -		if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> -			struct scheduler_session_ctx *sess_ctx =
> -				CRYPTODEV_GET_SYM_SESS_PRIV(op[3]-
> >sym->session);
> -			op[3]->sym->session =
> -				sess_ctx->worker_sess[worker_index];
> -		}
> +		scheduler_set_single_worker_session(op[0],
> worker_index);
> +		scheduler_set_single_worker_session(op[1],
> worker_index);
> +		scheduler_set_single_worker_session(op[2],
> worker_index);
> +		scheduler_set_single_worker_session(op[3],
> worker_index);
> 
>  		op += 4;
>  		n -= 4;
>  	}
> 
>  	while (n--) {
> -		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> -			struct scheduler_session_ctx *sess_ctx =
> -				CRYPTODEV_GET_SYM_SESS_PRIV(op[0]-
> >sym->session);
> -
> -			op[0]->sym->session =
> -				sess_ctx->worker_sess[worker_index];
> -			op++;
> -		}
> +		scheduler_set_single_worker_session(op[0],
> worker_index);
> +		op++;
>  	}
>  }
> 
>  static __rte_always_inline void
> -scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t nb_ops)
> +scheduler_retrieve_single_session(struct rte_crypto_op *op) {
> +	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> +		op->sym->session = (void *)(uintptr_t)
> +			rte_cryptodev_sym_session_opaque_data_get(op-
> >sym->session);
> +	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
> +		op->sym->session = (void *)(uintptr_t)
> +			rte_security_session_opaque_data_get(op->sym-
> >session);
> +}
> +
> +static __rte_always_inline void
> +scheduler_retrieve_sessions(struct rte_crypto_op **ops, uint16_t
> +nb_ops)
>  {
>  	uint16_t n = nb_ops;
>  	struct rte_crypto_op **op = ops;
> @@ -194,32 +198,77 @@ scheduler_retrieve_session(struct rte_crypto_op
> **ops, uint16_t nb_ops)
>  			rte_prefetch0(op[7]->sym->session);
>  		}
> 
> -		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> -			op[0]->sym->session = (void *)(uintptr_t)
> -
> 	rte_cryptodev_sym_session_opaque_data_get(op[0]->sym-
> >session);
> -		if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> -			op[1]->sym->session = (void *)(uintptr_t)
> -
> 	rte_cryptodev_sym_session_opaque_data_get(op[1]->sym-
> >session);
> -		if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> -			op[2]->sym->session = (void *)(uintptr_t)
> -
> 	rte_cryptodev_sym_session_opaque_data_get(op[2]->sym-
> >session);
> -		if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> -			op[3]->sym->session = (void *)(uintptr_t)
> -
> 	rte_cryptodev_sym_session_opaque_data_get(op[3]->sym-
> >session);
> +		scheduler_retrieve_single_session(op[0]);
> +		scheduler_retrieve_single_session(op[1]);
> +		scheduler_retrieve_single_session(op[2]);
> +		scheduler_retrieve_single_session(op[3]);
> 
>  		op += 4;
>  		n -= 4;
>  	}
> 
>  	while (n--) {
> -		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> -			op[0]->sym->session = (void *)(uintptr_t)
> -
> 	rte_cryptodev_sym_session_opaque_data_get(op[0]->sym-
> >session);
> +		scheduler_retrieve_single_session(op[0]);
>  		op++;
>  	}
>  }
> 
> +static __rte_always_inline uint32_t
> +scheduler_get_job_len(struct rte_crypto_op *op) {
> +	uint32_t job_len;
> +
> +	/* op_len is initialized as cipher data length, if
> +	 * it is 0, then it is set to auth data length
> +	 */
> +	job_len = op->sym->cipher.data.length;
> +	job_len += (op->sym->cipher.data.length == 0) *
> +					op->sym->auth.data.length;
> +
> +	return job_len;
> +}
> +
> +static __rte_always_inline void
> +scheduler_free_capabilities(struct scheduler_ctx *sched_ctx) {
> +	uint32_t i;
> +
> +	if (sched_ctx->capabilities) {
> +		rte_free(sched_ctx->capabilities);
> +		sched_ctx->capabilities = NULL;
> +	}
> +
> +	if (sched_ctx->sec_crypto_capabilities) {
> +		i = 0;
> +		while (sched_ctx->sec_crypto_capabilities[i] != NULL) {
> +			rte_free(sched_ctx->sec_crypto_capabilities[i]);
> +			sched_ctx->sec_crypto_capabilities[i] = NULL;
> +			i++;
> +		}
> +
> +		rte_free(sched_ctx->sec_crypto_capabilities);
> +		sched_ctx->sec_crypto_capabilities = NULL;
> +	}
> +
> +	if (sched_ctx->sec_capabilities) {
> +		rte_free(sched_ctx->sec_capabilities);
> +		sched_ctx->sec_capabilities = NULL;
> +	}
> +}
> +
> +static __rte_always_inline int
> +scheduler_check_sec_proto_supp(enum
> rte_security_session_action_type action,
> +		enum rte_security_session_protocol protocol) {
> +	if (action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
> &&
> +			protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
> +		return 1;
> +
> +	return 0;
> +}
> +
>  /** device specific operations function pointer structure */  extern struct
> rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
> +extern struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops;
> 
>  #endif /* _SCHEDULER_PMD_PRIVATE_H */
> diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c
> b/drivers/crypto/scheduler/scheduler_roundrobin.c
> index ad3f8b842a..08041887a8 100644
> --- a/drivers/crypto/scheduler/scheduler_roundrobin.c
> +++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
> @@ -28,11 +28,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op
> **ops, uint16_t nb_ops)
>  	if (unlikely(nb_ops == 0))
>  		return 0;
> 
> -	scheduler_set_worker_session(ops, nb_ops, worker_idx);
> +	scheduler_set_worker_sessions(ops, nb_ops, worker_idx);
>  	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
>  			worker->qp_id, ops, nb_ops);
>  	if (processed_ops < nb_ops)
> -		scheduler_retrieve_session(ops + processed_ops,
> +		scheduler_retrieve_sessions(ops + processed_ops,
>  			nb_ops - processed_ops);
> 
>  	worker->nb_inflight_cops += processed_ops; @@ -87,7 +87,7 @@
> schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
> 
>  	nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
>  			worker->qp_id, ops, nb_ops);
> -	scheduler_retrieve_session(ops, nb_deq_ops);
> +	scheduler_retrieve_sessions(ops, nb_deq_ops);
>  	last_worker_idx += 1;
>  	last_worker_idx %= rr_qp_ctx->nb_workers;
> 
> --
> 2.25.1
  
Coyle, David Sept. 19, 2023, 2:16 p.m. UTC | #2
Hi Anoob,

Thank you for the comments.
See inline below for replies.

Regards,
David

> -----Original Message-----
> From: Anoob Joseph <anoobj@marvell.com>
> Sent: Monday, September 18, 2023 12:03 PM
> To: Coyle, David <david.coyle@intel.com>
> Cc: Ji, Kai <kai.ji@intel.com>; O'Sullivan, Kevin <kevin.osullivan@intel.com>;
> dev@dpdk.org; Jerin Jacob Kollanukkaran <jerinj@marvell.com>
> Subject: RE: [EXT] [PATCH v3 1/2] crypto/scheduler: support DOCSIS security
> protocol
> 
> Hi David,
> 
> Thanks for updating the patches based on the comments provided on
> previous version. Please see inline for some comments on code.
> 
> Thanks,
> Anoob
> 
> > -----Original Message-----
> > From: David Coyle <david.coyle@intel.com>
> > Sent: Thursday, September 14, 2023 8:52 PM
> > To: dev@dpdk.org
> > Cc: kai.ji@intel.com; Anoob Joseph <anoobj@marvell.com>;
> > kevin.osullivan@intel.com; David Coyle <david.coyle@intel.com>
> > Subject: [EXT] [PATCH v3 1/2] crypto/scheduler: support DOCSIS
> > security protocol
> >
> > External Email
> >
> > ----------------------------------------------------------------------
> > Add support to the cryptodev scheduler PMD for the DOCSIS security
> > protocol. This includes adding the following to the scheduler:
> > - synchronization of worker's security capabilities
> > - retrieval of the scheduler's synchronized security capabilities
> > - retrieval of the security session size i.e. maximum session size
> >   across all workers
> > - creation of security sessions on each worker
> > - deletion of security sessions on each worker
> >
> > Signed-off-by: David Coyle <david.coyle@intel.com>
> > Signed-off-by: Kevin O'Sullivan <kevin.osullivan@intel.com>
> > ---
> >  doc/guides/rel_notes/release_23_11.rst        |   4 +
> >  drivers/crypto/scheduler/meson.build          |   2 +-
> >  .../scheduler/rte_cryptodev_scheduler.c       | 221 +++++++++-
> >  drivers/crypto/scheduler/scheduler_failover.c |  12 +-
> >  .../crypto/scheduler/scheduler_multicore.c    |  10 +-
> >  .../scheduler/scheduler_pkt_size_distr.c      |  54 +--
> >  drivers/crypto/scheduler/scheduler_pmd.c      |  33 ++
> >  drivers/crypto/scheduler/scheduler_pmd_ops.c  | 381
> > +++++++++++++----- .../crypto/scheduler/scheduler_pmd_private.h  | 159
> +++++---
> >  .../crypto/scheduler/scheduler_roundrobin.c   |   6 +-
> >  10 files changed, 653 insertions(+), 229 deletions(-)
> >
> 
> <snip>
> 
> > diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
> > b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
> > index 258d6f8c43..e8b905af2f 100644
> > --- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
> > +++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
> > @@ -5,11 +5,14 @@
> >  #include <rte_reorder.h>
> >  #include <rte_cryptodev.h>
> >  #include <cryptodev_pmd.h>
> > +#include <rte_security_driver.h>
> >  #include <rte_malloc.h>
> >
> >  #include "rte_cryptodev_scheduler.h"
> >  #include "scheduler_pmd_private.h"
> >
> > +#define MAX_CAPS 256
> > +
> >  /** update the scheduler pmd's capability with attaching device's
> >   *  capability.
> >   *  For each device to be attached, the scheduler's capability should
> > be @@ -
> > 59,7 +62,6 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
> >  					cap->sym.auth.digest_size.max ?
> >  					s_cap->sym.auth.digest_size.max :
> >  					cap->sym.auth.digest_size.max;
> > -
> >  			}
> >
> >  			if (s_cap->sym.xform_type ==
> > @@ -81,25 +83,176 @@ sync_caps(struct rte_cryptodev_capabilities
> > *caps,
> >
> >  		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
> >  		sync_nb_caps--;
> > +		i--;
> >  	}
> >
> >  	return sync_nb_caps;
> >  }
> >
> >  static int
> > -update_scheduler_capability(struct scheduler_ctx *sched_ctx)
> > +check_sec_cap_equal(const struct rte_security_capability *sec_cap1,
> > +		struct rte_security_capability *sec_cap2) {
> > +	if (sec_cap1->action != sec_cap2->action ||
> > +			sec_cap1->protocol != sec_cap2->protocol ||
> > +			sec_cap1->ol_flags != sec_cap2->ol_flags)
> > +		return 0;
> > +
> > +	if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
> > +		return !memcmp(&sec_cap1->docsis, &sec_cap2->docsis,
> > +				sizeof(sec_cap1->docsis));
> > +	else
> > +		return 0;
> > +}
> > +
> > +static void
> > +copy_sec_cap(struct rte_security_capability *dst_sec_cap,
> > +		struct rte_security_capability *src_sec_cap) {
> > +	dst_sec_cap->action = src_sec_cap->action;
> > +	dst_sec_cap->protocol = src_sec_cap->protocol;
> > +	if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
> > +		dst_sec_cap->docsis = src_sec_cap->docsis;
> > +	dst_sec_cap->ol_flags = src_sec_cap->ol_flags; }
> > +
> > +static uint32_t
> > +sync_sec_crypto_caps(struct rte_cryptodev_capabilities
> > *tmp_sec_crypto_caps,
> > +		const struct rte_cryptodev_capabilities *sec_crypto_caps,
> > +		const struct rte_cryptodev_capabilities
> > *worker_sec_crypto_caps) {
> > +	uint8_t nb_caps = 0;
> > +
> > +	nb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps,
> > sec_crypto_caps);
> > +	sync_caps(tmp_sec_crypto_caps, nb_caps,
> > worker_sec_crypto_caps);
> > +
> > +	return nb_caps;
> > +}
> > +
> > +/** update the scheduler pmd's security capability with attaching
> > +device's
> > + *  security capability.
> > + *  For each device to be attached, the scheduler's security
> > +capability should
> > + *  be the common capability set of all workers  **/ static uint32_t
> > +sync_sec_caps(uint32_t worker_idx,
> > +		struct rte_security_capability *sec_caps,
> > +		struct rte_cryptodev_capabilities
> > sec_crypto_caps[][MAX_CAPS],
> > +		uint32_t nb_sec_caps,
> > +		const struct rte_security_capability *worker_sec_caps)
> >  {
> > -	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
> > -	uint32_t nb_caps = 0, i;
> > +	uint32_t nb_worker_sec_caps = 0, i;
> > +
> > +	if (worker_sec_caps == NULL)
> > +		return 0;
> > +
> > +	while (worker_sec_caps[nb_worker_sec_caps].action !=
> > +
> > 	RTE_SECURITY_ACTION_TYPE_NONE)
> > +		nb_worker_sec_caps++;
> > +
> > +	/* Handle first worker */
> > +	if (worker_idx == 0) {
> > +		uint32_t nb_worker_sec_crypto_caps = 0;
> > +		uint32_t nb_worker_supp_sec_caps = 0;
> > +
> > +		for (i = 0; i < nb_worker_sec_caps; i++) {
> > +			/* Check for supported security protocols */
> > +			if
> > (!scheduler_check_sec_proto_supp(worker_sec_caps[i].action,
> > +					worker_sec_caps[i].protocol))
> > +				continue;
> >
> > -	if (sched_ctx->capabilities) {
> > -		rte_free(sched_ctx->capabilities);
> > -		sched_ctx->capabilities = NULL;
> > +			sec_caps[nb_worker_supp_sec_caps] =
> > worker_sec_caps[i];
> > +
> > +			while (worker_sec_caps[i].crypto_capabilities[
> > +					nb_worker_sec_crypto_caps].op !=
> > +
> > 	RTE_CRYPTO_OP_TYPE_UNDEFINED)
> > +				nb_worker_sec_crypto_caps++;
> > +
> > +
> > 	rte_memcpy(&sec_crypto_caps[nb_worker_supp_sec_caps][0],
> > +				&worker_sec_caps[i].crypto_capabilities[0],
> 
> [Anoob] Isn't it possible to have 2 different security devices which may differ in
> crypto capabilities? My understanding is, the code assumes that crypto
> capability of both devices would match. It's okay to document it as a known
> limitation if it is too difficult to solve.

[DC] Yes, it is possible, and this is handled.

The block of code here is handling the security capabilities and the associated crypto capabilities for the very first worker.
At this point, the scheduler's capabilities become exactly those of the worker.

The next block of code, starting at the next for loop, synchs the security capabilities for the second and subsequent workers with the scheduler's current running capabilities.
For each common security capability between the workers, the associated crypto capabilities are also synched to ultimately leave only the common crypto capabilities for
each security capability - see the call to sync_sec_crypto_caps()

> 
> > +
> > 	sizeof(sec_crypto_caps[nb_worker_supp_sec_caps][0]) *
> > +					nb_worker_sec_crypto_caps);
> > +
> > +			nb_worker_supp_sec_caps++;
> > +		}
> > +		return nb_worker_supp_sec_caps;
> >  	}
> >
> 
> <snip>
> 
> > diff --git a/drivers/crypto/scheduler/scheduler_pmd.c
> > b/drivers/crypto/scheduler/scheduler_pmd.c
> > index 4e8bbf0e09..6dad9bc3dd 100644
> > --- a/drivers/crypto/scheduler/scheduler_pmd.c
> > +++ b/drivers/crypto/scheduler/scheduler_pmd.c
> > @@ -8,6 +8,7 @@
> >  #include <rte_hexdump.h>
> >  #include <rte_cryptodev.h>
> >  #include <cryptodev_pmd.h>
> > +#include <rte_security_driver.h>
> >  #include <bus_vdev_driver.h>
> >  #include <rte_malloc.h>
> >  #include <rte_cpuflags.h>
> > @@ -233,6 +234,35 @@ cryptodev_scheduler_create(const char *name,
> >  		return -ENOMEM;
> >  	}
> >
> > +	struct rte_security_ctx *security_instance;
> > +	security_instance = rte_zmalloc_socket(NULL,
> > +					sizeof(struct rte_security_ctx),
> > +					RTE_CACHE_LINE_SIZE,
> > SOCKET_ID_ANY);
> > +	if (security_instance == NULL) {
> > +		CR_SCHED_LOG(ERR, "rte_security_ctx memory alloc
> > failed");
> > +		return -ENOMEM;
> 
> [Anoob] The lines above this adds regular cryptodev capabilities. Don't we
> need to free that as well?

[DC] Yes, good spot. I have added a free_mem() function to free any previously allocated memory from this function in the v4 patch.
This includes any capability info, the security context, and the init worker names.

> 
> > +	}
> > +
> > +	security_instance->device = (void *)dev;
> > +	security_instance->ops = rte_crypto_scheduler_pmd_sec_ops;
> > +	security_instance->sess_cnt = 0;
> > +	dev->security_ctx = security_instance;
> > +
> > +	/*
> > +	 * Initialize security capabilities structure as an empty structure,
> > +	 * in case device information is requested when no workers are
> > attached
> > +	 */
> > +	sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,
> > +					sizeof(struct rte_security_capability),
> > +					0, SOCKET_ID_ANY);
> > +
> > +	if (!sched_ctx->sec_capabilities) {
> > +		rte_free(security_instance);
> > +		CR_SCHED_LOG(ERR, "Not enough memory for security
> > capability "
> > +				"information");
> > +		return -ENOMEM;
> > +	}
> > +
> >  	rte_cryptodev_pmd_probing_finish(dev);
> >
> >  	return 0;
> > @@ -263,6 +293,9 @@ cryptodev_scheduler_remove(struct
> rte_vdev_device
> > *vdev)
> >  					sched_ctx->workers[i].dev_id);
> >  	}
> >
> > +	rte_free(dev->security_ctx);
> > +	dev->security_ctx = NULL;
> > +
> >  	return rte_cryptodev_pmd_destroy(dev);  }
> >
> > diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c
> > b/drivers/crypto/scheduler/scheduler_pmd_ops.c
> > index 294aab4452..34d20ee2de 100644
> > --- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
> > +++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
> > @@ -8,11 +8,212 @@
> >  #include <dev_driver.h>
> >  #include <rte_cryptodev.h>
> >  #include <cryptodev_pmd.h>
> > +#include <rte_security_driver.h>
> >  #include <rte_reorder.h>
> >  #include <rte_errno.h>
> >
> >  #include "scheduler_pmd_private.h"
> >
> > +struct scheduler_configured_sess_info {
> > +	uint8_t dev_id;
> > +	uint8_t driver_id;
> > +	union {
> > +		struct rte_cryptodev_sym_session *sess;
> > +		struct {
> > +			struct rte_security_session *sec_sess;
> > +			struct rte_security_ctx *sec_ctx;
> > +		};
> > +	};
> > +};
> > +
> > +static int
> > +scheduler_session_create(void *sess, void *sess_params,
> > +		struct scheduler_ctx *sched_ctx,
> > +		enum rte_crypto_op_sess_type session_type) {
> > +	struct rte_mempool *mp = rte_mempool_from_obj(sess);
> > +	struct scheduler_session_ctx *sess_ctx;
> > +	struct scheduler_configured_sess_info configured_sess[
> > +			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
> > = {{0}};
> > +	uint32_t i, j, n_configured_sess = 0;
> > +	int ret = 0;
> > +
> > +	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
> > +		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(
> > +				(struct rte_cryptodev_sym_session *)sess);
> > +	else
> > +		sess_ctx = SECURITY_GET_SESS_PRIV(
> > +				(struct rte_security_session *)sess);
> > +
> > +	if (mp == NULL)
> > +		return -EINVAL;
> > +
> > +	for (i = 0; i < sched_ctx->nb_workers; i++) {
> > +		struct scheduler_worker *worker = &sched_ctx->workers[i];
> > +		struct rte_cryptodev *dev = &rte_cryptodevs[worker-
> > >dev_id];
> > +		uint8_t next_worker = 0;
> > +
> > +		for (j = 0; j < n_configured_sess; j++) {
> > +			if (configured_sess[j].driver_id == worker-
> > >driver_id) {
> > +				if (session_type ==
> > RTE_CRYPTO_OP_WITH_SESSION)
> > +					sess_ctx->worker_sess[i] =
> > +						configured_sess[j].sess;
> > +				else
> > +					sess_ctx->worker_sec_sess[i] =
> > +						configured_sess[j].sec_sess;
> > +
> > +				next_worker = 1;
> > +				break;
> > +			}
> > +		}
> > +		if (next_worker)
> > +			continue;
> > +
> > +		if (rte_mempool_avail_count(mp) == 0) {
> > +			ret = -ENOMEM;
> > +			goto error_exit;
> > +		}
> > +
> > +		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > +			struct rte_cryptodev_sym_session *worker_sess =
> > +				rte_cryptodev_sym_session_create(worker-
> > >dev_id,
> > +						(struct
> > rte_crypto_sym_xform *)
> 
> [Anoob] Is this cast required?

[DC] It wasn't required. I have removed this and all other unnecessary casts in v4

> 
> > +						sess_params, mp);
> > +
> > +			if (worker_sess == NULL) {
> > +				ret = -rte_errno;
> > +				goto error_exit;
> > +			}
> > +
> > +			worker_sess->opaque_data = (uint64_t)sess;
> > +			sess_ctx->worker_sess[i] = worker_sess;
> > +			configured_sess[n_configured_sess].sess =
> > worker_sess;
> > +		} else {
> > +			struct rte_security_session *worker_sess =
> > +				rte_security_session_create(dev-
> > >security_ctx,
> > +					(struct rte_security_session_conf *)
> > +					sess_params, mp);
> > +
> > +			if (worker_sess == NULL) {
> > +				ret = -rte_errno;
> > +				goto error_exit;
> > +			}
> > +
> > +			worker_sess->opaque_data = (uint64_t)sess;
> > +			sess_ctx->worker_sec_sess[i] = worker_sess;
> > +			configured_sess[n_configured_sess].sec_sess =
> > +							worker_sess;
> > +			configured_sess[n_configured_sess].sec_ctx =
> > +							dev->security_ctx;
> > +		}
> > +
> > +		configured_sess[n_configured_sess].driver_id =
> > +							worker->driver_id;
> > +		configured_sess[n_configured_sess].dev_id = worker-
> > >dev_id;
> > +		n_configured_sess++;
> > +	}
> > +
> > +	return 0;
> > +
> > +error_exit:
> > +	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
> > +	for (i = 0; i < n_configured_sess; i++) {
> > +		if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
> > +			rte_cryptodev_sym_session_free(
> > +						configured_sess[i].dev_id,
> > +						configured_sess[i].sess);
> > +		else
> > +			rte_security_session_destroy(
> > +						configured_sess[i].sec_ctx,
> > +						configured_sess[i].sec_sess);
> > +	}
> > +
> > +	return ret;
> > +}
> > +
> > +static void
> > +scheduler_session_destroy(void *sess, struct scheduler_ctx *sched_ctx,
> > +		uint8_t session_type)
> > +{
> > +	struct scheduler_session_ctx *sess_ctx;
> > +	struct scheduler_configured_sess_info deleted_sess[
> > +			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
> > = {{0}};
> > +	uint32_t i, j, n_deleted_sess = 0;
> > +
> > +	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
> > +		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(
> > +				(struct rte_cryptodev_sym_session *)sess);
> > +	else
> > +		sess_ctx = SECURITY_GET_SESS_PRIV(
> > +				(struct rte_security_session *)sess);
> > +
> > +	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
> > +		CR_SCHED_LOG(WARNING,
> > +			"Worker updated between session
> > creation/deletion. "
> > +			"The session may not be freed fully.");
> > +	}
> > +
> > +	for (i = 0; i < sched_ctx->nb_workers; i++) {
> > +		struct scheduler_worker *worker = &sched_ctx->workers[i];
> > +		struct rte_cryptodev *dev = &rte_cryptodevs[worker-
> > >dev_id];
> > +		uint8_t next_worker = 0;
> > +
> > +		for (j = 0; j < n_deleted_sess; j++) {
> > +			if (deleted_sess[j].driver_id == worker->driver_id) {
> > +				if (session_type ==
> > RTE_CRYPTO_OP_WITH_SESSION)
> > +					sess_ctx->worker_sess[i] = NULL;
> > +				else
> > +					sess_ctx->worker_sec_sess[i] =
> > NULL;
> > +
> > +				next_worker = 1;
> > +				break;
> > +			}
> > +		}
> > +		if (next_worker)
> > +			continue;
> > +
> > +		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > +			rte_cryptodev_sym_session_free(worker->dev_id,
> > +						sess_ctx->worker_sess[i]);
> > +			sess_ctx->worker_sess[i] = NULL;
> > +		} else {
> > +			rte_security_session_destroy(dev->security_ctx,
> > +						sess_ctx-
> > >worker_sec_sess[i]);
> > +			sess_ctx->worker_sec_sess[i] = NULL;
> > +		}
> > +
> > +		deleted_sess[n_deleted_sess++].driver_id = worker-
> > >driver_id;
> > +	}
> > +}
> > +
> > +static unsigned int
> > +scheduler_session_size_get(struct scheduler_ctx *sched_ctx,
> > +		uint8_t session_type)
> > +{
> > +	uint8_t i = 0;
> > +	uint32_t max_priv_sess_size = 0;
> > +
> > +	/* Check what is the maximum private session size for all workers */
> > +	for (i = 0; i < sched_ctx->nb_workers; i++) {
> > +		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
> > +		struct rte_cryptodev *dev =
> > &rte_cryptodevs[worker_dev_id];
> > +		struct rte_security_ctx *sec_ctx = dev->security_ctx;
> > +		uint32_t priv_sess_size = 0;
> > +
> > +		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > +			priv_sess_size =
> > +				(*dev->dev_ops-
> > >sym_session_get_size)(dev);
> > +		} else {
> > +			priv_sess_size = (*sec_ctx->ops-
> > >session_get_size)(dev);
> > +		}
> > +
> > +		if (max_priv_sess_size < priv_sess_size)
> > +			max_priv_sess_size = priv_sess_size;
> 
> [Anoob] Should we use RTE_MAX?

[DC] Yep RTE_MAX is used in v4

> 
> > +	}
> > +
> > +	return max_priv_sess_size;
> > +}
> > +
> >  /** attaching the workers predefined by scheduler's EAL options */
> > static int  scheduler_attach_init_worker(struct rte_cryptodev *dev) @@
> > -265,10
> > +466,7 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
> >  		sched_ctx->private_ctx = NULL;
> >  	}
> >
> > -	if (sched_ctx->capabilities) {
> > -		rte_free(sched_ctx->capabilities);
> > -		sched_ctx->capabilities = NULL;
> > -	}
> > +	scheduler_free_capabilities(sched_ctx);
> >
> >  	return 0;
> >  }
> > @@ -451,92 +649,22 @@ scheduler_pmd_qp_setup(struct rte_cryptodev
> > *dev, uint16_t qp_id,  }
> >
> >  static uint32_t
> > -scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev
> > __rte_unused)
> > +scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
> >  {
> >  	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
> > -	uint8_t i = 0;
> > -	uint32_t max_priv_sess_size = 0;
> > -
> > -	/* Check what is the maximum private session size for all workers */
> > -	for (i = 0; i < sched_ctx->nb_workers; i++) {
> > -		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
> > -		struct rte_cryptodev *dev =
> > &rte_cryptodevs[worker_dev_id];
> > -		uint32_t priv_sess_size = (*dev->dev_ops-
> > >sym_session_get_size)(dev);
> >
> > -		if (max_priv_sess_size < priv_sess_size)
> > -			max_priv_sess_size = priv_sess_size;
> > -	}
> > -
> > -	return max_priv_sess_size;
> > +	return scheduler_session_size_get(sched_ctx,
> > +RTE_CRYPTO_OP_WITH_SESSION);
> >  }
> >
> > -struct scheduler_configured_sess_info {
> > -	uint8_t dev_id;
> > -	uint8_t driver_id;
> > -	struct rte_cryptodev_sym_session *sess;
> > -};
> > -
> >  static int
> >  scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
> >  	struct rte_crypto_sym_xform *xform,
> >  	struct rte_cryptodev_sym_session *sess)  {
> >  	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
> > -	struct rte_mempool *mp = rte_mempool_from_obj(sess);
> > -	struct scheduler_session_ctx *sess_ctx =
> > CRYPTODEV_GET_SYM_SESS_PRIV(sess);
> > -	struct scheduler_configured_sess_info configured_sess[
> > -			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
> > = {{0}};
> > -	uint32_t i, j, n_configured_sess = 0;
> > -	int ret = 0;
> > -
> > -	if (mp == NULL)
> > -		return -EINVAL;
> >
> > -	for (i = 0; i < sched_ctx->nb_workers; i++) {
> > -		struct scheduler_worker *worker = &sched_ctx->workers[i];
> > -		struct rte_cryptodev_sym_session *worker_sess;
> > -		uint8_t next_worker = 0;
> > -
> > -		for (j = 0; j < n_configured_sess; j++) {
> > -			if (configured_sess[j].driver_id ==
> > -					worker->driver_id) {
> > -				sess_ctx->worker_sess[i] =
> > -					configured_sess[j].sess;
> > -				next_worker = 1;
> > -				break;
> > -			}
> > -		}
> > -		if (next_worker)
> > -			continue;
> > -
> > -		if (rte_mempool_avail_count(mp) == 0) {
> > -			ret = -ENOMEM;
> > -			goto error_exit;
> > -		}
> > -
> > -		worker_sess = rte_cryptodev_sym_session_create(worker-
> > >dev_id,
> > -			xform, mp);
> > -		if (worker_sess == NULL) {
> > -			ret = -rte_errno;
> > -			goto error_exit;
> > -		}
> > -
> > -		worker_sess->opaque_data = (uint64_t)sess;
> > -		sess_ctx->worker_sess[i] = worker_sess;
> > -		configured_sess[n_configured_sess].driver_id =
> > -			worker->driver_id;
> > -		configured_sess[n_configured_sess].dev_id = worker-
> > >dev_id;
> > -		configured_sess[n_configured_sess].sess = worker_sess;
> > -		n_configured_sess++;
> > -	}
> > -
> > -	return 0;
> > -error_exit:
> > -	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
> > -	for (i = 0; i < n_configured_sess; i++)
> > -
> > 	rte_cryptodev_sym_session_free(configured_sess[i].dev_id,
> > -			configured_sess[i].sess);
> > -	return ret;
> > +	return scheduler_session_create((void *)sess, (void *)xform,
> > sched_ctx,
> > +				RTE_CRYPTO_OP_WITH_SESSION);
> >  }
> >
> >  /** Clear the memory of session so it doesn't leave key material
> > behind */ @@ -545,37 +673,9 @@
> scheduler_pmd_sym_session_clear(struct
> > rte_cryptodev *dev,
> >  		struct rte_cryptodev_sym_session *sess)  {
> >  	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
> > -	struct scheduler_session_ctx *sess_ctx =
> > CRYPTODEV_GET_SYM_SESS_PRIV(sess);
> > -	struct scheduler_configured_sess_info deleted_sess[
> > -			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
> > = {{0}};
> > -	uint32_t i, j, n_deleted_sess = 0;
> > -
> > -	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
> > -		CR_SCHED_LOG(WARNING,
> > -			"Worker updated between session
> > creation/deletion. "
> > -			"The session may not be freed fully.");
> > -	}
> > -
> > -	for (i = 0; i < sched_ctx->nb_workers; i++) {
> > -		struct scheduler_worker *worker = &sched_ctx->workers[i];
> > -		uint8_t next_worker = 0;
> >
> > -		for (j = 0; j < n_deleted_sess; j++) {
> > -			if (deleted_sess[j].driver_id == worker->driver_id) {
> > -				sess_ctx->worker_sess[i] = NULL;
> > -				next_worker = 1;
> > -				break;
> > -			}
> > -		}
> > -		if (next_worker)
> > -			continue;
> > -
> > -		rte_cryptodev_sym_session_free(worker->dev_id,
> > -			sess_ctx->worker_sess[i]);
> > -
> > -		deleted_sess[n_deleted_sess++].driver_id = worker-
> > >driver_id;
> > -		sess_ctx->worker_sess[i] = NULL;
> > -	}
> > +	scheduler_session_destroy((void *)sess, sched_ctx,
> > +				RTE_CRYPTO_OP_WITH_SESSION);
> >  }
> >
> >  static struct rte_cryptodev_ops scheduler_pmd_ops = { @@ -598,3
> > +698,68 @@ static struct rte_cryptodev_ops scheduler_pmd_ops = {  };
> >
> >  struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops =
> > &scheduler_pmd_ops;
> > +
> > +/** Configure a scheduler session from a security session
> > +configuration */ static int scheduler_pmd_sec_sess_create(void *dev,
> > +struct rte_security_session_conf *conf,
> > +			struct rte_security_session *sess) {
> > +	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
> 
> [Anoob] Is this cast required?

[DC] It wasn't required. I have removed this and all other unnecessary casts in v4

> 
> > +	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
> > +
> > +	/* Check for supported security protocols */
> > +	if (!scheduler_check_sec_proto_supp(conf->action_type, conf-
> > >protocol)) {
> > +		CR_SCHED_LOG(ERR, "Unsupported security protocol");
> > +		return -ENOTSUP;
> > +	}
> > +
> > +	return scheduler_session_create((void *)sess, (void *)conf,
> > sched_ctx,
> > +				RTE_CRYPTO_OP_SECURITY_SESSION);
> > +}
> > +
> > +/** Clear the memory of session so it doesn't leave key material
> > +behind */ static int scheduler_pmd_sec_sess_destroy(void *dev,
> > +			       struct rte_security_session *sess) {
> > +	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
> 
> [Anoob] Is this cast required?

[DC] It wasn't required. I have removed this and all other unnecessary casts in v4

> 
> > +	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
> > +
> > +	scheduler_session_destroy((void *)sess, sched_ctx,
> > +				RTE_CRYPTO_OP_SECURITY_SESSION);
> > +
> > +	return 0;
> > +}
> > +
> > +/** Get sync security capabilities for scheduler pmds */ static const
> > +struct rte_security_capability * scheduler_pmd_sec_capa_get(void
> > +*dev) {
> > +	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
> 
> [Anoob] Is this cast required?

[DC] It wasn't required. I have removed this and all other unnecessary casts in v4

> 
> > +	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
> > +
> > +	return sched_ctx->sec_capabilities;
> > +}
> > +
> > +static unsigned int
> > +scheduler_pmd_sec_sess_size_get(void *dev) {
> > +	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
> 
> [Anoob] Is this cast required?

[DC] It wasn't required. I have removed this and all other unnecessary casts in v4

> 
> > +	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
> > +
> > +	return scheduler_session_size_get(sched_ctx,
> > +				RTE_CRYPTO_OP_SECURITY_SESSION);
> > +}
> > +
> > +static struct rte_security_ops scheduler_pmd_sec_ops = {
> > +		.session_create = scheduler_pmd_sec_sess_create,
> > +		.session_update = NULL,
> > +		.session_get_size = scheduler_pmd_sec_sess_size_get,
> > +		.session_stats_get = NULL,
> > +		.session_destroy = scheduler_pmd_sec_sess_destroy,
> > +		.set_pkt_metadata = NULL,
> > +		.capabilities_get = scheduler_pmd_sec_capa_get };
> > +
> > +struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops =
> > +
> > 	&scheduler_pmd_sec_ops;
> > diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h
> > b/drivers/crypto/scheduler/scheduler_pmd_private.h
> > index 36d0bb6307..ff1e7a83e8 100644
> > --- a/drivers/crypto/scheduler/scheduler_pmd_private.h
> > +++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
> > @@ -5,6 +5,8 @@
> >  #ifndef _SCHEDULER_PMD_PRIVATE_H
> >  #define _SCHEDULER_PMD_PRIVATE_H
> >
> > +#include <rte_security_driver.h>
> > +
> >  #include "rte_cryptodev_scheduler.h"
> >
> >  #define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
> > @@ -30,7 +32,8 @@ struct scheduler_ctx {
> >  	/**< private scheduler context pointer */
> >
> >  	struct rte_cryptodev_capabilities *capabilities;
> > -	uint32_t nb_capabilities;
> > +	struct rte_security_capability *sec_capabilities;
> > +	struct rte_cryptodev_capabilities **sec_crypto_capabilities;
> >
> >  	uint32_t max_nb_queue_pairs;
> >
> > @@ -64,8 +67,12 @@ struct scheduler_qp_ctx {
> >
> >  struct scheduler_session_ctx {
> >  	uint32_t ref_cnt;
> > -	struct rte_cryptodev_sym_session *worker_sess[
> > -		RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
> > +	union {
> > +		struct rte_cryptodev_sym_session *worker_sess[
> > +			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
> > +		struct rte_security_session *worker_sec_sess[
> > +			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
> > +	};
> >  };
> >
> >  extern uint8_t cryptodev_scheduler_driver_id; @@ -108,7 +115,22 @@
> > scheduler_order_drain(struct rte_ring *order_ring,  }
> >
> >  static __rte_always_inline void
> > -scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t
> > nb_ops,
> > +scheduler_set_single_worker_session(struct rte_crypto_op *op,
> > +		uint8_t worker_idx)
> > +{
> > +	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > +		struct scheduler_session_ctx *sess_ctx =
> > +				CRYPTODEV_GET_SYM_SESS_PRIV(op->sym-
> > >session);
> > +		op->sym->session = sess_ctx->worker_sess[worker_idx];
> > +	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
> > +		struct scheduler_session_ctx *sess_ctx =
> > +				SECURITY_GET_SESS_PRIV(op->sym-
> > >session);
> > +		op->sym->session = sess_ctx-
> > >worker_sec_sess[worker_idx];
> > +	}
> > +}
> > +
> > +static __rte_always_inline void
> > +scheduler_set_worker_sessions(struct rte_crypto_op **ops, uint16_t
> > +nb_ops,
> >  		uint8_t worker_index)
> >  {
> >  	struct rte_crypto_op **op = ops;
> > @@ -129,52 +151,34 @@ scheduler_set_worker_session(struct
> > rte_crypto_op **ops, uint16_t nb_ops,
> >  			rte_prefetch0(op[7]->sym->session);
> >  		}
> >
> > -		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > -			struct scheduler_session_ctx *sess_ctx =
> > -				CRYPTODEV_GET_SYM_SESS_PRIV(op[0]-
> > >sym->session);
> > -			op[0]->sym->session =
> > -				sess_ctx->worker_sess[worker_index];
> > -		}
> > -
> > -		if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > -			struct scheduler_session_ctx *sess_ctx =
> > -				CRYPTODEV_GET_SYM_SESS_PRIV(op[1]-
> > >sym->session);
> > -			op[1]->sym->session =
> > -				sess_ctx->worker_sess[worker_index];
> > -		}
> > -
> > -		if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > -			struct scheduler_session_ctx *sess_ctx =
> > -				CRYPTODEV_GET_SYM_SESS_PRIV(op[2]-
> > >sym->session);
> > -			op[2]->sym->session =
> > -				sess_ctx->worker_sess[worker_index];
> > -		}
> > -
> > -		if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > -			struct scheduler_session_ctx *sess_ctx =
> > -				CRYPTODEV_GET_SYM_SESS_PRIV(op[3]-
> > >sym->session);
> > -			op[3]->sym->session =
> > -				sess_ctx->worker_sess[worker_index];
> > -		}
> > +		scheduler_set_single_worker_session(op[0],
> > worker_index);
> > +		scheduler_set_single_worker_session(op[1],
> > worker_index);
> > +		scheduler_set_single_worker_session(op[2],
> > worker_index);
> > +		scheduler_set_single_worker_session(op[3],
> > worker_index);
> >
> >  		op += 4;
> >  		n -= 4;
> >  	}
> >
> >  	while (n--) {
> > -		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > -			struct scheduler_session_ctx *sess_ctx =
> > -				CRYPTODEV_GET_SYM_SESS_PRIV(op[0]-
> > >sym->session);
> > -
> > -			op[0]->sym->session =
> > -				sess_ctx->worker_sess[worker_index];
> > -			op++;
> > -		}
> > +		scheduler_set_single_worker_session(op[0],
> > worker_index);
> > +		op++;
> >  	}
> >  }
> >
> >  static __rte_always_inline void
> > -scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t
> > nb_ops)
> > +scheduler_retrieve_single_session(struct rte_crypto_op *op) {
> > +	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> > +		op->sym->session = (void *)(uintptr_t)
> > +			rte_cryptodev_sym_session_opaque_data_get(op-
> > >sym->session);
> > +	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
> > +		op->sym->session = (void *)(uintptr_t)
> > +			rte_security_session_opaque_data_get(op->sym-
> > >session);
> > +}
> > +
> > +static __rte_always_inline void
> > +scheduler_retrieve_sessions(struct rte_crypto_op **ops, uint16_t
> > +nb_ops)
> >  {
> >  	uint16_t n = nb_ops;
> >  	struct rte_crypto_op **op = ops;
> > @@ -194,32 +198,77 @@ scheduler_retrieve_session(struct rte_crypto_op
> > **ops, uint16_t nb_ops)
> >  			rte_prefetch0(op[7]->sym->session);
> >  		}
> >
> > -		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> > -			op[0]->sym->session = (void *)(uintptr_t)
> > -
> > 	rte_cryptodev_sym_session_opaque_data_get(op[0]->sym-
> > >session);
> > -		if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> > -			op[1]->sym->session = (void *)(uintptr_t)
> > -
> > 	rte_cryptodev_sym_session_opaque_data_get(op[1]->sym-
> > >session);
> > -		if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> > -			op[2]->sym->session = (void *)(uintptr_t)
> > -
> > 	rte_cryptodev_sym_session_opaque_data_get(op[2]->sym-
> > >session);
> > -		if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> > -			op[3]->sym->session = (void *)(uintptr_t)
> > -
> > 	rte_cryptodev_sym_session_opaque_data_get(op[3]->sym-
> > >session);
> > +		scheduler_retrieve_single_session(op[0]);
> > +		scheduler_retrieve_single_session(op[1]);
> > +		scheduler_retrieve_single_session(op[2]);
> > +		scheduler_retrieve_single_session(op[3]);
> >
> >  		op += 4;
> >  		n -= 4;
> >  	}
> >
> >  	while (n--) {
> > -		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
> > -			op[0]->sym->session = (void *)(uintptr_t)
> > -
> > 	rte_cryptodev_sym_session_opaque_data_get(op[0]->sym-
> > >session);
> > +		scheduler_retrieve_single_session(op[0]);
> >  		op++;
> >  	}
> >  }
> >
> > +static __rte_always_inline uint32_t
> > +scheduler_get_job_len(struct rte_crypto_op *op) {
> > +	uint32_t job_len;
> > +
> > +	/* op_len is initialized as cipher data length, if
> > +	 * it is 0, then it is set to auth data length
> > +	 */
> > +	job_len = op->sym->cipher.data.length;
> > +	job_len += (op->sym->cipher.data.length == 0) *
> > +					op->sym->auth.data.length;
> > +
> > +	return job_len;
> > +}
> > +
> > +static __rte_always_inline void
> > +scheduler_free_capabilities(struct scheduler_ctx *sched_ctx) {
> > +	uint32_t i;
> > +
> > +	if (sched_ctx->capabilities) {
> > +		rte_free(sched_ctx->capabilities);
> > +		sched_ctx->capabilities = NULL;
> > +	}
> > +
> > +	if (sched_ctx->sec_crypto_capabilities) {
> > +		i = 0;
> > +		while (sched_ctx->sec_crypto_capabilities[i] != NULL) {
> > +			rte_free(sched_ctx->sec_crypto_capabilities[i]);
> > +			sched_ctx->sec_crypto_capabilities[i] = NULL;
> > +			i++;
> > +		}
> > +
> > +		rte_free(sched_ctx->sec_crypto_capabilities);
> > +		sched_ctx->sec_crypto_capabilities = NULL;
> > +	}
> > +
> > +	if (sched_ctx->sec_capabilities) {
> > +		rte_free(sched_ctx->sec_capabilities);
> > +		sched_ctx->sec_capabilities = NULL;
> > +	}
> > +}
> > +
> > +static __rte_always_inline int
> > +scheduler_check_sec_proto_supp(enum
> > rte_security_session_action_type action,
> > +		enum rte_security_session_protocol protocol) {
> > +	if (action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
> > &&
> > +			protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
> > +		return 1;
> > +
> > +	return 0;
> > +}
> > +
> >  /** device specific operations function pointer structure */  extern
> > struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
> > +extern struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops;
> >
> >  #endif /* _SCHEDULER_PMD_PRIVATE_H */ diff --git
> > a/drivers/crypto/scheduler/scheduler_roundrobin.c
> > b/drivers/crypto/scheduler/scheduler_roundrobin.c
> > index ad3f8b842a..08041887a8 100644
> > --- a/drivers/crypto/scheduler/scheduler_roundrobin.c
> > +++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
> > @@ -28,11 +28,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op
> > **ops, uint16_t nb_ops)
> >  	if (unlikely(nb_ops == 0))
> >  		return 0;
> >
> > -	scheduler_set_worker_session(ops, nb_ops, worker_idx);
> > +	scheduler_set_worker_sessions(ops, nb_ops, worker_idx);
> >  	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
> >  			worker->qp_id, ops, nb_ops);
> >  	if (processed_ops < nb_ops)
> > -		scheduler_retrieve_session(ops + processed_ops,
> > +		scheduler_retrieve_sessions(ops + processed_ops,
> >  			nb_ops - processed_ops);
> >
> >  	worker->nb_inflight_cops += processed_ops; @@ -87,7 +87,7 @@
> > schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t
> > nb_ops)
> >
> >  	nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
> >  			worker->qp_id, ops, nb_ops);
> > -	scheduler_retrieve_session(ops, nb_deq_ops);
> > +	scheduler_retrieve_sessions(ops, nb_deq_ops);
> >  	last_worker_idx += 1;
> >  	last_worker_idx %= rr_qp_ctx->nb_workers;
> >
> > --
> > 2.25.1
  

Patch

diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 333e1d95a2..a3d0dadbdf 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -78,6 +78,10 @@  New Features
 * build: Optional libraries can now be selected with the new ``enable_libs``
   build option similarly to the existing ``enable_drivers`` build option.
 
+* **Updated Cryptodev Scheduler PMD.**
+
+  Added support for DOCSIS security protocol through the ``rte_security`` API
+  callbacks.
 
 Removed Items
 -------------
diff --git a/drivers/crypto/scheduler/meson.build b/drivers/crypto/scheduler/meson.build
index cd18efc791..752d655415 100644
--- a/drivers/crypto/scheduler/meson.build
+++ b/drivers/crypto/scheduler/meson.build
@@ -7,7 +7,7 @@  if is_windows
     subdir_done()
 endif
 
-deps += ['bus_vdev', 'reorder']
+deps += ['bus_vdev', 'reorder', 'security']
 sources = files(
         'rte_cryptodev_scheduler.c',
         'scheduler_failover.c',
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 258d6f8c43..e8b905af2f 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -5,11 +5,14 @@ 
 #include <rte_reorder.h>
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
+#include <rte_security_driver.h>
 #include <rte_malloc.h>
 
 #include "rte_cryptodev_scheduler.h"
 #include "scheduler_pmd_private.h"
 
+#define MAX_CAPS 256
+
 /** update the scheduler pmd's capability with attaching device's
  *  capability.
  *  For each device to be attached, the scheduler's capability should be
@@ -59,7 +62,6 @@  sync_caps(struct rte_cryptodev_capabilities *caps,
 					cap->sym.auth.digest_size.max ?
 					s_cap->sym.auth.digest_size.max :
 					cap->sym.auth.digest_size.max;
-
 			}
 
 			if (s_cap->sym.xform_type ==
@@ -81,25 +83,176 @@  sync_caps(struct rte_cryptodev_capabilities *caps,
 
 		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
 		sync_nb_caps--;
+		i--;
 	}
 
 	return sync_nb_caps;
 }
 
 static int
-update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+check_sec_cap_equal(const struct rte_security_capability *sec_cap1,
+		struct rte_security_capability *sec_cap2)
+{
+	if (sec_cap1->action != sec_cap2->action ||
+			sec_cap1->protocol != sec_cap2->protocol ||
+			sec_cap1->ol_flags != sec_cap2->ol_flags)
+		return 0;
+
+	if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
+		return !memcmp(&sec_cap1->docsis, &sec_cap2->docsis,
+				sizeof(sec_cap1->docsis));
+	else
+		return 0;
+}
+
+static void
+copy_sec_cap(struct rte_security_capability *dst_sec_cap,
+		struct rte_security_capability *src_sec_cap)
+{
+	dst_sec_cap->action = src_sec_cap->action;
+	dst_sec_cap->protocol = src_sec_cap->protocol;
+	if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
+		dst_sec_cap->docsis = src_sec_cap->docsis;
+	dst_sec_cap->ol_flags = src_sec_cap->ol_flags;
+}
+
+static uint32_t
+sync_sec_crypto_caps(struct rte_cryptodev_capabilities *tmp_sec_crypto_caps,
+		const struct rte_cryptodev_capabilities *sec_crypto_caps,
+		const struct rte_cryptodev_capabilities *worker_sec_crypto_caps)
+{
+	uint8_t nb_caps = 0;
+
+	nb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps, sec_crypto_caps);
+	sync_caps(tmp_sec_crypto_caps, nb_caps, worker_sec_crypto_caps);
+
+	return nb_caps;
+}
+
+/** update the scheduler pmd's security capability with attaching device's
+ *  security capability.
+ *  For each device to be attached, the scheduler's security capability should
+ *  be the common capability set of all workers
+ **/
+static uint32_t
+sync_sec_caps(uint32_t worker_idx,
+		struct rte_security_capability *sec_caps,
+		struct rte_cryptodev_capabilities sec_crypto_caps[][MAX_CAPS],
+		uint32_t nb_sec_caps,
+		const struct rte_security_capability *worker_sec_caps)
 {
-	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
-	uint32_t nb_caps = 0, i;
+	uint32_t nb_worker_sec_caps = 0, i;
+
+	if (worker_sec_caps == NULL)
+		return 0;
+
+	while (worker_sec_caps[nb_worker_sec_caps].action !=
+					RTE_SECURITY_ACTION_TYPE_NONE)
+		nb_worker_sec_caps++;
+
+	/* Handle first worker */
+	if (worker_idx == 0) {
+		uint32_t nb_worker_sec_crypto_caps = 0;
+		uint32_t nb_worker_supp_sec_caps = 0;
+
+		for (i = 0; i < nb_worker_sec_caps; i++) {
+			/* Check for supported security protocols */
+			if (!scheduler_check_sec_proto_supp(worker_sec_caps[i].action,
+					worker_sec_caps[i].protocol))
+				continue;
 
-	if (sched_ctx->capabilities) {
-		rte_free(sched_ctx->capabilities);
-		sched_ctx->capabilities = NULL;
+			sec_caps[nb_worker_supp_sec_caps] = worker_sec_caps[i];
+
+			while (worker_sec_caps[i].crypto_capabilities[
+					nb_worker_sec_crypto_caps].op !=
+						RTE_CRYPTO_OP_TYPE_UNDEFINED)
+				nb_worker_sec_crypto_caps++;
+
+			rte_memcpy(&sec_crypto_caps[nb_worker_supp_sec_caps][0],
+				&worker_sec_caps[i].crypto_capabilities[0],
+				sizeof(sec_crypto_caps[nb_worker_supp_sec_caps][0]) *
+					nb_worker_sec_crypto_caps);
+
+			nb_worker_supp_sec_caps++;
+		}
+		return nb_worker_supp_sec_caps;
 	}
 
-	for (i = 0; i < sched_ctx->nb_workers; i++) {
-		struct rte_cryptodev_info dev_info;
+	for (i = 0; i < nb_sec_caps; i++) {
+		struct rte_security_capability *sec_cap = &sec_caps[i];
+		uint32_t j;
+
+		for (j = 0; j < nb_worker_sec_caps; j++) {
+			struct rte_cryptodev_capabilities
+					tmp_sec_crypto_caps[MAX_CAPS] = { {0} };
+			uint32_t nb_sec_crypto_caps = 0;
+			const struct rte_security_capability *worker_sec_cap =
+								&worker_sec_caps[j];
+
+			if (!check_sec_cap_equal(worker_sec_cap, sec_cap))
+				continue;
+
+			/* Sync the crypto caps of the common security cap */
+			nb_sec_crypto_caps = sync_sec_crypto_caps(
+						tmp_sec_crypto_caps,
+						&sec_crypto_caps[i][0],
+						&worker_sec_cap->crypto_capabilities[0]);
+
+			memset(&sec_crypto_caps[i][0], 0,
+					sizeof(*&sec_crypto_caps[i][0]) *
+						MAX_CAPS);
+
+			rte_memcpy(&sec_crypto_caps[i][0],
+					&tmp_sec_crypto_caps[0],
+					sizeof(*&sec_crypto_caps[i][0]) *
+						nb_sec_crypto_caps);
+
+			/* Common cap found and synced; stop scanning worker caps */
+			break;
+		}
+
+		if (j < nb_worker_sec_caps)
+			continue;
+
+		/*
+		 * Remove an uncommon security cap, and its associated crypto
+		 * caps, from the arrays
+		 */
+		for (j = i; j < nb_sec_caps - 1; j++) {
+			rte_memcpy(&sec_caps[j], &sec_caps[j+1],
+					sizeof(*sec_cap));
+
+			rte_memcpy(&sec_crypto_caps[j][0],
+					&sec_crypto_caps[j+1][0],
+					sizeof(*&sec_crypto_caps[j][0]) *
+						MAX_CAPS);
+		}
+		memset(&sec_caps[nb_sec_caps - 1], 0, sizeof(*sec_cap));
+		memset(&sec_crypto_caps[nb_sec_caps - 1][0], 0,
+			sizeof(*&sec_crypto_caps[nb_sec_caps - 1][0]) *
+				MAX_CAPS);
+		nb_sec_caps--;
+		i--;
+	}
+
+	return nb_sec_caps;
+}
+
+static int
+update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+{
+	struct rte_cryptodev_capabilities tmp_caps[MAX_CAPS] = { {0} };
+	struct rte_security_capability tmp_sec_caps[MAX_CAPS] = { {0} };
+	struct rte_cryptodev_capabilities
+		tmp_sec_crypto_caps[MAX_CAPS][MAX_CAPS] = { {{0}} };
+	uint32_t nb_caps = 0, nb_sec_caps = 0, i;
+	struct rte_cryptodev_info dev_info;
+
+	/* Free any previously allocated capability memory */
+	scheduler_free_capabilities(sched_ctx);
 
+	/* Determine the new cryptodev capabilities for the scheduler */
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
 		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
 
 		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
@@ -116,6 +269,54 @@  update_scheduler_capability(struct scheduler_ctx *sched_ctx)
 	rte_memcpy(sched_ctx->capabilities, tmp_caps,
 			sizeof(struct rte_cryptodev_capabilities) * nb_caps);
 
+	/* Determine the new security capabilities for the scheduler */
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		struct rte_cryptodev *dev =
+				&rte_cryptodevs[sched_ctx->workers[i].dev_id];
+		struct rte_security_ctx *sec_ctx = dev->security_ctx;
+
+		nb_sec_caps = sync_sec_caps(i, tmp_sec_caps, tmp_sec_crypto_caps,
+			nb_sec_caps, rte_security_capabilities_get(sec_ctx));
+	}
+
+	sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,
+					sizeof(struct rte_security_capability) *
+					(nb_sec_caps + 1), 0, SOCKET_ID_ANY);
+	if (!sched_ctx->sec_capabilities)
+		return -ENOMEM;
+
+	sched_ctx->sec_crypto_capabilities = rte_zmalloc_socket(NULL,
+				sizeof(struct rte_cryptodev_capabilities *) *
+				(nb_sec_caps + 1),
+				0, SOCKET_ID_ANY);
+	if (!sched_ctx->sec_crypto_capabilities)
+		return -ENOMEM;
+
+	for (i = 0; i < nb_sec_caps; i++) {
+		uint16_t nb_sec_crypto_caps = 0;
+
+		copy_sec_cap(&sched_ctx->sec_capabilities[i], &tmp_sec_caps[i]);
+
+		while (tmp_sec_crypto_caps[i][nb_sec_crypto_caps].op !=
+						RTE_CRYPTO_OP_TYPE_UNDEFINED)
+			nb_sec_crypto_caps++;
+
+		sched_ctx->sec_crypto_capabilities[i] =
+			rte_zmalloc_socket(NULL,
+				sizeof(struct rte_cryptodev_capabilities) *
+				(nb_sec_crypto_caps + 1), 0, SOCKET_ID_ANY);
+		if (!sched_ctx->sec_crypto_capabilities[i])
+			return -ENOMEM;
+
+		rte_memcpy(sched_ctx->sec_crypto_capabilities[i],
+				&tmp_sec_crypto_caps[i][0],
+				sizeof(struct rte_cryptodev_capabilities)
+					* nb_sec_crypto_caps);
+
+		sched_ctx->sec_capabilities[i].crypto_capabilities =
+				sched_ctx->sec_crypto_capabilities[i];
+	}
+
 	return 0;
 }
 
@@ -205,6 +406,7 @@  rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)
 	sched_ctx->nb_workers++;
 
 	if (update_scheduler_capability(sched_ctx) < 0) {
+		scheduler_free_capabilities(sched_ctx);
 		worker->dev_id = 0;
 		worker->driver_id = 0;
 		sched_ctx->nb_workers--;
@@ -266,6 +468,7 @@  rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)
 	sched_ctx->nb_workers--;
 
 	if (update_scheduler_capability(sched_ctx) < 0) {
+		scheduler_free_capabilities(sched_ctx);
 		CR_SCHED_LOG(ERR, "capabilities update failed");
 		return -ENOTSUP;
 	}
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index f24d2fc44b..52ff2ffbb7 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -28,7 +28,7 @@  failover_worker_enqueue(struct scheduler_worker *worker,
 {
 	uint16_t processed_ops;
 
-	scheduler_set_worker_session(ops, nb_ops, index);
+	scheduler_set_worker_sessions(ops, nb_ops, index);
 
 	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
 			worker->qp_id, ops, nb_ops);
@@ -51,7 +51,7 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			ops, nb_ops, PRIMARY_WORKER_IDX);
 
 	if (enqueued_ops < nb_ops) {
-		scheduler_retrieve_session(&ops[enqueued_ops],
+		scheduler_retrieve_sessions(&ops[enqueued_ops],
 						nb_ops - enqueued_ops);
 		enqueued_ops += failover_worker_enqueue(
 				&qp_ctx->secondary_worker,
@@ -59,7 +59,7 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 				nb_ops - enqueued_ops,
 				SECONDARY_WORKER_IDX);
 		if (enqueued_ops < nb_ops)
-			scheduler_retrieve_session(&ops[enqueued_ops],
+			scheduler_retrieve_sessions(&ops[enqueued_ops],
 						nb_ops - enqueued_ops);
 	}
 
@@ -102,7 +102,7 @@  schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_SWITCH_MASK;
 
 	if (nb_deq_ops == nb_ops)
-		goto retrieve_session;
+		goto retrieve_sessions;
 
 	worker = workers[qp_ctx->deq_idx];
 
@@ -112,8 +112,8 @@  schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		worker->nb_inflight_cops -= nb_deq_ops2;
 	}
 
-retrieve_session:
-	scheduler_retrieve_session(ops, nb_deq_ops + nb_deq_ops2);
+retrieve_sessions:
+	scheduler_retrieve_sessions(ops, nb_deq_ops + nb_deq_ops2);
 
 	return nb_deq_ops + nb_deq_ops2;
 }
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index 3dea850661..a21b522f9f 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -183,7 +183,7 @@  mc_scheduler_worker(struct rte_cryptodev *dev)
 
 	while (!mc_ctx->stop_signal) {
 		if (pending_enq_ops) {
-			scheduler_set_worker_session(
+			scheduler_set_worker_sessions(
 				&enq_ops[pending_enq_ops_idx], pending_enq_ops,
 				worker_idx);
 			processed_ops =
@@ -192,7 +192,7 @@  mc_scheduler_worker(struct rte_cryptodev *dev)
 					&enq_ops[pending_enq_ops_idx],
 					pending_enq_ops);
 			if (processed_ops < pending_deq_ops)
-				scheduler_retrieve_session(
+				scheduler_retrieve_sessions(
 					&enq_ops[pending_enq_ops_idx +
 						processed_ops],
 					pending_deq_ops - processed_ops);
@@ -203,13 +203,13 @@  mc_scheduler_worker(struct rte_cryptodev *dev)
 			processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
 							MC_SCHED_BUFFER_SIZE, NULL);
 			if (processed_ops) {
-				scheduler_set_worker_session(enq_ops,
+				scheduler_set_worker_sessions(enq_ops,
 					processed_ops, worker_idx);
 				pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
 						worker->dev_id, worker->qp_id,
 						enq_ops, processed_ops);
 				if (pending_enq_ops_idx < processed_ops)
-					scheduler_retrieve_session(
+					scheduler_retrieve_sessions(
 						enq_ops + pending_enq_ops_idx,
 						processed_ops -
 						pending_enq_ops_idx);
@@ -229,7 +229,7 @@  mc_scheduler_worker(struct rte_cryptodev *dev)
 					worker->dev_id, worker->qp_id, deq_ops,
 					MC_SCHED_BUFFER_SIZE);
 			if (processed_ops) {
-				scheduler_retrieve_session(deq_ops,
+				scheduler_retrieve_sessions(deq_ops,
 					processed_ops);
 				inflight_ops -= processed_ops;
 				if (reordering_enabled) {
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 0c51fff930..30bb5ce0e2 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -59,7 +59,6 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	}
 
 	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
-		struct scheduler_session_ctx *sess_ctx[4];
 		uint8_t target[4];
 		uint32_t job_len[4];
 
@@ -76,17 +75,7 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		rte_prefetch0((uint8_t *)ops[i + 7]->sym->session +
 			sizeof(struct rte_cryptodev_sym_session));
 
-		sess_ctx[0] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i]->sym->session);
-		sess_ctx[1] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 1]->sym->session);
-		sess_ctx[2] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 2]->sym->session);
-		sess_ctx[3] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 3]->sym->session);
-
-		/* job_len is initialized as cipher data length, once
-		 * it is 0, equals to auth data length
-		 */
-		job_len[0] = ops[i]->sym->cipher.data.length;
-		job_len[0] += (ops[i]->sym->cipher.data.length == 0) *
-				ops[i]->sym->auth.data.length;
+		job_len[0] = scheduler_get_job_len(ops[i]);
 		/* decide the target op based on the job length */
 		target[0] = !(job_len[0] & psd_qp_ctx->threshold);
 		p_enq_op = &enq_ops[target[0]];
@@ -100,15 +89,11 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
-		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i]->sym->session =
-				sess_ctx[0]->worker_sess[target[0]];
+		scheduler_set_single_worker_session(ops[i], target[0]);
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
 		p_enq_op->pos++;
 
-		job_len[1] = ops[i + 1]->sym->cipher.data.length;
-		job_len[1] += (ops[i + 1]->sym->cipher.data.length == 0) *
-				ops[i+1]->sym->auth.data.length;
+		job_len[1] = scheduler_get_job_len(ops[i + 1]);
 		target[1] = !(job_len[1] & psd_qp_ctx->threshold);
 		p_enq_op = &enq_ops[target[1]];
 
@@ -118,15 +103,11 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
-		if (ops[i + 1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i + 1]->sym->session =
-				sess_ctx[1]->worker_sess[target[1]];
+		scheduler_set_single_worker_session(ops[i + 1], target[1]);
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+1];
 		p_enq_op->pos++;
 
-		job_len[2] = ops[i + 2]->sym->cipher.data.length;
-		job_len[2] += (ops[i + 2]->sym->cipher.data.length == 0) *
-				ops[i + 2]->sym->auth.data.length;
+		job_len[2] = scheduler_get_job_len(ops[i + 2]);
 		target[2] = !(job_len[2] & psd_qp_ctx->threshold);
 		p_enq_op = &enq_ops[target[2]];
 
@@ -136,15 +117,11 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
-		if (ops[i + 2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i + 2]->sym->session =
-				sess_ctx[2]->worker_sess[target[2]];
+		scheduler_set_single_worker_session(ops[i + 2], target[2]);
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+2];
 		p_enq_op->pos++;
 
-		job_len[3] = ops[i + 3]->sym->cipher.data.length;
-		job_len[3] += (ops[i + 3]->sym->cipher.data.length == 0) *
-				ops[i + 3]->sym->auth.data.length;
+		job_len[3] = scheduler_get_job_len(ops[i + 3]);
 		target[3] = !(job_len[3] & psd_qp_ctx->threshold);
 		p_enq_op = &enq_ops[target[3]];
 
@@ -154,22 +131,16 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
-		if (ops[i + 3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i + 3]->sym->session =
-				sess_ctx[3]->worker_sess[target[3]];
+		scheduler_set_single_worker_session(ops[i + 3], target[3]);
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+3];
 		p_enq_op->pos++;
 	}
 
 	for (; i < nb_ops; i++) {
-		struct scheduler_session_ctx *sess_ctx =
-			CRYPTODEV_GET_SYM_SESS_PRIV(ops[i]->sym->session);
 		uint32_t job_len;
 		uint8_t target;
 
-		job_len = ops[i]->sym->cipher.data.length;
-		job_len += (ops[i]->sym->cipher.data.length == 0) *
-				ops[i]->sym->auth.data.length;
+		job_len = scheduler_get_job_len(ops[i]);
 		target = !(job_len & psd_qp_ctx->threshold);
 		p_enq_op = &enq_ops[target];
 
@@ -179,8 +150,7 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
-		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i]->sym->session = sess_ctx->worker_sess[target];
+		scheduler_set_single_worker_session(ops[i], target);
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
 		p_enq_op->pos++;
 	}
@@ -236,7 +206,7 @@  schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	if (worker->nb_inflight_cops) {
 		nb_deq_ops_pri = rte_cryptodev_dequeue_burst(worker->dev_id,
 			worker->qp_id, ops, nb_ops);
-		scheduler_retrieve_session(ops, nb_deq_ops_pri);
+		scheduler_retrieve_sessions(ops, nb_deq_ops_pri);
 		worker->nb_inflight_cops -= nb_deq_ops_pri;
 	}
 
@@ -251,7 +221,7 @@  schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		nb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id,
 				worker->qp_id, &ops[nb_deq_ops_pri],
 				nb_ops - nb_deq_ops_pri);
-		scheduler_retrieve_session(&ops[nb_deq_ops_pri], nb_deq_ops_sec);
+		scheduler_retrieve_sessions(&ops[nb_deq_ops_pri], nb_deq_ops_sec);
 		worker->nb_inflight_cops -= nb_deq_ops_sec;
 
 		if (!worker->nb_inflight_cops)
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 4e8bbf0e09..6dad9bc3dd 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -8,6 +8,7 @@ 
 #include <rte_hexdump.h>
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
+#include <rte_security_driver.h>
 #include <bus_vdev_driver.h>
 #include <rte_malloc.h>
 #include <rte_cpuflags.h>
@@ -233,6 +234,35 @@  cryptodev_scheduler_create(const char *name,
 		return -ENOMEM;
 	}
 
+	struct rte_security_ctx *security_instance;
+	security_instance = rte_zmalloc_socket(NULL,
+					sizeof(struct rte_security_ctx),
+					RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (security_instance == NULL) {
+		CR_SCHED_LOG(ERR, "rte_security_ctx memory alloc failed");
+		return -ENOMEM;
+	}
+
+	security_instance->device = (void *)dev;
+	security_instance->ops = rte_crypto_scheduler_pmd_sec_ops;
+	security_instance->sess_cnt = 0;
+	dev->security_ctx = security_instance;
+
+	/*
+	 * Initialize security capabilities structure as an empty structure,
+	 * in case device information is requested when no workers are attached
+	 */
+	sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,
+					sizeof(struct rte_security_capability),
+					0, SOCKET_ID_ANY);
+
+	if (!sched_ctx->sec_capabilities) {
+		rte_free(security_instance);
+		CR_SCHED_LOG(ERR, "Not enough memory for security capability "
+				"information");
+		return -ENOMEM;
+	}
+
 	rte_cryptodev_pmd_probing_finish(dev);
 
 	return 0;
@@ -263,6 +293,9 @@  cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
 					sched_ctx->workers[i].dev_id);
 	}
 
+	rte_free(dev->security_ctx);
+	dev->security_ctx = NULL;
+
 	return rte_cryptodev_pmd_destroy(dev);
 }
 
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 294aab4452..34d20ee2de 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -8,11 +8,212 @@ 
 #include <dev_driver.h>
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
+#include <rte_security_driver.h>
 #include <rte_reorder.h>
 #include <rte_errno.h>
 
 #include "scheduler_pmd_private.h"
 
+struct scheduler_configured_sess_info {
+	uint8_t dev_id;
+	uint8_t driver_id;
+	union {
+		struct rte_cryptodev_sym_session *sess;
+		struct {
+			struct rte_security_session *sec_sess;
+			struct rte_security_ctx *sec_ctx;
+		};
+	};
+};
+
+static int
+scheduler_session_create(void *sess, void *sess_params,
+		struct scheduler_ctx *sched_ctx,
+		enum rte_crypto_op_sess_type session_type)
+{
+	struct rte_mempool *mp = rte_mempool_from_obj(sess);
+	struct scheduler_session_ctx *sess_ctx;
+	struct scheduler_configured_sess_info configured_sess[
+			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
+	uint32_t i, j, n_configured_sess = 0;
+	int ret = 0;
+
+	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(
+				(struct rte_cryptodev_sym_session *)sess);
+	else
+		sess_ctx = SECURITY_GET_SESS_PRIV(
+				(struct rte_security_session *)sess);
+
+	if (mp == NULL)
+		return -EINVAL;
+
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		struct scheduler_worker *worker = &sched_ctx->workers[i];
+		struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
+		uint8_t next_worker = 0;
+
+		for (j = 0; j < n_configured_sess; j++) {
+			if (configured_sess[j].driver_id == worker->driver_id) {
+				if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+					sess_ctx->worker_sess[i] =
+						configured_sess[j].sess;
+				else
+					sess_ctx->worker_sec_sess[i] =
+						configured_sess[j].sec_sess;
+
+				next_worker = 1;
+				break;
+			}
+		}
+		if (next_worker)
+			continue;
+
+		if (rte_mempool_avail_count(mp) == 0) {
+			ret = -ENOMEM;
+			goto error_exit;
+		}
+
+		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			struct rte_cryptodev_sym_session *worker_sess =
+				rte_cryptodev_sym_session_create(worker->dev_id,
+						(struct rte_crypto_sym_xform *)
+						sess_params, mp);
+
+			if (worker_sess == NULL) {
+				ret = -rte_errno;
+				goto error_exit;
+			}
+
+			worker_sess->opaque_data = (uint64_t)sess;
+			sess_ctx->worker_sess[i] = worker_sess;
+			configured_sess[n_configured_sess].sess = worker_sess;
+		} else {
+			struct rte_security_session *worker_sess =
+				rte_security_session_create(dev->security_ctx,
+					(struct rte_security_session_conf *)
+					sess_params, mp);
+
+			if (worker_sess == NULL) {
+				ret = -rte_errno;
+				goto error_exit;
+			}
+
+			worker_sess->opaque_data = (uint64_t)sess;
+			sess_ctx->worker_sec_sess[i] = worker_sess;
+			configured_sess[n_configured_sess].sec_sess =
+							worker_sess;
+			configured_sess[n_configured_sess].sec_ctx =
+							dev->security_ctx;
+		}
+
+		configured_sess[n_configured_sess].driver_id =
+							worker->driver_id;
+		configured_sess[n_configured_sess].dev_id = worker->dev_id;
+		n_configured_sess++;
+	}
+
+	return 0;
+
+error_exit:
+	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
+	for (i = 0; i < n_configured_sess; i++) {
+		if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+			rte_cryptodev_sym_session_free(
+						configured_sess[i].dev_id,
+						configured_sess[i].sess);
+		else
+			rte_security_session_destroy(
+						configured_sess[i].sec_ctx,
+						configured_sess[i].sec_sess);
+	}
+
+	return ret;
+}
+
+static void
+scheduler_session_destroy(void *sess, struct scheduler_ctx *sched_ctx,
+		uint8_t session_type)
+{
+	struct scheduler_session_ctx *sess_ctx;
+	struct scheduler_configured_sess_info deleted_sess[
+			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
+	uint32_t i, j, n_deleted_sess = 0;
+
+	if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(
+				(struct rte_cryptodev_sym_session *)sess);
+	else
+		sess_ctx = SECURITY_GET_SESS_PRIV(
+				(struct rte_security_session *)sess);
+
+	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
+		CR_SCHED_LOG(WARNING,
+			"Worker updated between session creation/deletion. "
+			"The session may not be freed fully.");
+	}
+
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		struct scheduler_worker *worker = &sched_ctx->workers[i];
+		struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
+		uint8_t next_worker = 0;
+
+		for (j = 0; j < n_deleted_sess; j++) {
+			if (deleted_sess[j].driver_id == worker->driver_id) {
+				if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+					sess_ctx->worker_sess[i] = NULL;
+				else
+					sess_ctx->worker_sec_sess[i] = NULL;
+
+				next_worker = 1;
+				break;
+			}
+		}
+		if (next_worker)
+			continue;
+
+		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			rte_cryptodev_sym_session_free(worker->dev_id,
+						sess_ctx->worker_sess[i]);
+			sess_ctx->worker_sess[i] = NULL;
+		} else {
+			rte_security_session_destroy(dev->security_ctx,
+						sess_ctx->worker_sec_sess[i]);
+			sess_ctx->worker_sec_sess[i] = NULL;
+		}
+
+		deleted_sess[n_deleted_sess++].driver_id = worker->driver_id;
+	}
+}
+
+static unsigned int
+scheduler_session_size_get(struct scheduler_ctx *sched_ctx,
+		uint8_t session_type)
+{
+	uint8_t i = 0;
+	uint32_t max_priv_sess_size = 0;
+
+	/* Check what is the maximum private session size for all workers */
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+		struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];
+		struct rte_security_ctx *sec_ctx = dev->security_ctx;
+		uint32_t priv_sess_size = 0;
+
+		if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			priv_sess_size =
+				(*dev->dev_ops->sym_session_get_size)(dev);
+		} else {
+			priv_sess_size = (*sec_ctx->ops->session_get_size)(dev);
+		}
+
+		if (max_priv_sess_size < priv_sess_size)
+			max_priv_sess_size = priv_sess_size;
+	}
+
+	return max_priv_sess_size;
+}
+
 /** attaching the workers predefined by scheduler's EAL options */
 static int
 scheduler_attach_init_worker(struct rte_cryptodev *dev)
@@ -265,10 +466,7 @@  scheduler_pmd_close(struct rte_cryptodev *dev)
 		sched_ctx->private_ctx = NULL;
 	}
 
-	if (sched_ctx->capabilities) {
-		rte_free(sched_ctx->capabilities);
-		sched_ctx->capabilities = NULL;
-	}
+	scheduler_free_capabilities(sched_ctx);
 
 	return 0;
 }
@@ -451,92 +649,22 @@  scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 }
 
 static uint32_t
-scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
 {
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-	uint8_t i = 0;
-	uint32_t max_priv_sess_size = 0;
-
-	/* Check what is the maximum private session size for all workers */
-	for (i = 0; i < sched_ctx->nb_workers; i++) {
-		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
-		struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];
-		uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
 
-		if (max_priv_sess_size < priv_sess_size)
-			max_priv_sess_size = priv_sess_size;
-	}
-
-	return max_priv_sess_size;
+	return scheduler_session_size_get(sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
 }
 
-struct scheduler_configured_sess_info {
-	uint8_t dev_id;
-	uint8_t driver_id;
-	struct rte_cryptodev_sym_session *sess;
-};
-
 static int
 scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
 	struct rte_crypto_sym_xform *xform,
 	struct rte_cryptodev_sym_session *sess)
 {
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-	struct rte_mempool *mp = rte_mempool_from_obj(sess);
-	struct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
-	struct scheduler_configured_sess_info configured_sess[
-			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
-	uint32_t i, j, n_configured_sess = 0;
-	int ret = 0;
-
-	if (mp == NULL)
-		return -EINVAL;
 
-	for (i = 0; i < sched_ctx->nb_workers; i++) {
-		struct scheduler_worker *worker = &sched_ctx->workers[i];
-		struct rte_cryptodev_sym_session *worker_sess;
-		uint8_t next_worker = 0;
-
-		for (j = 0; j < n_configured_sess; j++) {
-			if (configured_sess[j].driver_id ==
-					worker->driver_id) {
-				sess_ctx->worker_sess[i] =
-					configured_sess[j].sess;
-				next_worker = 1;
-				break;
-			}
-		}
-		if (next_worker)
-			continue;
-
-		if (rte_mempool_avail_count(mp) == 0) {
-			ret = -ENOMEM;
-			goto error_exit;
-		}
-
-		worker_sess = rte_cryptodev_sym_session_create(worker->dev_id,
-			xform, mp);
-		if (worker_sess == NULL) {
-			ret = -rte_errno;
-			goto error_exit;
-		}
-
-		worker_sess->opaque_data = (uint64_t)sess;
-		sess_ctx->worker_sess[i] = worker_sess;
-		configured_sess[n_configured_sess].driver_id =
-			worker->driver_id;
-		configured_sess[n_configured_sess].dev_id = worker->dev_id;
-		configured_sess[n_configured_sess].sess = worker_sess;
-		n_configured_sess++;
-	}
-
-	return 0;
-error_exit:
-	sess_ctx->ref_cnt = sched_ctx->ref_cnt;
-	for (i = 0; i < n_configured_sess; i++)
-		rte_cryptodev_sym_session_free(configured_sess[i].dev_id,
-			configured_sess[i].sess);
-	return ret;
+	return scheduler_session_create((void *)sess, (void *)xform, sched_ctx,
+				RTE_CRYPTO_OP_WITH_SESSION);
 }
 
 /** Clear the memory of session so it doesn't leave key material behind */
@@ -545,37 +673,9 @@  scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_sym_session *sess)
 {
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-	struct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
-	struct scheduler_configured_sess_info deleted_sess[
-			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
-	uint32_t i, j, n_deleted_sess = 0;
-
-	if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
-		CR_SCHED_LOG(WARNING,
-			"Worker updated between session creation/deletion. "
-			"The session may not be freed fully.");
-	}
-
-	for (i = 0; i < sched_ctx->nb_workers; i++) {
-		struct scheduler_worker *worker = &sched_ctx->workers[i];
-		uint8_t next_worker = 0;
 
-		for (j = 0; j < n_deleted_sess; j++) {
-			if (deleted_sess[j].driver_id == worker->driver_id) {
-				sess_ctx->worker_sess[i] = NULL;
-				next_worker = 1;
-				break;
-			}
-		}
-		if (next_worker)
-			continue;
-
-		rte_cryptodev_sym_session_free(worker->dev_id,
-			sess_ctx->worker_sess[i]);
-
-		deleted_sess[n_deleted_sess++].driver_id = worker->driver_id;
-		sess_ctx->worker_sess[i] = NULL;
-	}
+	scheduler_session_destroy((void *)sess, sched_ctx,
+				RTE_CRYPTO_OP_WITH_SESSION);
 }
 
 static struct rte_cryptodev_ops scheduler_pmd_ops = {
@@ -598,3 +698,68 @@  static struct rte_cryptodev_ops scheduler_pmd_ops = {
 };
 
 struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
+
+/**
+ * Configure a scheduler security session from a security session
+ * configuration.
+ *
+ * Only the action/protocol pairs accepted by
+ * scheduler_check_sec_proto_supp() (currently lookaside-protocol DOCSIS)
+ * are supported; anything else is rejected with -ENOTSUP. On success a
+ * corresponding worker security session is created via
+ * scheduler_session_create().
+ */
+static int
+scheduler_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
+			struct rte_security_session *sess)
+{
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
+
+	/* Check for supported security protocols */
+	if (!scheduler_check_sec_proto_supp(conf->action_type, conf->protocol)) {
+		CR_SCHED_LOG(ERR, "Unsupported security protocol");
+		return -ENOTSUP;
+	}
+
+	return scheduler_session_create((void *)sess, (void *)conf, sched_ctx,
+				RTE_CRYPTO_OP_SECURITY_SESSION);
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+scheduler_pmd_sec_sess_destroy(void *dev,
+			       struct rte_security_session *sess)
+{
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
+
+	/* Destroys the matching worker security session on each attached
+	 * worker; always reports success - a worker-set change between
+	 * session create and destroy only triggers a warning log.
+	 */
+	scheduler_session_destroy((void *)sess, sched_ctx,
+				RTE_CRYPTO_OP_SECURITY_SESSION);
+
+	return 0;
+}
+
+/** Get the synchronized security capabilities for the scheduler PMD */
+static const struct rte_security_capability *
+scheduler_pmd_sec_capa_get(void *dev)
+{
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
+
+	/* NOTE(review): sec_capabilities holds the security capabilities
+	 * synchronized across all workers; the code populating it is
+	 * outside this view - presumably NULL until workers are attached,
+	 * confirm callers tolerate a NULL return.
+	 */
+	return sched_ctx->sec_capabilities;
+}
+
+/* Get the security session size: the maximum across all workers */
+static unsigned int
+scheduler_pmd_sec_sess_size_get(void *dev)
+{
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
+
+	return scheduler_session_size_get(sched_ctx,
+				RTE_CRYPTO_OP_SECURITY_SESSION);
+}
+
+/*
+ * Security operations exposed by the scheduler PMD. Unsupported
+ * callbacks (session update, stats, packet metadata) are explicitly
+ * NULL.
+ */
+static struct rte_security_ops scheduler_pmd_sec_ops = {
+		.session_create = scheduler_pmd_sec_sess_create,
+		.session_update = NULL,
+		.session_get_size = scheduler_pmd_sec_sess_size_get,
+		.session_stats_get = NULL,
+		.session_destroy = scheduler_pmd_sec_sess_destroy,
+		.set_pkt_metadata = NULL,
+		.capabilities_get = scheduler_pmd_sec_capa_get
+};
+
+/* NOTE(review): exported ops pointer - presumably attached to the
+ * device's security_ctx in scheduler_pmd.c (not visible here); confirm.
+ */
+struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops =
+							&scheduler_pmd_sec_ops;
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 36d0bb6307..ff1e7a83e8 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -5,6 +5,8 @@ 
 #ifndef _SCHEDULER_PMD_PRIVATE_H
 #define _SCHEDULER_PMD_PRIVATE_H
 
+#include <rte_security_driver.h>
+
 #include "rte_cryptodev_scheduler.h"
 
 #define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
@@ -30,7 +32,8 @@  struct scheduler_ctx {
 	/**< private scheduler context pointer */
 
 	struct rte_cryptodev_capabilities *capabilities;
-	uint32_t nb_capabilities;
+	struct rte_security_capability *sec_capabilities;
+	struct rte_cryptodev_capabilities **sec_crypto_capabilities;
 
 	uint32_t max_nb_queue_pairs;
 
@@ -64,8 +67,12 @@  struct scheduler_qp_ctx {
 
 struct scheduler_session_ctx {
 	uint32_t ref_cnt;
-	struct rte_cryptodev_sym_session *worker_sess[
-		RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	union {
+		struct rte_cryptodev_sym_session *worker_sess[
+			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+		struct rte_security_session *worker_sec_sess[
+			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	};
 };
 
 extern uint8_t cryptodev_scheduler_driver_id;
@@ -108,7 +115,22 @@  scheduler_order_drain(struct rte_ring *order_ring,
 }
 
+/*
+ * Swap the scheduler-level session attached to @op for the worker
+ * session at @worker_idx, handling both plain symmetric crypto and
+ * security (DOCSIS) sessions. Sessionless ops are left untouched.
+ */
static __rte_always_inline void
-scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t nb_ops,
+scheduler_set_single_worker_session(struct rte_crypto_op *op,
+		uint8_t worker_idx)
+{
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		struct scheduler_session_ctx *sess_ctx =
+				CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
+		op->sym->session = sess_ctx->worker_sess[worker_idx];
+	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		struct scheduler_session_ctx *sess_ctx =
+				SECURITY_GET_SESS_PRIV(op->sym->session);
+		op->sym->session = sess_ctx->worker_sec_sess[worker_idx];
+	}
+}
+
+static __rte_always_inline void
+scheduler_set_worker_sessions(struct rte_crypto_op **ops, uint16_t nb_ops,
 		uint8_t worker_index)
 {
 	struct rte_crypto_op **op = ops;
@@ -129,52 +151,34 @@  scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t nb_ops,
 			rte_prefetch0(op[7]->sym->session);
 		}
 
-		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-			struct scheduler_session_ctx *sess_ctx =
-				CRYPTODEV_GET_SYM_SESS_PRIV(op[0]->sym->session);
-			op[0]->sym->session =
-				sess_ctx->worker_sess[worker_index];
-		}
-
-		if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-			struct scheduler_session_ctx *sess_ctx =
-				CRYPTODEV_GET_SYM_SESS_PRIV(op[1]->sym->session);
-			op[1]->sym->session =
-				sess_ctx->worker_sess[worker_index];
-		}
-
-		if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-			struct scheduler_session_ctx *sess_ctx =
-				CRYPTODEV_GET_SYM_SESS_PRIV(op[2]->sym->session);
-			op[2]->sym->session =
-				sess_ctx->worker_sess[worker_index];
-		}
-
-		if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-			struct scheduler_session_ctx *sess_ctx =
-				CRYPTODEV_GET_SYM_SESS_PRIV(op[3]->sym->session);
-			op[3]->sym->session =
-				sess_ctx->worker_sess[worker_index];
-		}
+		scheduler_set_single_worker_session(op[0], worker_index);
+		scheduler_set_single_worker_session(op[1], worker_index);
+		scheduler_set_single_worker_session(op[2], worker_index);
+		scheduler_set_single_worker_session(op[3], worker_index);
 
 		op += 4;
 		n -= 4;
 	}
 
 	while (n--) {
-		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-			struct scheduler_session_ctx *sess_ctx =
-				CRYPTODEV_GET_SYM_SESS_PRIV(op[0]->sym->session);
-
-			op[0]->sym->session =
-				sess_ctx->worker_sess[worker_index];
-			op++;
-		}
+		scheduler_set_single_worker_session(op[0], worker_index);
+		op++;
 	}
 }
 
+/*
+ * Restore the original scheduler session on @op from the worker
+ * session's opaque data. NOTE(review): relies on session creation
+ * having stored the scheduler session pointer in each worker session's
+ * opaque data - the security-session half of that setup lives in
+ * scheduler_session_create(), not fully visible here; confirm.
+ */
static __rte_always_inline void
-scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t nb_ops)
+scheduler_retrieve_single_session(struct rte_crypto_op *op)
+{
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		op->sym->session = (void *)(uintptr_t)
+			rte_cryptodev_sym_session_opaque_data_get(op->sym->session);
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		op->sym->session = (void *)(uintptr_t)
+			rte_security_session_opaque_data_get(op->sym->session);
+}
+
+static __rte_always_inline void
+scheduler_retrieve_sessions(struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	uint16_t n = nb_ops;
 	struct rte_crypto_op **op = ops;
@@ -194,32 +198,77 @@  scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t nb_ops)
 			rte_prefetch0(op[7]->sym->session);
 		}
 
-		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			op[0]->sym->session = (void *)(uintptr_t)
-				rte_cryptodev_sym_session_opaque_data_get(op[0]->sym->session);
-		if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			op[1]->sym->session = (void *)(uintptr_t)
-				rte_cryptodev_sym_session_opaque_data_get(op[1]->sym->session);
-		if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			op[2]->sym->session = (void *)(uintptr_t)
-				rte_cryptodev_sym_session_opaque_data_get(op[2]->sym->session);
-		if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			op[3]->sym->session = (void *)(uintptr_t)
-				rte_cryptodev_sym_session_opaque_data_get(op[3]->sym->session);
+		scheduler_retrieve_single_session(op[0]);
+		scheduler_retrieve_single_session(op[1]);
+		scheduler_retrieve_single_session(op[2]);
+		scheduler_retrieve_single_session(op[3]);
 
 		op += 4;
 		n -= 4;
 	}
 
 	while (n--) {
-		if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			op[0]->sym->session = (void *)(uintptr_t)
-				rte_cryptodev_sym_session_opaque_data_get(op[0]->sym->session);
+		scheduler_retrieve_single_session(op[0]);
 		op++;
 	}
 }
 
+/*
+ * Derive the length of a crypto job from an op: the cipher data length,
+ * or the auth data length for auth-only ops (cipher length 0).
+ */
+static __rte_always_inline uint32_t
+scheduler_get_job_len(struct rte_crypto_op *op)
+{
+	uint32_t job_len;
+
+	/* job_len is initialized as the cipher data length; if that is
+	 * 0, the auth data length is used instead (branchless select)
+	 */
+	job_len = op->sym->cipher.data.length;
+	job_len += (op->sym->cipher.data.length == 0) *
+					op->sym->auth.data.length;
+
+	return job_len;
+}
+
+/*
+ * Free every capability array owned by the scheduler context and NULL
+ * the pointers, making the function safe to call repeatedly (e.g. on
+ * capability re-sync and again on device close).
+ *
+ * NOTE(review): the NULL checks before rte_free() are redundant -
+ * rte_free(NULL) is a documented no-op - but harmless.
+ */
+static __rte_always_inline void
+scheduler_free_capabilities(struct scheduler_ctx *sched_ctx)
+{
+	uint32_t i;
+
+	if (sched_ctx->capabilities) {
+		rte_free(sched_ctx->capabilities);
+		sched_ctx->capabilities = NULL;
+	}
+
+	if (sched_ctx->sec_crypto_capabilities) {
+		i = 0;
+		/* sec_crypto_capabilities is a NULL-terminated array of
+		 * per-security-capability crypto capability arrays
+		 */
+		while (sched_ctx->sec_crypto_capabilities[i] != NULL) {
+			rte_free(sched_ctx->sec_crypto_capabilities[i]);
+			sched_ctx->sec_crypto_capabilities[i] = NULL;
+			i++;
+		}
+
+		rte_free(sched_ctx->sec_crypto_capabilities);
+		sched_ctx->sec_crypto_capabilities = NULL;
+	}
+
+	if (sched_ctx->sec_capabilities) {
+		rte_free(sched_ctx->sec_capabilities);
+		sched_ctx->sec_capabilities = NULL;
+	}
+}
+
+/*
+ * Return 1 if the (action, protocol) pair is supported by the
+ * scheduler's security offload, 0 otherwise. Currently only
+ * lookaside-protocol DOCSIS is supported.
+ */
+static __rte_always_inline int
+scheduler_check_sec_proto_supp(enum rte_security_session_action_type action,
+		enum rte_security_session_protocol protocol)
+{
+	if (action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL &&
+			protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
+		return 1;
+
+	return 0;
+}
+
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
+extern struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops;
 
 #endif /* _SCHEDULER_PMD_PRIVATE_H */
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index ad3f8b842a..08041887a8 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -28,11 +28,11 @@  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	if (unlikely(nb_ops == 0))
 		return 0;
 
-	scheduler_set_worker_session(ops, nb_ops, worker_idx);
+	scheduler_set_worker_sessions(ops, nb_ops, worker_idx);
 	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
 			worker->qp_id, ops, nb_ops);
 	if (processed_ops < nb_ops)
-		scheduler_retrieve_session(ops + processed_ops,
+		scheduler_retrieve_sessions(ops + processed_ops,
 			nb_ops - processed_ops);
 
 	worker->nb_inflight_cops += processed_ops;
@@ -87,7 +87,7 @@  schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
 			worker->qp_id, ops, nb_ops);
-	scheduler_retrieve_session(ops, nb_deq_ops);
+	scheduler_retrieve_sessions(ops, nb_deq_ops);
 	last_worker_idx += 1;
 	last_worker_idx %= rr_qp_ctx->nb_workers;