get:
Show a patch.

patch:
Partially update a patch (only the fields provided are modified).

put:
Update a patch.

GET /api/patches/131638/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 131638,
    "url": "http://patches.dpdk.org/api/patches/131638/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230919141440.39305-2-david.coyle@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230919141440.39305-2-david.coyle@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230919141440.39305-2-david.coyle@intel.com",
    "date": "2023-09-19T14:14:39",
    "name": "[v4,1/2] crypto/scheduler: support DOCSIS security protocol",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "3b78341b9460e87f52a86db9ff724d9802afba7e",
    "submitter": {
        "id": 961,
        "url": "http://patches.dpdk.org/api/people/961/?format=api",
        "name": "Coyle, David",
        "email": "david.coyle@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230919141440.39305-2-david.coyle@intel.com/mbox/",
    "series": [
        {
            "id": 29556,
            "url": "http://patches.dpdk.org/api/series/29556/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29556",
            "date": "2023-09-19T14:14:38",
            "name": "crypto/scheduler: add support for DOCSIS security protocol",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/29556/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/131638/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/131638/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 0509342609;\n\tTue, 19 Sep 2023 16:15:07 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E844C40E78;\n\tTue, 19 Sep 2023 16:15:06 +0200 (CEST)",
            "from mgamail.intel.com (mgamail.intel.com [192.55.52.43])\n by mails.dpdk.org (Postfix) with ESMTP id EFE9740277\n for <dev@dpdk.org>; Tue, 19 Sep 2023 16:15:04 +0200 (CEST)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 19 Sep 2023 07:15:03 -0700",
            "from silpixa00399912.ir.intel.com (HELO\n silpixa00399912.ger.corp.intel.com) ([10.237.222.220])\n by orsmga008.jf.intel.com with ESMTP; 19 Sep 2023 07:15:00 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1695132905; x=1726668905;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=jDNjHubroVECYfkeasBNMtpdE3D0FPMHyoNlP1kLjgY=;\n b=VvDcPITo/+XHMXucNQHXRy/Kv5/X6R9byebuzVqkaT+Ww1atuEJXqRmx\n DezjNYKmu9+nAiYXWsLhgwabM0+tJXs4E3SrrJvn9+/FsHobCAI0Uym9N\n dGZ8fS7aYCdtf6tMrzWiDuiPckh+84Yobrfrs5411BBXu/CCwT07LAnGz\n DtZ9m4AuFwBLCnlPtVoptObqyuJ7l+GNYQLeQ/ULtTxI2zy9rb2BSVlXm\n YXoLOLF7X4Fp/2dSb2CdZUonBP1uhnFChaAQhalG+FwUvKeyicIV7u9+R\n YZMBG2caWh++HlfqB7poOGSL0Mglu8eMJl52YVGfnt4tuE2AnSkYcegfT Q==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6600,9927,10838\"; a=\"466276441\"",
            "E=Sophos;i=\"6.02,159,1688454000\"; d=\"scan'208\";a=\"466276441\"",
            "E=McAfee;i=\"6600,9927,10838\"; a=\"775569999\"",
            "E=Sophos;i=\"6.02,159,1688454000\"; d=\"scan'208\";a=\"775569999\""
        ],
        "X-ExtLoop1": "1",
        "From": "David Coyle <david.coyle@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "kai.ji@intel.com, anoobj@marvell.com, jerinj@marvell.com,\n ciara.power@intel.com, kevin.osullivan@intel.com,\n David Coyle <david.coyle@intel.com>",
        "Subject": "[PATCH v4 1/2] crypto/scheduler: support DOCSIS security protocol",
        "Date": "Tue, 19 Sep 2023 14:14:39 +0000",
        "Message-Id": "<20230919141440.39305-2-david.coyle@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230919141440.39305-1-david.coyle@intel.com>",
        "References": "<20230914152207.19794-1-david.coyle@intel.com>\n <20230919141440.39305-1-david.coyle@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support to the cryptodev scheduler PMD for the DOCSIS security\nprotocol. This includes adding the following to the scheduler:\n- synchronization of worker's security capabilities\n- retrieval of the scheduler's synchronized security capabilities\n- retrieval of the security session size i.e. maximum session size\n  across all workers\n- creation of security sessions on each worker\n- deletion of security sessions on each worker\n\nSigned-off-by: David Coyle <david.coyle@intel.com>\nSigned-off-by: Kevin O'Sullivan <kevin.osullivan@intel.com>\n---\n doc/guides/rel_notes/release_23_11.rst        |   4 +\n drivers/crypto/scheduler/meson.build          |   2 +-\n .../scheduler/rte_cryptodev_scheduler.c       | 218 ++++++++++-\n drivers/crypto/scheduler/scheduler_failover.c |  12 +-\n .../crypto/scheduler/scheduler_multicore.c    |  10 +-\n .../scheduler/scheduler_pkt_size_distr.c      |  54 +--\n drivers/crypto/scheduler/scheduler_pmd.c      |  59 ++-\n drivers/crypto/scheduler/scheduler_pmd_ops.c  | 370 +++++++++++++-----\n .../crypto/scheduler/scheduler_pmd_private.h  | 155 +++++---\n .../crypto/scheduler/scheduler_roundrobin.c   |   6 +-\n 10 files changed, 657 insertions(+), 233 deletions(-)",
    "diff": "diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst\nindex 333e1d95a2..a3d0dadbdf 100644\n--- a/doc/guides/rel_notes/release_23_11.rst\n+++ b/doc/guides/rel_notes/release_23_11.rst\n@@ -78,6 +78,10 @@ New Features\n * build: Optional libraries can now be selected with the new ``enable_libs``\n   build option similarly to the existing ``enable_drivers`` build option.\n \n+* **Updated Cryptodev Scheduler PMD.**\n+\n+  Added support for DOCSIS security protocol through the ``rte_security`` API\n+  callbacks.\n \n Removed Items\n -------------\ndiff --git a/drivers/crypto/scheduler/meson.build b/drivers/crypto/scheduler/meson.build\nindex cd18efc791..752d655415 100644\n--- a/drivers/crypto/scheduler/meson.build\n+++ b/drivers/crypto/scheduler/meson.build\n@@ -7,7 +7,7 @@ if is_windows\n     subdir_done()\n endif\n \n-deps += ['bus_vdev', 'reorder']\n+deps += ['bus_vdev', 'reorder', 'security']\n sources = files(\n         'rte_cryptodev_scheduler.c',\n         'scheduler_failover.c',\ndiff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c\nindex 258d6f8c43..9a21edd32a 100644\n--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c\n+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c\n@@ -5,11 +5,14 @@\n #include <rte_reorder.h>\n #include <rte_cryptodev.h>\n #include <cryptodev_pmd.h>\n+#include <rte_security_driver.h>\n #include <rte_malloc.h>\n \n #include \"rte_cryptodev_scheduler.h\"\n #include \"scheduler_pmd_private.h\"\n \n+#define MAX_CAPS 256\n+\n /** update the scheduler pmd's capability with attaching device's\n  *  capability.\n  *  For each device to be attached, the scheduler's capability should be\n@@ -59,7 +62,6 @@ sync_caps(struct rte_cryptodev_capabilities *caps,\n \t\t\t\t\tcap->sym.auth.digest_size.max ?\n \t\t\t\t\ts_cap->sym.auth.digest_size.max :\n \t\t\t\t\tcap->sym.auth.digest_size.max;\n-\n \t\t\t}\n \n \t\t\tif 
(s_cap->sym.xform_type ==\n@@ -81,25 +83,173 @@ sync_caps(struct rte_cryptodev_capabilities *caps,\n \n \t\tmemset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));\n \t\tsync_nb_caps--;\n+\t\ti--;\n \t}\n \n \treturn sync_nb_caps;\n }\n \n static int\n-update_scheduler_capability(struct scheduler_ctx *sched_ctx)\n+check_sec_cap_equal(const struct rte_security_capability *sec_cap1,\n+\t\tstruct rte_security_capability *sec_cap2)\n+{\n+\tif (sec_cap1->action != sec_cap2->action ||\n+\t\t\tsec_cap1->protocol != sec_cap2->protocol ||\n+\t\t\tsec_cap1->ol_flags != sec_cap2->ol_flags)\n+\t\treturn 0;\n+\n+\tif (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)\n+\t\treturn !memcmp(&sec_cap1->docsis, &sec_cap2->docsis,\n+\t\t\t\tsizeof(sec_cap1->docsis));\n+\telse\n+\t\treturn 0;\n+}\n+\n+static void\n+copy_sec_cap(struct rte_security_capability *dst_sec_cap,\n+\t\tstruct rte_security_capability *src_sec_cap)\n+{\n+\tdst_sec_cap->action = src_sec_cap->action;\n+\tdst_sec_cap->protocol = src_sec_cap->protocol;\n+\tif (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)\n+\t\tdst_sec_cap->docsis = src_sec_cap->docsis;\n+\tdst_sec_cap->ol_flags = src_sec_cap->ol_flags;\n+}\n+\n+static uint32_t\n+sync_sec_crypto_caps(struct rte_cryptodev_capabilities *tmp_sec_crypto_caps,\n+\t\tconst struct rte_cryptodev_capabilities *sec_crypto_caps,\n+\t\tconst struct rte_cryptodev_capabilities *worker_sec_crypto_caps)\n+{\n+\tuint8_t nb_caps = 0;\n+\n+\tnb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps, sec_crypto_caps);\n+\tsync_caps(tmp_sec_crypto_caps, nb_caps, worker_sec_crypto_caps);\n+\n+\treturn nb_caps;\n+}\n+\n+/** update the scheduler pmd's security capability with attaching device's\n+ *  security capability.\n+ *  For each device to be attached, the scheduler's security capability should\n+ *  be the common capability set of all workers\n+ **/\n+static uint32_t\n+sync_sec_caps(uint32_t worker_idx,\n+\t\tstruct rte_security_capability *sec_caps,\n+\t\tstruct 
rte_cryptodev_capabilities sec_crypto_caps[][MAX_CAPS],\n+\t\tuint32_t nb_sec_caps,\n+\t\tconst struct rte_security_capability *worker_sec_caps)\n {\n-\tstruct rte_cryptodev_capabilities tmp_caps[256] = { {0} };\n-\tuint32_t nb_caps = 0, i;\n+\tuint32_t nb_worker_sec_caps = 0, i;\n+\n+\tif (worker_sec_caps == NULL)\n+\t\treturn 0;\n+\n+\twhile (worker_sec_caps[nb_worker_sec_caps].action !=\n+\t\t\t\t\tRTE_SECURITY_ACTION_TYPE_NONE)\n+\t\tnb_worker_sec_caps++;\n+\n+\t/* Handle first worker */\n+\tif (worker_idx == 0) {\n+\t\tuint32_t nb_worker_sec_crypto_caps = 0;\n+\t\tuint32_t nb_worker_supp_sec_caps = 0;\n+\n+\t\tfor (i = 0; i < nb_worker_sec_caps; i++) {\n+\t\t\t/* Check for supported security protocols */\n+\t\t\tif (!scheduler_check_sec_proto_supp(worker_sec_caps[i].action,\n+\t\t\t\t\tworker_sec_caps[i].protocol))\n+\t\t\t\tcontinue;\n \n-\tif (sched_ctx->capabilities) {\n-\t\trte_free(sched_ctx->capabilities);\n-\t\tsched_ctx->capabilities = NULL;\n+\t\t\tsec_caps[nb_worker_supp_sec_caps] = worker_sec_caps[i];\n+\n+\t\t\twhile (worker_sec_caps[i].crypto_capabilities[\n+\t\t\t\t\tnb_worker_sec_crypto_caps].op !=\n+\t\t\t\t\t\tRTE_CRYPTO_OP_TYPE_UNDEFINED)\n+\t\t\t\tnb_worker_sec_crypto_caps++;\n+\n+\t\t\trte_memcpy(&sec_crypto_caps[nb_worker_supp_sec_caps][0],\n+\t\t\t\t&worker_sec_caps[i].crypto_capabilities[0],\n+\t\t\t\tsizeof(sec_crypto_caps[nb_worker_supp_sec_caps][0]) *\n+\t\t\t\t\tnb_worker_sec_crypto_caps);\n+\n+\t\t\tnb_worker_supp_sec_caps++;\n+\t\t}\n+\t\treturn nb_worker_supp_sec_caps;\n \t}\n \n-\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n-\t\tstruct rte_cryptodev_info dev_info;\n+\tfor (i = 0; i < nb_sec_caps; i++) {\n+\t\tstruct rte_security_capability *sec_cap = &sec_caps[i];\n+\t\tuint32_t j;\n+\n+\t\tfor (j = 0; j < nb_worker_sec_caps; j++) {\n+\t\t\tstruct rte_cryptodev_capabilities\n+\t\t\t\t\ttmp_sec_crypto_caps[MAX_CAPS] = { {0} };\n+\t\t\tuint32_t nb_sec_crypto_caps = 0;\n+\t\t\tconst struct rte_security_capability *worker_sec_cap 
=\n+\t\t\t\t\t\t\t\t&worker_sec_caps[j];\n+\n+\t\t\tif (!check_sec_cap_equal(worker_sec_cap, sec_cap))\n+\t\t\t\tcontinue;\n+\n+\t\t\t/* Sync the crypto caps of the common security cap */\n+\t\t\tnb_sec_crypto_caps = sync_sec_crypto_caps(\n+\t\t\t\t\t\ttmp_sec_crypto_caps,\n+\t\t\t\t\t\t&sec_crypto_caps[i][0],\n+\t\t\t\t\t\t&worker_sec_cap->crypto_capabilities[0]);\n+\n+\t\t\tmemset(&sec_crypto_caps[i][0], 0,\n+\t\t\t\t\tsizeof(sec_crypto_caps[i][0]) * MAX_CAPS);\n+\n+\t\t\trte_memcpy(&sec_crypto_caps[i][0],\n+\t\t\t\t\t&tmp_sec_crypto_caps[0],\n+\t\t\t\t\tsizeof(sec_crypto_caps[i][0]) * nb_sec_crypto_caps);\n+\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (j < nb_worker_sec_caps)\n+\t\t\tcontinue;\n+\n+\t\t/*\n+\t\t * Remove an uncommon security cap, and it's associated crypto\n+\t\t * caps, from the arrays\n+\t\t */\n+\t\tfor (j = i; j < nb_sec_caps - 1; j++) {\n+\t\t\trte_memcpy(&sec_caps[j], &sec_caps[j+1],\n+\t\t\t\t\tsizeof(*sec_cap));\n+\n+\t\t\trte_memcpy(&sec_crypto_caps[j][0],\n+\t\t\t\t\t&sec_crypto_caps[j+1][0],\n+\t\t\t\t\tsizeof(*&sec_crypto_caps[j][0]) *\n+\t\t\t\t\t\tMAX_CAPS);\n+\t\t}\n+\t\tmemset(&sec_caps[nb_sec_caps - 1], 0, sizeof(*sec_cap));\n+\t\tmemset(&sec_crypto_caps[nb_sec_caps - 1][0], 0,\n+\t\t\tsizeof(*&sec_crypto_caps[nb_sec_caps - 1][0]) *\n+\t\t\t\tMAX_CAPS);\n+\t\tnb_sec_caps--;\n+\t\ti--;\n+\t}\n+\n+\treturn nb_sec_caps;\n+}\n+\n+static int\n+update_scheduler_capability(struct scheduler_ctx *sched_ctx)\n+{\n+\tstruct rte_cryptodev_capabilities tmp_caps[MAX_CAPS] = { {0} };\n+\tstruct rte_security_capability tmp_sec_caps[MAX_CAPS] = { {0} };\n+\tstruct rte_cryptodev_capabilities\n+\t\ttmp_sec_crypto_caps[MAX_CAPS][MAX_CAPS] = { {{0}} };\n+\tuint32_t nb_caps = 0, nb_sec_caps = 0, i;\n+\tstruct rte_cryptodev_info dev_info;\n+\n+\t/* Free any previously allocated capability memory */\n+\tscheduler_free_capabilities(sched_ctx);\n \n+\t/* Determine the new cryptodev capabilities for the scheduler */\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) 
{\n \t\trte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);\n \n \t\tnb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);\n@@ -116,6 +266,54 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)\n \trte_memcpy(sched_ctx->capabilities, tmp_caps,\n \t\t\tsizeof(struct rte_cryptodev_capabilities) * nb_caps);\n \n+\t/* Determine the new security capabilities for the scheduler */\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tstruct rte_cryptodev *dev =\n+\t\t\t\t&rte_cryptodevs[sched_ctx->workers[i].dev_id];\n+\t\tstruct rte_security_ctx *sec_ctx = dev->security_ctx;\n+\n+\t\tnb_sec_caps = sync_sec_caps(i, tmp_sec_caps, tmp_sec_crypto_caps,\n+\t\t\tnb_sec_caps, rte_security_capabilities_get(sec_ctx));\n+\t}\n+\n+\tsched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,\n+\t\t\t\t\tsizeof(struct rte_security_capability) *\n+\t\t\t\t\t(nb_sec_caps + 1), 0, SOCKET_ID_ANY);\n+\tif (!sched_ctx->sec_capabilities)\n+\t\treturn -ENOMEM;\n+\n+\tsched_ctx->sec_crypto_capabilities = rte_zmalloc_socket(NULL,\n+\t\t\t\tsizeof(struct rte_cryptodev_capabilities *) *\n+\t\t\t\t(nb_sec_caps + 1),\n+\t\t\t\t0, SOCKET_ID_ANY);\n+\tif (!sched_ctx->sec_crypto_capabilities)\n+\t\treturn -ENOMEM;\n+\n+\tfor (i = 0; i < nb_sec_caps; i++) {\n+\t\tuint16_t nb_sec_crypto_caps = 0;\n+\n+\t\tcopy_sec_cap(&sched_ctx->sec_capabilities[i], &tmp_sec_caps[i]);\n+\n+\t\twhile (tmp_sec_crypto_caps[i][nb_sec_crypto_caps].op !=\n+\t\t\t\t\t\tRTE_CRYPTO_OP_TYPE_UNDEFINED)\n+\t\t\tnb_sec_crypto_caps++;\n+\n+\t\tsched_ctx->sec_crypto_capabilities[i] =\n+\t\t\trte_zmalloc_socket(NULL,\n+\t\t\t\tsizeof(struct rte_cryptodev_capabilities) *\n+\t\t\t\t(nb_sec_crypto_caps + 1), 0, SOCKET_ID_ANY);\n+\t\tif (!sched_ctx->sec_crypto_capabilities[i])\n+\t\t\treturn -ENOMEM;\n+\n+\t\trte_memcpy(sched_ctx->sec_crypto_capabilities[i],\n+\t\t\t\t&tmp_sec_crypto_caps[i][0],\n+\t\t\t\tsizeof(struct rte_cryptodev_capabilities)\n+\t\t\t\t\t* 
nb_sec_crypto_caps);\n+\n+\t\tsched_ctx->sec_capabilities[i].crypto_capabilities =\n+\t\t\t\tsched_ctx->sec_crypto_capabilities[i];\n+\t}\n+\n \treturn 0;\n }\n \n@@ -205,6 +403,7 @@ rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)\n \tsched_ctx->nb_workers++;\n \n \tif (update_scheduler_capability(sched_ctx) < 0) {\n+\t\tscheduler_free_capabilities(sched_ctx);\n \t\tworker->dev_id = 0;\n \t\tworker->driver_id = 0;\n \t\tsched_ctx->nb_workers--;\n@@ -266,6 +465,7 @@ rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)\n \tsched_ctx->nb_workers--;\n \n \tif (update_scheduler_capability(sched_ctx) < 0) {\n+\t\tscheduler_free_capabilities(sched_ctx);\n \t\tCR_SCHED_LOG(ERR, \"capabilities update failed\");\n \t\treturn -ENOTSUP;\n \t}\ndiff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c\nindex f24d2fc44b..52ff2ffbb7 100644\n--- a/drivers/crypto/scheduler/scheduler_failover.c\n+++ b/drivers/crypto/scheduler/scheduler_failover.c\n@@ -28,7 +28,7 @@ failover_worker_enqueue(struct scheduler_worker *worker,\n {\n \tuint16_t processed_ops;\n \n-\tscheduler_set_worker_session(ops, nb_ops, index);\n+\tscheduler_set_worker_sessions(ops, nb_ops, index);\n \n \tprocessed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,\n \t\t\tworker->qp_id, ops, nb_ops);\n@@ -51,7 +51,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\tops, nb_ops, PRIMARY_WORKER_IDX);\n \n \tif (enqueued_ops < nb_ops) {\n-\t\tscheduler_retrieve_session(&ops[enqueued_ops],\n+\t\tscheduler_retrieve_sessions(&ops[enqueued_ops],\n \t\t\t\t\t\tnb_ops - enqueued_ops);\n \t\tenqueued_ops += failover_worker_enqueue(\n \t\t\t\t&qp_ctx->secondary_worker,\n@@ -59,7 +59,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\t\tnb_ops - enqueued_ops,\n \t\t\t\tSECONDARY_WORKER_IDX);\n \t\tif (enqueued_ops < 
nb_ops)\n-\t\t\tscheduler_retrieve_session(&ops[enqueued_ops],\n+\t\t\tscheduler_retrieve_sessions(&ops[enqueued_ops],\n \t\t\t\t\t\tnb_ops - enqueued_ops);\n \t}\n \n@@ -102,7 +102,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \tqp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_SWITCH_MASK;\n \n \tif (nb_deq_ops == nb_ops)\n-\t\tgoto retrieve_session;\n+\t\tgoto retrieve_sessions;\n \n \tworker = workers[qp_ctx->deq_idx];\n \n@@ -112,8 +112,8 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\tworker->nb_inflight_cops -= nb_deq_ops2;\n \t}\n \n-retrieve_session:\n-\tscheduler_retrieve_session(ops, nb_deq_ops + nb_deq_ops2);\n+retrieve_sessions:\n+\tscheduler_retrieve_sessions(ops, nb_deq_ops + nb_deq_ops2);\n \n \treturn nb_deq_ops + nb_deq_ops2;\n }\ndiff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c\nindex 3dea850661..a21b522f9f 100644\n--- a/drivers/crypto/scheduler/scheduler_multicore.c\n+++ b/drivers/crypto/scheduler/scheduler_multicore.c\n@@ -183,7 +183,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev)\n \n \twhile (!mc_ctx->stop_signal) {\n \t\tif (pending_enq_ops) {\n-\t\t\tscheduler_set_worker_session(\n+\t\t\tscheduler_set_worker_sessions(\n \t\t\t\t&enq_ops[pending_enq_ops_idx], pending_enq_ops,\n \t\t\t\tworker_idx);\n \t\t\tprocessed_ops =\n@@ -192,7 +192,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev)\n \t\t\t\t\t&enq_ops[pending_enq_ops_idx],\n \t\t\t\t\tpending_enq_ops);\n \t\t\tif (processed_ops < pending_deq_ops)\n-\t\t\t\tscheduler_retrieve_session(\n+\t\t\t\tscheduler_retrieve_sessions(\n \t\t\t\t\t&enq_ops[pending_enq_ops_idx +\n \t\t\t\t\t\tprocessed_ops],\n \t\t\t\t\tpending_deq_ops - processed_ops);\n@@ -203,13 +203,13 @@ mc_scheduler_worker(struct rte_cryptodev *dev)\n \t\t\tprocessed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,\n \t\t\t\t\t\t\tMC_SCHED_BUFFER_SIZE, NULL);\n \t\t\tif (processed_ops) 
{\n-\t\t\t\tscheduler_set_worker_session(enq_ops,\n+\t\t\t\tscheduler_set_worker_sessions(enq_ops,\n \t\t\t\t\tprocessed_ops, worker_idx);\n \t\t\t\tpending_enq_ops_idx = rte_cryptodev_enqueue_burst(\n \t\t\t\t\t\tworker->dev_id, worker->qp_id,\n \t\t\t\t\t\tenq_ops, processed_ops);\n \t\t\t\tif (pending_enq_ops_idx < processed_ops)\n-\t\t\t\t\tscheduler_retrieve_session(\n+\t\t\t\t\tscheduler_retrieve_sessions(\n \t\t\t\t\t\tenq_ops + pending_enq_ops_idx,\n \t\t\t\t\t\tprocessed_ops -\n \t\t\t\t\t\tpending_enq_ops_idx);\n@@ -229,7 +229,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev)\n \t\t\t\t\tworker->dev_id, worker->qp_id, deq_ops,\n \t\t\t\t\tMC_SCHED_BUFFER_SIZE);\n \t\t\tif (processed_ops) {\n-\t\t\t\tscheduler_retrieve_session(deq_ops,\n+\t\t\t\tscheduler_retrieve_sessions(deq_ops,\n \t\t\t\t\tprocessed_ops);\n \t\t\t\tinflight_ops -= processed_ops;\n \t\t\t\tif (reordering_enabled) {\ndiff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c\nindex 0c51fff930..30bb5ce0e2 100644\n--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c\n+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c\n@@ -59,7 +59,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t}\n \n \tfor (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {\n-\t\tstruct scheduler_session_ctx *sess_ctx[4];\n \t\tuint8_t target[4];\n \t\tuint32_t job_len[4];\n \n@@ -76,17 +75,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\trte_prefetch0((uint8_t *)ops[i + 7]->sym->session +\n \t\t\tsizeof(struct rte_cryptodev_sym_session));\n \n-\t\tsess_ctx[0] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i]->sym->session);\n-\t\tsess_ctx[1] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 1]->sym->session);\n-\t\tsess_ctx[2] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 2]->sym->session);\n-\t\tsess_ctx[3] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 3]->sym->session);\n-\n-\t\t/* job_len is initialized as cipher data 
length, once\n-\t\t * it is 0, equals to auth data length\n-\t\t */\n-\t\tjob_len[0] = ops[i]->sym->cipher.data.length;\n-\t\tjob_len[0] += (ops[i]->sym->cipher.data.length == 0) *\n-\t\t\t\tops[i]->sym->auth.data.length;\n+\t\tjob_len[0] = scheduler_get_job_len(ops[i]);\n \t\t/* decide the target op based on the job length */\n \t\ttarget[0] = !(job_len[0] & psd_qp_ctx->threshold);\n \t\tp_enq_op = &enq_ops[target[0]];\n@@ -100,15 +89,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\tbreak;\n \t\t}\n \n-\t\tif (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\tops[i]->sym->session =\n-\t\t\t\tsess_ctx[0]->worker_sess[target[0]];\n+\t\tscheduler_set_single_worker_session(ops[i], target[0]);\n \t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];\n \t\tp_enq_op->pos++;\n \n-\t\tjob_len[1] = ops[i + 1]->sym->cipher.data.length;\n-\t\tjob_len[1] += (ops[i + 1]->sym->cipher.data.length == 0) *\n-\t\t\t\tops[i+1]->sym->auth.data.length;\n+\t\tjob_len[1] = scheduler_get_job_len(ops[i + 1]);\n \t\ttarget[1] = !(job_len[1] & psd_qp_ctx->threshold);\n \t\tp_enq_op = &enq_ops[target[1]];\n \n@@ -118,15 +103,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\tbreak;\n \t\t}\n \n-\t\tif (ops[i + 1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\tops[i + 1]->sym->session =\n-\t\t\t\tsess_ctx[1]->worker_sess[target[1]];\n+\t\tscheduler_set_single_worker_session(ops[i + 1], target[1]);\n \t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+1];\n \t\tp_enq_op->pos++;\n \n-\t\tjob_len[2] = ops[i + 2]->sym->cipher.data.length;\n-\t\tjob_len[2] += (ops[i + 2]->sym->cipher.data.length == 0) *\n-\t\t\t\tops[i + 2]->sym->auth.data.length;\n+\t\tjob_len[2] = scheduler_get_job_len(ops[i + 2]);\n \t\ttarget[2] = !(job_len[2] & psd_qp_ctx->threshold);\n \t\tp_enq_op = &enq_ops[target[2]];\n \n@@ -136,15 +117,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n 
\t\t\tbreak;\n \t\t}\n \n-\t\tif (ops[i + 2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\tops[i + 2]->sym->session =\n-\t\t\t\tsess_ctx[2]->worker_sess[target[2]];\n+\t\tscheduler_set_single_worker_session(ops[i + 2], target[2]);\n \t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+2];\n \t\tp_enq_op->pos++;\n \n-\t\tjob_len[3] = ops[i + 3]->sym->cipher.data.length;\n-\t\tjob_len[3] += (ops[i + 3]->sym->cipher.data.length == 0) *\n-\t\t\t\tops[i + 3]->sym->auth.data.length;\n+\t\tjob_len[3] = scheduler_get_job_len(ops[i + 3]);\n \t\ttarget[3] = !(job_len[3] & psd_qp_ctx->threshold);\n \t\tp_enq_op = &enq_ops[target[3]];\n \n@@ -154,22 +131,16 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\tbreak;\n \t\t}\n \n-\t\tif (ops[i + 3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\tops[i + 3]->sym->session =\n-\t\t\t\tsess_ctx[3]->worker_sess[target[3]];\n+\t\tscheduler_set_single_worker_session(ops[i + 3], target[3]);\n \t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+3];\n \t\tp_enq_op->pos++;\n \t}\n \n \tfor (; i < nb_ops; i++) {\n-\t\tstruct scheduler_session_ctx *sess_ctx =\n-\t\t\tCRYPTODEV_GET_SYM_SESS_PRIV(ops[i]->sym->session);\n \t\tuint32_t job_len;\n \t\tuint8_t target;\n \n-\t\tjob_len = ops[i]->sym->cipher.data.length;\n-\t\tjob_len += (ops[i]->sym->cipher.data.length == 0) *\n-\t\t\t\tops[i]->sym->auth.data.length;\n+\t\tjob_len = scheduler_get_job_len(ops[i]);\n \t\ttarget = !(job_len & psd_qp_ctx->threshold);\n \t\tp_enq_op = &enq_ops[target];\n \n@@ -179,8 +150,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\tbreak;\n \t\t}\n \n-\t\tif (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\tops[i]->sym->session = sess_ctx->worker_sess[target];\n+\t\tscheduler_set_single_worker_session(ops[i], target);\n \t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];\n \t\tp_enq_op->pos++;\n \t}\n@@ -236,7 +206,7 @@ schedule_dequeue(void *qp, struct 
rte_crypto_op **ops, uint16_t nb_ops)\n \tif (worker->nb_inflight_cops) {\n \t\tnb_deq_ops_pri = rte_cryptodev_dequeue_burst(worker->dev_id,\n \t\t\tworker->qp_id, ops, nb_ops);\n-\t\tscheduler_retrieve_session(ops, nb_deq_ops_pri);\n+\t\tscheduler_retrieve_sessions(ops, nb_deq_ops_pri);\n \t\tworker->nb_inflight_cops -= nb_deq_ops_pri;\n \t}\n \n@@ -251,7 +221,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\tnb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id,\n \t\t\t\tworker->qp_id, &ops[nb_deq_ops_pri],\n \t\t\t\tnb_ops - nb_deq_ops_pri);\n-\t\tscheduler_retrieve_session(&ops[nb_deq_ops_pri], nb_deq_ops_sec);\n+\t\tscheduler_retrieve_sessions(&ops[nb_deq_ops_pri], nb_deq_ops_sec);\n \t\tworker->nb_inflight_cops -= nb_deq_ops_sec;\n \n \t\tif (!worker->nb_inflight_cops)\ndiff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c\nindex 4e8bbf0e09..589d092d74 100644\n--- a/drivers/crypto/scheduler/scheduler_pmd.c\n+++ b/drivers/crypto/scheduler/scheduler_pmd.c\n@@ -8,6 +8,7 @@\n #include <rte_hexdump.h>\n #include <rte_cryptodev.h>\n #include <cryptodev_pmd.h>\n+#include <rte_security_driver.h>\n #include <bus_vdev_driver.h>\n #include <rte_malloc.h>\n #include <rte_cpuflags.h>\n@@ -77,6 +78,23 @@ const struct scheduler_parse_map scheduler_ordering_map[] = {\n \n #define CDEV_SCHED_MODE_PARAM_SEP_CHAR\t\t':'\n \n+static void\n+free_mem(struct rte_cryptodev *dev)\n+{\n+\tstruct scheduler_ctx *sched_ctx = dev->data->dev_private;\n+\tint i;\n+\n+\tfor (i = 0; i < sched_ctx->nb_init_workers; i++) {\n+\t\trte_free(sched_ctx->init_worker_names[i]);\n+\t\tsched_ctx->init_worker_names[i] = NULL;\n+\t}\n+\n+\tscheduler_free_capabilities(sched_ctx);\n+\n+\trte_free(dev->security_ctx);\n+\tdev->security_ctx = NULL;\n+}\n+\n static int\n cryptodev_scheduler_create(const char *name,\n \t\tstruct rte_vdev_device *vdev,\n@@ -206,8 +224,8 @@ cryptodev_scheduler_create(const char *name,\n \n \t\tif 
(!sched_ctx->init_worker_names[\n \t\t\t\tsched_ctx->nb_init_workers]) {\n-\t\t\tCR_SCHED_LOG(ERR, \"driver %s: Insufficient memory\",\n-\t\t\t\t\tname);\n+\t\t\tCR_SCHED_LOG(ERR, \"Not enough memory for init worker name\");\n+\t\t\tfree_mem(dev);\n \t\t\treturn -ENOMEM;\n \t\t}\n \n@@ -228,8 +246,38 @@ cryptodev_scheduler_create(const char *name,\n \t\t\t0, SOCKET_ID_ANY);\n \n \tif (!sched_ctx->capabilities) {\n-\t\tCR_SCHED_LOG(ERR, \"Not enough memory for capability \"\n-\t\t\t\t\"information\");\n+\t\tCR_SCHED_LOG(ERR, \"Not enough memory for capability information\");\n+\t\tfree_mem(dev);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Initialize security context */\n+\tstruct rte_security_ctx *security_instance;\n+\tsecurity_instance = rte_zmalloc_socket(NULL,\n+\t\t\t\t\tsizeof(struct rte_security_ctx),\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\tif (!security_instance) {\n+\t\tCR_SCHED_LOG(ERR, \"Not enough memory for security context\");\n+\t\tfree_mem(dev);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tsecurity_instance->device = dev;\n+\tsecurity_instance->ops = rte_crypto_scheduler_pmd_sec_ops;\n+\tsecurity_instance->sess_cnt = 0;\n+\tdev->security_ctx = security_instance;\n+\n+\t/*\n+\t * Initialize security capabilities structure as an empty structure,\n+\t * in case device information is requested when no workers are attached\n+\t */\n+\tsched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,\n+\t\t\t\t\tsizeof(struct rte_security_capability),\n+\t\t\t\t\t0, SOCKET_ID_ANY);\n+\n+\tif (!sched_ctx->sec_capabilities) {\n+\t\tCR_SCHED_LOG(ERR, \"Not enough memory for security capability information\");\n+\t\tfree_mem(dev);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -263,6 +311,9 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev)\n \t\t\t\t\tsched_ctx->workers[i].dev_id);\n \t}\n \n+\trte_free(dev->security_ctx);\n+\tdev->security_ctx = NULL;\n+\n \treturn rte_cryptodev_pmd_destroy(dev);\n }\n \ndiff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c 
b/drivers/crypto/scheduler/scheduler_pmd_ops.c\nindex 294aab4452..a18f7a08b0 100644\n--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c\n+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c\n@@ -8,11 +8,205 @@\n #include <dev_driver.h>\n #include <rte_cryptodev.h>\n #include <cryptodev_pmd.h>\n+#include <rte_security_driver.h>\n #include <rte_reorder.h>\n #include <rte_errno.h>\n \n #include \"scheduler_pmd_private.h\"\n \n+struct scheduler_configured_sess_info {\n+\tuint8_t dev_id;\n+\tuint8_t driver_id;\n+\tunion {\n+\t\tstruct rte_cryptodev_sym_session *sess;\n+\t\tstruct {\n+\t\t\tstruct rte_security_session *sec_sess;\n+\t\t\tstruct rte_security_ctx *sec_ctx;\n+\t\t};\n+\t};\n+};\n+\n+static int\n+scheduler_session_create(void *sess, void *sess_params,\n+\t\tstruct scheduler_ctx *sched_ctx,\n+\t\tenum rte_crypto_op_sess_type session_type)\n+{\n+\tstruct rte_mempool *mp = rte_mempool_from_obj(sess);\n+\tstruct scheduler_session_ctx *sess_ctx;\n+\tstruct scheduler_configured_sess_info configured_sess[\n+\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};\n+\tuint32_t i, j, n_configured_sess = 0;\n+\tint ret = 0;\n+\n+\tif (session_type == RTE_CRYPTO_OP_WITH_SESSION)\n+\t\tsess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);\n+\telse\n+\t\tsess_ctx = SECURITY_GET_SESS_PRIV(sess);\n+\n+\tif (mp == NULL)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tstruct scheduler_worker *worker = &sched_ctx->workers[i];\n+\t\tstruct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];\n+\t\tuint8_t next_worker = 0;\n+\n+\t\tfor (j = 0; j < n_configured_sess; j++) {\n+\t\t\tif (configured_sess[j].driver_id == worker->driver_id) {\n+\t\t\t\tif (session_type == RTE_CRYPTO_OP_WITH_SESSION)\n+\t\t\t\t\tsess_ctx->worker_sess[i] =\n+\t\t\t\t\t\tconfigured_sess[j].sess;\n+\t\t\t\telse\n+\t\t\t\t\tsess_ctx->worker_sec_sess[i] =\n+\t\t\t\t\t\tconfigured_sess[j].sec_sess;\n+\n+\t\t\t\tnext_worker = 1;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t\tif 
(next_worker)\n+\t\t\tcontinue;\n+\n+\t\tif (rte_mempool_avail_count(mp) == 0) {\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto error_exit;\n+\t\t}\n+\n+\t\tif (session_type == RTE_CRYPTO_OP_WITH_SESSION) {\n+\t\t\tstruct rte_cryptodev_sym_session *worker_sess =\n+\t\t\t\trte_cryptodev_sym_session_create(worker->dev_id,\n+\t\t\t\t\t\tsess_params, mp);\n+\n+\t\t\tif (worker_sess == NULL) {\n+\t\t\t\tret = -rte_errno;\n+\t\t\t\tgoto error_exit;\n+\t\t\t}\n+\n+\t\t\tworker_sess->opaque_data = (uint64_t)sess;\n+\t\t\tsess_ctx->worker_sess[i] = worker_sess;\n+\t\t\tconfigured_sess[n_configured_sess].sess = worker_sess;\n+\t\t} else {\n+\t\t\tstruct rte_security_session *worker_sess =\n+\t\t\t\trte_security_session_create(dev->security_ctx,\n+\t\t\t\t\t\tsess_params, mp);\n+\n+\t\t\tif (worker_sess == NULL) {\n+\t\t\t\tret = -rte_errno;\n+\t\t\t\tgoto error_exit;\n+\t\t\t}\n+\n+\t\t\tworker_sess->opaque_data = (uint64_t)sess;\n+\t\t\tsess_ctx->worker_sec_sess[i] = worker_sess;\n+\t\t\tconfigured_sess[n_configured_sess].sec_sess =\n+\t\t\t\t\t\t\tworker_sess;\n+\t\t\tconfigured_sess[n_configured_sess].sec_ctx =\n+\t\t\t\t\t\t\tdev->security_ctx;\n+\t\t}\n+\n+\t\tconfigured_sess[n_configured_sess].driver_id =\n+\t\t\t\t\t\t\tworker->driver_id;\n+\t\tconfigured_sess[n_configured_sess].dev_id = worker->dev_id;\n+\t\tn_configured_sess++;\n+\t}\n+\n+\treturn 0;\n+\n+error_exit:\n+\tsess_ctx->ref_cnt = sched_ctx->ref_cnt;\n+\tfor (i = 0; i < n_configured_sess; i++) {\n+\t\tif (session_type == RTE_CRYPTO_OP_WITH_SESSION)\n+\t\t\trte_cryptodev_sym_session_free(\n+\t\t\t\t\t\tconfigured_sess[i].dev_id,\n+\t\t\t\t\t\tconfigured_sess[i].sess);\n+\t\telse\n+\t\t\trte_security_session_destroy(\n+\t\t\t\t\t\tconfigured_sess[i].sec_ctx,\n+\t\t\t\t\t\tconfigured_sess[i].sec_sess);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static void\n+scheduler_session_destroy(void *sess, struct scheduler_ctx *sched_ctx,\n+\t\tuint8_t session_type)\n+{\n+\tstruct scheduler_session_ctx *sess_ctx;\n+\tstruct 
scheduler_configured_sess_info deleted_sess[\n+\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};\n+\tuint32_t i, j, n_deleted_sess = 0;\n+\n+\tif (session_type == RTE_CRYPTO_OP_WITH_SESSION)\n+\t\tsess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);\n+\telse\n+\t\tsess_ctx = SECURITY_GET_SESS_PRIV(sess);\n+\n+\tif (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {\n+\t\tCR_SCHED_LOG(WARNING,\n+\t\t\t\"Worker updated between session creation/deletion. \"\n+\t\t\t\"The session may not be freed fully.\");\n+\t}\n+\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tstruct scheduler_worker *worker = &sched_ctx->workers[i];\n+\t\tstruct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];\n+\t\tuint8_t next_worker = 0;\n+\n+\t\tfor (j = 0; j < n_deleted_sess; j++) {\n+\t\t\tif (deleted_sess[j].driver_id == worker->driver_id) {\n+\t\t\t\tif (session_type == RTE_CRYPTO_OP_WITH_SESSION)\n+\t\t\t\t\tsess_ctx->worker_sess[i] = NULL;\n+\t\t\t\telse\n+\t\t\t\t\tsess_ctx->worker_sec_sess[i] = NULL;\n+\n+\t\t\t\tnext_worker = 1;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t\tif (next_worker)\n+\t\t\tcontinue;\n+\n+\t\tif (session_type == RTE_CRYPTO_OP_WITH_SESSION) {\n+\t\t\trte_cryptodev_sym_session_free(worker->dev_id,\n+\t\t\t\t\t\tsess_ctx->worker_sess[i]);\n+\t\t\tsess_ctx->worker_sess[i] = NULL;\n+\t\t} else {\n+\t\t\trte_security_session_destroy(dev->security_ctx,\n+\t\t\t\t\t\tsess_ctx->worker_sec_sess[i]);\n+\t\t\tsess_ctx->worker_sec_sess[i] = NULL;\n+\t\t}\n+\n+\t\tdeleted_sess[n_deleted_sess++].driver_id = worker->driver_id;\n+\t}\n+}\n+\n+static unsigned int\n+scheduler_session_size_get(struct scheduler_ctx *sched_ctx,\n+\t\tuint8_t session_type)\n+{\n+\tuint8_t i = 0;\n+\tuint32_t max_priv_sess_size = 0;\n+\n+\t/* Check what is the maximum private session size for all workers */\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n+\t\tstruct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];\n+\t\tstruct 
rte_security_ctx *sec_ctx = dev->security_ctx;\n+\t\tuint32_t priv_sess_size = 0;\n+\n+\t\tif (session_type == RTE_CRYPTO_OP_WITH_SESSION) {\n+\t\t\tpriv_sess_size =\n+\t\t\t\t(*dev->dev_ops->sym_session_get_size)(dev);\n+\t\t} else {\n+\t\t\tpriv_sess_size = (*sec_ctx->ops->session_get_size)(dev);\n+\t\t}\n+\n+\t\tmax_priv_sess_size = RTE_MAX(max_priv_sess_size, priv_sess_size);\n+\t}\n+\n+\treturn max_priv_sess_size;\n+}\n+\n /** attaching the workers predefined by scheduler's EAL options */\n static int\n scheduler_attach_init_worker(struct rte_cryptodev *dev)\n@@ -265,10 +459,7 @@ scheduler_pmd_close(struct rte_cryptodev *dev)\n \t\tsched_ctx->private_ctx = NULL;\n \t}\n \n-\tif (sched_ctx->capabilities) {\n-\t\trte_free(sched_ctx->capabilities);\n-\t\tsched_ctx->capabilities = NULL;\n-\t}\n+\tscheduler_free_capabilities(sched_ctx);\n \n \treturn 0;\n }\n@@ -451,92 +642,21 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n }\n \n static uint32_t\n-scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)\n+scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)\n {\n \tstruct scheduler_ctx *sched_ctx = dev->data->dev_private;\n-\tuint8_t i = 0;\n-\tuint32_t max_priv_sess_size = 0;\n-\n-\t/* Check what is the maximum private session size for all workers */\n-\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n-\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n-\t\tstruct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];\n-\t\tuint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);\n \n-\t\tif (max_priv_sess_size < priv_sess_size)\n-\t\t\tmax_priv_sess_size = priv_sess_size;\n-\t}\n-\n-\treturn max_priv_sess_size;\n+\treturn scheduler_session_size_get(sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);\n }\n \n-struct scheduler_configured_sess_info {\n-\tuint8_t dev_id;\n-\tuint8_t driver_id;\n-\tstruct rte_cryptodev_sym_session *sess;\n-};\n-\n static int\n scheduler_pmd_sym_session_configure(struct 
rte_cryptodev *dev,\n \tstruct rte_crypto_sym_xform *xform,\n \tstruct rte_cryptodev_sym_session *sess)\n {\n \tstruct scheduler_ctx *sched_ctx = dev->data->dev_private;\n-\tstruct rte_mempool *mp = rte_mempool_from_obj(sess);\n-\tstruct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);\n-\tstruct scheduler_configured_sess_info configured_sess[\n-\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};\n-\tuint32_t i, j, n_configured_sess = 0;\n-\tint ret = 0;\n-\n-\tif (mp == NULL)\n-\t\treturn -EINVAL;\n-\n-\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n-\t\tstruct scheduler_worker *worker = &sched_ctx->workers[i];\n-\t\tstruct rte_cryptodev_sym_session *worker_sess;\n-\t\tuint8_t next_worker = 0;\n-\n-\t\tfor (j = 0; j < n_configured_sess; j++) {\n-\t\t\tif (configured_sess[j].driver_id ==\n-\t\t\t\t\tworker->driver_id) {\n-\t\t\t\tsess_ctx->worker_sess[i] =\n-\t\t\t\t\tconfigured_sess[j].sess;\n-\t\t\t\tnext_worker = 1;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\t\tif (next_worker)\n-\t\t\tcontinue;\n-\n-\t\tif (rte_mempool_avail_count(mp) == 0) {\n-\t\t\tret = -ENOMEM;\n-\t\t\tgoto error_exit;\n-\t\t}\n-\n-\t\tworker_sess = rte_cryptodev_sym_session_create(worker->dev_id,\n-\t\t\txform, mp);\n-\t\tif (worker_sess == NULL) {\n-\t\t\tret = -rte_errno;\n-\t\t\tgoto error_exit;\n-\t\t}\n \n-\t\tworker_sess->opaque_data = (uint64_t)sess;\n-\t\tsess_ctx->worker_sess[i] = worker_sess;\n-\t\tconfigured_sess[n_configured_sess].driver_id =\n-\t\t\tworker->driver_id;\n-\t\tconfigured_sess[n_configured_sess].dev_id = worker->dev_id;\n-\t\tconfigured_sess[n_configured_sess].sess = worker_sess;\n-\t\tn_configured_sess++;\n-\t}\n-\n-\treturn 0;\n-error_exit:\n-\tsess_ctx->ref_cnt = sched_ctx->ref_cnt;\n-\tfor (i = 0; i < n_configured_sess; i++)\n-\t\trte_cryptodev_sym_session_free(configured_sess[i].dev_id,\n-\t\t\tconfigured_sess[i].sess);\n-\treturn ret;\n+\treturn scheduler_session_create(sess, xform, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);\n }\n \n /** 
Clear the memory of session so it doesn't leave key material behind */\n@@ -545,37 +665,8 @@ scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,\n \t\tstruct rte_cryptodev_sym_session *sess)\n {\n \tstruct scheduler_ctx *sched_ctx = dev->data->dev_private;\n-\tstruct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);\n-\tstruct scheduler_configured_sess_info deleted_sess[\n-\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};\n-\tuint32_t i, j, n_deleted_sess = 0;\n-\n-\tif (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {\n-\t\tCR_SCHED_LOG(WARNING,\n-\t\t\t\"Worker updated between session creation/deletion. \"\n-\t\t\t\"The session may not be freed fully.\");\n-\t}\n-\n-\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n-\t\tstruct scheduler_worker *worker = &sched_ctx->workers[i];\n-\t\tuint8_t next_worker = 0;\n \n-\t\tfor (j = 0; j < n_deleted_sess; j++) {\n-\t\t\tif (deleted_sess[j].driver_id == worker->driver_id) {\n-\t\t\t\tsess_ctx->worker_sess[i] = NULL;\n-\t\t\t\tnext_worker = 1;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\t\tif (next_worker)\n-\t\t\tcontinue;\n-\n-\t\trte_cryptodev_sym_session_free(worker->dev_id,\n-\t\t\tsess_ctx->worker_sess[i]);\n-\n-\t\tdeleted_sess[n_deleted_sess++].driver_id = worker->driver_id;\n-\t\tsess_ctx->worker_sess[i] = NULL;\n-\t}\n+\tscheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);\n }\n \n static struct rte_cryptodev_ops scheduler_pmd_ops = {\n@@ -598,3 +689,66 @@ static struct rte_cryptodev_ops scheduler_pmd_ops = {\n };\n \n struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;\n+\n+/** Configure a scheduler session from a security session configuration */\n+static int\n+scheduler_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,\n+\t\t\tstruct rte_security_session *sess)\n+{\n+\tstruct rte_cryptodev *cdev = dev;\n+\tstruct scheduler_ctx *sched_ctx = cdev->data->dev_private;\n+\n+\t/* Check for supported security protocols */\n+\tif 
(!scheduler_check_sec_proto_supp(conf->action_type, conf->protocol)) {\n+\t\tCR_SCHED_LOG(ERR, \"Unsupported security protocol\");\n+\t\treturn -ENOTSUP;\n+\t}\n+\n+\treturn scheduler_session_create(sess, conf, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION);\n+}\n+\n+/** Clear the memory of session so it doesn't leave key material behind */\n+static int\n+scheduler_pmd_sec_sess_destroy(void *dev,\n+\t\t\t       struct rte_security_session *sess)\n+{\n+\tstruct rte_cryptodev *cdev = dev;\n+\tstruct scheduler_ctx *sched_ctx = cdev->data->dev_private;\n+\n+\tscheduler_session_destroy(sess, sched_ctx, RTE_CRYPTO_OP_SECURITY_SESSION);\n+\n+\treturn 0;\n+}\n+\n+/** Get sync security capabilities for scheduler pmds */\n+static const struct rte_security_capability *\n+scheduler_pmd_sec_capa_get(void *dev)\n+{\n+\tstruct rte_cryptodev *cdev = dev;\n+\tstruct scheduler_ctx *sched_ctx = cdev->data->dev_private;\n+\n+\treturn sched_ctx->sec_capabilities;\n+}\n+\n+static unsigned int\n+scheduler_pmd_sec_sess_size_get(void *dev)\n+{\n+\tstruct rte_cryptodev *cdev = dev;\n+\tstruct scheduler_ctx *sched_ctx = cdev->data->dev_private;\n+\n+\treturn scheduler_session_size_get(sched_ctx,\n+\t\t\t\tRTE_CRYPTO_OP_SECURITY_SESSION);\n+}\n+\n+static struct rte_security_ops scheduler_pmd_sec_ops = {\n+\t\t.session_create = scheduler_pmd_sec_sess_create,\n+\t\t.session_update = NULL,\n+\t\t.session_get_size = scheduler_pmd_sec_sess_size_get,\n+\t\t.session_stats_get = NULL,\n+\t\t.session_destroy = scheduler_pmd_sec_sess_destroy,\n+\t\t.set_pkt_metadata = NULL,\n+\t\t.capabilities_get = scheduler_pmd_sec_capa_get\n+};\n+\n+struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops =\n+\t\t\t\t\t\t\t&scheduler_pmd_sec_ops;\ndiff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h\nindex 36d0bb6307..26110277a4 100644\n--- a/drivers/crypto/scheduler/scheduler_pmd_private.h\n+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h\n@@ -5,6 +5,8 
@@\n #ifndef _SCHEDULER_PMD_PRIVATE_H\n #define _SCHEDULER_PMD_PRIVATE_H\n \n+#include <rte_security_driver.h>\n+\n #include \"rte_cryptodev_scheduler.h\"\n \n #define CRYPTODEV_NAME_SCHEDULER_PMD\tcrypto_scheduler\n@@ -30,7 +32,8 @@ struct scheduler_ctx {\n \t/**< private scheduler context pointer */\n \n \tstruct rte_cryptodev_capabilities *capabilities;\n-\tuint32_t nb_capabilities;\n+\tstruct rte_security_capability *sec_capabilities;\n+\tstruct rte_cryptodev_capabilities **sec_crypto_capabilities;\n \n \tuint32_t max_nb_queue_pairs;\n \n@@ -64,8 +67,12 @@ struct scheduler_qp_ctx {\n \n struct scheduler_session_ctx {\n \tuint32_t ref_cnt;\n-\tstruct rte_cryptodev_sym_session *worker_sess[\n-\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];\n+\tunion {\n+\t\tstruct rte_cryptodev_sym_session *worker_sess[\n+\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];\n+\t\tstruct rte_security_session *worker_sec_sess[\n+\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];\n+\t};\n };\n \n extern uint8_t cryptodev_scheduler_driver_id;\n@@ -108,7 +115,22 @@ scheduler_order_drain(struct rte_ring *order_ring,\n }\n \n static __rte_always_inline void\n-scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t nb_ops,\n+scheduler_set_single_worker_session(struct rte_crypto_op *op,\n+\t\tuint8_t worker_idx)\n+{\n+\tif (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n+\t\tstruct scheduler_session_ctx *sess_ctx =\n+\t\t\t\tCRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);\n+\t\top->sym->session = sess_ctx->worker_sess[worker_idx];\n+\t} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {\n+\t\tstruct scheduler_session_ctx *sess_ctx =\n+\t\t\t\tSECURITY_GET_SESS_PRIV(op->sym->session);\n+\t\top->sym->session = sess_ctx->worker_sec_sess[worker_idx];\n+\t}\n+}\n+\n+static __rte_always_inline void\n+scheduler_set_worker_sessions(struct rte_crypto_op **ops, uint16_t nb_ops,\n \t\tuint8_t worker_index)\n {\n \tstruct rte_crypto_op **op = ops;\n@@ -129,52 +151,34 @@ 
scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t nb_ops,\n \t\t\trte_prefetch0(op[7]->sym->session);\n \t\t}\n \n-\t\tif (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n-\t\t\tstruct scheduler_session_ctx *sess_ctx =\n-\t\t\t\tCRYPTODEV_GET_SYM_SESS_PRIV(op[0]->sym->session);\n-\t\t\top[0]->sym->session =\n-\t\t\t\tsess_ctx->worker_sess[worker_index];\n-\t\t}\n-\n-\t\tif (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n-\t\t\tstruct scheduler_session_ctx *sess_ctx =\n-\t\t\t\tCRYPTODEV_GET_SYM_SESS_PRIV(op[1]->sym->session);\n-\t\t\top[1]->sym->session =\n-\t\t\t\tsess_ctx->worker_sess[worker_index];\n-\t\t}\n-\n-\t\tif (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n-\t\t\tstruct scheduler_session_ctx *sess_ctx =\n-\t\t\t\tCRYPTODEV_GET_SYM_SESS_PRIV(op[2]->sym->session);\n-\t\t\top[2]->sym->session =\n-\t\t\t\tsess_ctx->worker_sess[worker_index];\n-\t\t}\n-\n-\t\tif (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n-\t\t\tstruct scheduler_session_ctx *sess_ctx =\n-\t\t\t\tCRYPTODEV_GET_SYM_SESS_PRIV(op[3]->sym->session);\n-\t\t\top[3]->sym->session =\n-\t\t\t\tsess_ctx->worker_sess[worker_index];\n-\t\t}\n+\t\tscheduler_set_single_worker_session(op[0], worker_index);\n+\t\tscheduler_set_single_worker_session(op[1], worker_index);\n+\t\tscheduler_set_single_worker_session(op[2], worker_index);\n+\t\tscheduler_set_single_worker_session(op[3], worker_index);\n \n \t\top += 4;\n \t\tn -= 4;\n \t}\n \n \twhile (n--) {\n-\t\tif (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n-\t\t\tstruct scheduler_session_ctx *sess_ctx =\n-\t\t\t\tCRYPTODEV_GET_SYM_SESS_PRIV(op[0]->sym->session);\n-\n-\t\t\top[0]->sym->session =\n-\t\t\t\tsess_ctx->worker_sess[worker_index];\n-\t\t\top++;\n-\t\t}\n+\t\tscheduler_set_single_worker_session(op[0], worker_index);\n+\t\top++;\n \t}\n }\n \n static __rte_always_inline void\n-scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t nb_ops)\n+scheduler_retrieve_single_session(struct rte_crypto_op 
*op)\n+{\n+\tif (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n+\t\top->sym->session = (void *)(uintptr_t)\n+\t\t\trte_cryptodev_sym_session_opaque_data_get(op->sym->session);\n+\telse if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)\n+\t\top->sym->session = (void *)(uintptr_t)\n+\t\t\trte_security_session_opaque_data_get(op->sym->session);\n+}\n+\n+static __rte_always_inline void\n+scheduler_retrieve_sessions(struct rte_crypto_op **ops, uint16_t nb_ops)\n {\n \tuint16_t n = nb_ops;\n \tstruct rte_crypto_op **op = ops;\n@@ -194,32 +198,73 @@ scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\trte_prefetch0(op[7]->sym->session);\n \t\t}\n \n-\t\tif (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\top[0]->sym->session = (void *)(uintptr_t)\n-\t\t\t\trte_cryptodev_sym_session_opaque_data_get(op[0]->sym->session);\n-\t\tif (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\top[1]->sym->session = (void *)(uintptr_t)\n-\t\t\t\trte_cryptodev_sym_session_opaque_data_get(op[1]->sym->session);\n-\t\tif (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\top[2]->sym->session = (void *)(uintptr_t)\n-\t\t\t\trte_cryptodev_sym_session_opaque_data_get(op[2]->sym->session);\n-\t\tif (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\top[3]->sym->session = (void *)(uintptr_t)\n-\t\t\t\trte_cryptodev_sym_session_opaque_data_get(op[3]->sym->session);\n+\t\tscheduler_retrieve_single_session(op[0]);\n+\t\tscheduler_retrieve_single_session(op[1]);\n+\t\tscheduler_retrieve_single_session(op[2]);\n+\t\tscheduler_retrieve_single_session(op[3]);\n \n \t\top += 4;\n \t\tn -= 4;\n \t}\n \n \twhile (n--) {\n-\t\tif (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)\n-\t\t\top[0]->sym->session = (void *)(uintptr_t)\n-\t\t\t\trte_cryptodev_sym_session_opaque_data_get(op[0]->sym->session);\n+\t\tscheduler_retrieve_single_session(op[0]);\n \t\top++;\n \t}\n }\n \n+static __rte_always_inline uint32_t\n+scheduler_get_job_len(struct rte_crypto_op 
*op)\n+{\n+\tuint32_t job_len;\n+\n+\t/* op_len is initialized as cipher data length, if\n+\t * it is 0, then it is set to auth data length\n+\t */\n+\tjob_len = op->sym->cipher.data.length;\n+\tjob_len += (op->sym->cipher.data.length == 0) *\n+\t\t\t\t\top->sym->auth.data.length;\n+\n+\treturn job_len;\n+}\n+\n+static __rte_always_inline void\n+scheduler_free_capabilities(struct scheduler_ctx *sched_ctx)\n+{\n+\tuint32_t i;\n+\n+\trte_free(sched_ctx->capabilities);\n+\tsched_ctx->capabilities = NULL;\n+\n+\tif (sched_ctx->sec_crypto_capabilities) {\n+\t\ti = 0;\n+\t\twhile (sched_ctx->sec_crypto_capabilities[i] != NULL) {\n+\t\t\trte_free(sched_ctx->sec_crypto_capabilities[i]);\n+\t\t\tsched_ctx->sec_crypto_capabilities[i] = NULL;\n+\t\t\ti++;\n+\t\t}\n+\n+\t\trte_free(sched_ctx->sec_crypto_capabilities);\n+\t\tsched_ctx->sec_crypto_capabilities = NULL;\n+\t}\n+\n+\trte_free(sched_ctx->sec_capabilities);\n+\tsched_ctx->sec_capabilities = NULL;\n+}\n+\n+static __rte_always_inline int\n+scheduler_check_sec_proto_supp(enum rte_security_session_action_type action,\n+\t\tenum rte_security_session_protocol protocol)\n+{\n+\tif (action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL &&\n+\t\t\tprotocol == RTE_SECURITY_PROTOCOL_DOCSIS)\n+\t\treturn 1;\n+\n+\treturn 0;\n+}\n+\n /** device specific operations function pointer structure */\n extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;\n+extern struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops;\n \n #endif /* _SCHEDULER_PMD_PRIVATE_H */\ndiff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c\nindex ad3f8b842a..08041887a8 100644\n--- a/drivers/crypto/scheduler/scheduler_roundrobin.c\n+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c\n@@ -28,11 +28,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \tif (unlikely(nb_ops == 0))\n \t\treturn 0;\n \n-\tscheduler_set_worker_session(ops, nb_ops, 
worker_idx);\n+\tscheduler_set_worker_sessions(ops, nb_ops, worker_idx);\n \tprocessed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,\n \t\t\tworker->qp_id, ops, nb_ops);\n \tif (processed_ops < nb_ops)\n-\t\tscheduler_retrieve_session(ops + processed_ops,\n+\t\tscheduler_retrieve_sessions(ops + processed_ops,\n \t\t\tnb_ops - processed_ops);\n \n \tworker->nb_inflight_cops += processed_ops;\n@@ -87,7 +87,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \n \tnb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,\n \t\t\tworker->qp_id, ops, nb_ops);\n-\tscheduler_retrieve_session(ops, nb_deq_ops);\n+\tscheduler_retrieve_sessions(ops, nb_deq_ops);\n \tlast_worker_idx += 1;\n \tlast_worker_idx %= rr_qp_ctx->nb_workers;\n \n",
    "prefixes": [
        "v4",
        "1/2"
    ]
}