get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/74612/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 74612,
    "url": "https://patches.dpdk.org/api/patches/74612/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20200722101522.6531-1-adamx.dybkowski@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200722101522.6531-1-adamx.dybkowski@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200722101522.6531-1-adamx.dybkowski@intel.com",
    "date": "2020-07-22T10:15:22",
    "name": "crypto/scheduler: replace usage of master/slave",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b71b9862a82db226e8c657430c65c19a03cb3641",
    "submitter": {
        "id": 1322,
        "url": "https://patches.dpdk.org/api/people/1322/?format=api",
        "name": "Dybkowski, AdamX",
        "email": "adamx.dybkowski@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "https://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20200722101522.6531-1-adamx.dybkowski@intel.com/mbox/",
    "series": [
        {
            "id": 11233,
            "url": "https://patches.dpdk.org/api/series/11233/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=11233",
            "date": "2020-07-22T10:15:22",
            "name": "crypto/scheduler: replace usage of master/slave",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/11233/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/74612/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/74612/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5449EA0526;\n\tWed, 22 Jul 2020 12:15:30 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2AA051BFE9;\n\tWed, 22 Jul 2020 12:15:30 +0200 (CEST)",
            "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n by dpdk.org (Postfix) with ESMTP id A76F01BFD1\n for <dev@dpdk.org>; Wed, 22 Jul 2020 12:15:27 +0200 (CEST)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 22 Jul 2020 03:15:26 -0700",
            "from adamdybx-mobl.ger.corp.intel.com ([10.104.121.92])\n by orsmga002.jf.intel.com with ESMTP; 22 Jul 2020 03:15:23 -0700"
        ],
        "IronPort-SDR": [
            "\n Kj1R3XHw7u2+7lTDTrih33LjvbPbm/sAcIUq+PitIQNbINe1tF8Lij9MkXrdOTkqBRVrJxFRVi\n MFPRPwmIUMiA==",
            "\n XxBTs6uuLXpgHIlDEMvcvdI+0SvHrYW4BWPcAKBCt09HhX5H/XK9rmM/w2QBMlGZHEyK0IuJLs\n GEWZaBl+5Qvg=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9689\"; a=\"130383692\"",
            "E=Sophos;i=\"5.75,381,1589266800\"; d=\"scan'208\";a=\"130383692\"",
            "E=Sophos;i=\"5.75,381,1589266800\"; d=\"scan'208\";a=\"301906477\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Adam Dybkowski <adamx.dybkowski@intel.com>",
        "To": "dev@dpdk.org,\n\tfiona.trahe@intel.com,\n\takhil.goyal@nxp.com",
        "Cc": "Adam Dybkowski <adamx.dybkowski@intel.com>",
        "Date": "Wed, 22 Jul 2020 12:15:22 +0200",
        "Message-Id": "<20200722101522.6531-1-adamx.dybkowski@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH] crypto/scheduler: replace usage of master/slave",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch replaces the usage of master/slave in QAT PMD and\nScheduler PMD as well as in their docs. Also the test app was\nmodified to use the new wording.\n\nSigned-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>\n---\n app/test/test_cryptodev.c                     |  16 +-\n doc/guides/cryptodevs/qat.rst                 |   2 +-\n doc/guides/cryptodevs/scheduler.rst           |  40 ++--\n doc/guides/rel_notes/deprecation.rst          |   7 +\n .../scheduler/rte_cryptodev_scheduler.c       | 135 +++++++-----\n .../scheduler/rte_cryptodev_scheduler.h       |  51 +++--\n .../rte_cryptodev_scheduler_operations.h      |  12 +-\n drivers/crypto/scheduler/scheduler_failover.c |  83 +++----\n .../crypto/scheduler/scheduler_multicore.c    |  54 ++---\n .../scheduler/scheduler_pkt_size_distr.c      | 142 ++++++------\n drivers/crypto/scheduler/scheduler_pmd.c      |  54 ++---\n drivers/crypto/scheduler/scheduler_pmd_ops.c  | 204 +++++++++---------\n .../crypto/scheduler/scheduler_pmd_private.h  |  12 +-\n .../crypto/scheduler/scheduler_roundrobin.c   |  87 ++++----\n 14 files changed, 474 insertions(+), 425 deletions(-)",
    "diff": "diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c\nindex 70bf6fe2c..b64db456d 100644\n--- a/app/test/test_cryptodev.c\n+++ b/app/test/test_cryptodev.c\n@@ -479,29 +479,29 @@ testsuite_setup(void)\n \tchar vdev_args[VDEV_ARGS_SIZE] = {\"\"};\n \tchar temp_str[VDEV_ARGS_SIZE] = {\"mode=multi-core,\"\n \t\t\"ordering=enable,name=cryptodev_test_scheduler,corelist=\"};\n-\tuint16_t slave_core_count = 0;\n+\tuint16_t worker_core_count = 0;\n \tuint16_t socket_id = 0;\n \n \tif (gbl_driver_id == rte_cryptodev_driver_id_get(\n \t\t\tRTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {\n \n-\t\t/* Identify the Slave Cores\n-\t\t * Use 2 slave cores for the device args\n+\t\t/* Identify the Worker Cores\n+\t\t * Use 2 worker cores for the device args\n \t\t */\n \t\tRTE_LCORE_FOREACH_SLAVE(i) {\n-\t\t\tif (slave_core_count > 1)\n+\t\t\tif (worker_core_count > 1)\n \t\t\t\tbreak;\n \t\t\tsnprintf(vdev_args, sizeof(vdev_args),\n \t\t\t\t\t\"%s%d\", temp_str, i);\n \t\t\tstrcpy(temp_str, vdev_args);\n \t\t\tstrlcat(temp_str, \";\", sizeof(temp_str));\n-\t\t\tslave_core_count++;\n+\t\t\tworker_core_count++;\n \t\t\tsocket_id = rte_lcore_to_socket_id(i);\n \t\t}\n-\t\tif (slave_core_count != 2) {\n+\t\tif (worker_core_count != 2) {\n \t\t\tRTE_LOG(ERR, USER1,\n \t\t\t\t\"Cryptodev scheduler test require at least \"\n-\t\t\t\t\"two slave cores to run. \"\n+\t\t\t\t\"two worker cores to run. 
\"\n \t\t\t\t\"Please use the correct coremask.\\n\");\n \t\t\treturn TEST_FAILED;\n \t\t}\n@@ -11712,7 +11712,7 @@ test_chacha20_poly1305_decrypt_test_case_rfc8439(void)\n \n #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER\n \n-/* global AESNI slave IDs for the scheduler test */\n+/* global AESNI worker IDs for the scheduler test */\n uint8_t aesni_ids[2];\n \n static int\ndiff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst\nindex 1803d5f38..666c414c6 100644\n--- a/doc/guides/cryptodevs/qat.rst\n+++ b/doc/guides/cryptodevs/qat.rst\n@@ -327,7 +327,7 @@ The \"rte_cryptodev_devices_get()\" returns the devices exposed by either of these\n \n \tThe cryptodev driver name is passed to the dpdk-test-crypto-perf tool in the \"-devtype\" parameter.\n \n-\tThe qat crypto device name is in the format of the slave parameter passed to the crypto scheduler.\n+\tThe qat crypto device name is in the format of the worker parameter passed to the crypto scheduler.\n \n * The qat compressdev driver name is \"compress_qat\".\n   The rte_compressdev_devices_get() returns the devices exposed by this driver.\ndiff --git a/doc/guides/cryptodevs/scheduler.rst b/doc/guides/cryptodevs/scheduler.rst\nindex 7004ca431..565de40f3 100644\n--- a/doc/guides/cryptodevs/scheduler.rst\n+++ b/doc/guides/cryptodevs/scheduler.rst\n@@ -16,12 +16,12 @@ crypto ops among them in a certain manner.\n The Cryptodev Scheduler PMD library (**librte_pmd_crypto_scheduler**) acts as\n a software crypto PMD and shares the same API provided by librte_cryptodev.\n The PMD supports attaching multiple crypto PMDs, software or hardware, as\n-slaves, and distributes the crypto workload to them with certain behavior.\n+workers, and distributes the crypto workload to them with certain behavior.\n The behaviors are categorizes as different \"modes\". 
Basically, a scheduling\n-mode defines certain actions for scheduling crypto ops to its slaves.\n+mode defines certain actions for scheduling crypto ops to its workers.\n \n The librte_pmd_crypto_scheduler library exports a C API which provides an API\n-for attaching/detaching slaves, set/get scheduling modes, and enable/disable\n+for attaching/detaching workers, set/get scheduling modes, and enable/disable\n crypto ops reordering.\n \n Limitations\n@@ -62,7 +62,7 @@ two calls:\n   created. This value may be overwritten internally if there are too\n   many devices are attached.\n \n-* slave: If a cryptodev has been initialized with specific name, it can be\n+* worker: If a cryptodev has been initialized with specific name, it can be\n   attached to the scheduler using this parameter, simply filling the name\n   here. Multiple cryptodevs can be attached initially by presenting this\n   parameter multiple times.\n@@ -84,13 +84,13 @@ Example:\n \n .. code-block:: console\n \n-    ... --vdev \"crypto_aesni_mb0,name=aesni_mb_1\" --vdev \"crypto_aesni_mb1,name=aesni_mb_2\" --vdev \"crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2\" ...\n+    ... --vdev \"crypto_aesni_mb0,name=aesni_mb_1\" --vdev \"crypto_aesni_mb1,name=aesni_mb_2\" --vdev \"crypto_scheduler,worker=aesni_mb_1,worker=aesni_mb_2\" ...\n \n .. note::\n \n     * The scheduler cryptodev cannot be started unless the scheduling mode\n-      is set and at least one slave is attached. Also, to configure the\n-      scheduler in the run-time, like attach/detach slave(s), change\n+      is set and at least one worker is attached. 
Also, to configure the\n+      scheduler in the run-time, like attach/detach worker(s), change\n       scheduling mode, or enable/disable crypto op ordering, one should stop\n       the scheduler first, otherwise an error will be returned.\n \n@@ -111,7 +111,7 @@ operation:\n    *Initialization mode parameter*: **round-robin**\n \n    Round-robin mode, which distributes the enqueued burst of crypto ops\n-   among its slaves in a round-robin manner. This mode may help to fill\n+   among its workers in a round-robin manner. This mode may help to fill\n    the throughput gap between the physical core and the existing cryptodevs\n    to increase the overall performance.\n \n@@ -119,15 +119,15 @@ operation:\n \n    *Initialization mode parameter*: **packet-size-distr**\n \n-   Packet-size based distribution mode, which works with 2 slaves, the primary\n-   slave and the secondary slave, and distributes the enqueued crypto\n+   Packet-size based distribution mode, which works with 2 workers, the primary\n+   worker and the secondary worker, and distributes the enqueued crypto\n    operations to them based on their data lengths. A crypto operation will be\n-   distributed to the primary slave if its data length is equal to or bigger\n+   distributed to the primary worker if its data length is equal to or bigger\n    than the designated threshold, otherwise it will be handled by the secondary\n-   slave.\n+   worker.\n \n    A typical usecase in this mode is with the QAT cryptodev as the primary and\n-   a software cryptodev as the secondary slave. This may help applications to\n+   a software cryptodev as the secondary worker. 
This may help applications to\n    process additional crypto workload than what the QAT cryptodev can handle on\n    its own, by making use of the available CPU cycles to deal with smaller\n    crypto workloads.\n@@ -148,11 +148,11 @@ operation:\n \n    *Initialization mode parameter*: **fail-over**\n \n-   Fail-over mode, which works with 2 slaves, the primary slave and the\n-   secondary slave. In this mode, the scheduler will enqueue the incoming\n-   crypto operation burst to the primary slave. When one or more crypto\n+   Fail-over mode, which works with 2 workers, the primary worker and the\n+   secondary worker. In this mode, the scheduler will enqueue the incoming\n+   crypto operation burst to the primary worker. When one or more crypto\n    operations fail to be enqueued, then they will be enqueued to the secondary\n-   slave.\n+   worker.\n \n *   **CDEV_SCHED_MODE_MULTICORE:**\n \n@@ -167,16 +167,16 @@ operation:\n    For mixed traffic (IMIX) the optimal number of worker cores is around 2-3.\n    For large packets (1.5 kbytes) scheduler shows linear scaling in performance\n    up to eight cores.\n-   Each worker uses its own slave cryptodev. Only software cryptodevs\n+   Each worker uses its own cryptodev. Only software cryptodevs\n    are supported. Only the same type of cryptodevs should be used concurrently.\n \n    The multi-core mode uses one extra parameter:\n \n    * corelist: Semicolon-separated list of logical cores to be used as workers.\n-     The number of worker cores should be equal to the number of slave cryptodevs.\n+     The number of worker cores should be equal to the number of worker cryptodevs.\n      These cores should be present in EAL core list parameter and\n      should not be used by the application or any other process.\n \n    Example:\n     ... 
--vdev \"crypto_aesni_mb1,name=aesni_mb_1\" --vdev \"crypto_aesni_mb_pmd2,name=aesni_mb_2\" \\\n-    --vdev \"crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2,mode=multi-core,corelist=23;24\" ...\n+    --vdev \"crypto_scheduler,worker=aesni_mb_1,worker=aesni_mb_2,mode=multi-core,corelist=23;24\" ...\ndiff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst\nindex a58a17946..7583327b6 100644\n--- a/doc/guides/rel_notes/deprecation.rst\n+++ b/doc/guides/rel_notes/deprecation.rst\n@@ -142,3 +142,10 @@ Deprecation Notices\n   Python 2 support will be completely removed in 20.11.\n   In 20.08, explicit deprecation warnings will be displayed when running\n   scripts with Python 2.\n+\n+* scheduler: The functions ``rte_cryptodev_scheduler_slave_attach``,\n+  ``rte_cryptodev_scheduler_slave_detach`` and\n+  ``rte_cryptodev_scheduler_slaves_get`` will be replaced by\n+  ``rte_cryptodev_scheduler_worker_attach``,\n+  ``rte_cryptodev_scheduler_worker_detach`` and\n+  ``rte_cryptodev_scheduler_workers_get`` accordingly.\ndiff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c\nindex 730504dab..3e9c7292f 100644\n--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c\n+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c\n@@ -13,31 +13,31 @@\n /** update the scheduler pmd's capability with attaching device's\n  *  capability.\n  *  For each device to be attached, the scheduler's capability should be\n- *  the common capability set of all slaves\n+ *  the common capability set of all workers\n  **/\n static uint32_t\n sync_caps(struct rte_cryptodev_capabilities *caps,\n \t\tuint32_t nb_caps,\n-\t\tconst struct rte_cryptodev_capabilities *slave_caps)\n+\t\tconst struct rte_cryptodev_capabilities *worker_caps)\n {\n-\tuint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;\n+\tuint32_t sync_nb_caps = nb_caps, nb_worker_caps = 0;\n \tuint32_t i;\n \n-\twhile (slave_caps[nb_slave_caps].op 
!= RTE_CRYPTO_OP_TYPE_UNDEFINED)\n-\t\tnb_slave_caps++;\n+\twhile (worker_caps[nb_worker_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)\n+\t\tnb_worker_caps++;\n \n \tif (nb_caps == 0) {\n-\t\trte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);\n-\t\treturn nb_slave_caps;\n+\t\trte_memcpy(caps, worker_caps, sizeof(*caps) * nb_worker_caps);\n+\t\treturn nb_worker_caps;\n \t}\n \n \tfor (i = 0; i < sync_nb_caps; i++) {\n \t\tstruct rte_cryptodev_capabilities *cap = &caps[i];\n \t\tuint32_t j;\n \n-\t\tfor (j = 0; j < nb_slave_caps; j++) {\n+\t\tfor (j = 0; j < nb_worker_caps; j++) {\n \t\t\tconst struct rte_cryptodev_capabilities *s_cap =\n-\t\t\t\t\t&slave_caps[j];\n+\t\t\t\t\t&worker_caps[j];\n \n \t\t\tif (s_cap->op != cap->op || s_cap->sym.xform_type !=\n \t\t\t\t\tcap->sym.xform_type)\n@@ -72,7 +72,7 @@ sync_caps(struct rte_cryptodev_capabilities *caps,\n \t\t\tbreak;\n \t\t}\n \n-\t\tif (j < nb_slave_caps)\n+\t\tif (j < nb_worker_caps)\n \t\t\tcontinue;\n \n \t\t/* remove a uncommon cap from the array */\n@@ -97,10 +97,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)\n \t\tsched_ctx->capabilities = NULL;\n \t}\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n \t\tstruct rte_cryptodev_info dev_info;\n \n-\t\trte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);\n+\t\trte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);\n \n \t\tnb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);\n \t\tif (nb_caps == 0)\n@@ -127,10 +127,10 @@ update_scheduler_feature_flag(struct rte_cryptodev *dev)\n \n \tdev->feature_flags = 0;\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n \t\tstruct rte_cryptodev_info dev_info;\n \n-\t\trte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);\n+\t\trte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);\n \n \t\tdev->feature_flags |= dev_info.feature_flags;\n 
\t}\n@@ -142,15 +142,15 @@ update_max_nb_qp(struct scheduler_ctx *sched_ctx)\n \tuint32_t i;\n \tuint32_t max_nb_qp;\n \n-\tif (!sched_ctx->nb_slaves)\n+\tif (!sched_ctx->nb_workers)\n \t\treturn;\n \n-\tmax_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;\n+\tmax_nb_qp = sched_ctx->nb_workers ? UINT32_MAX : 0;\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n \t\tstruct rte_cryptodev_info dev_info;\n \n-\t\trte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);\n+\t\trte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);\n \t\tmax_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?\n \t\t\t\tdev_info.max_nb_queue_pairs : max_nb_qp;\n \t}\n@@ -158,13 +158,20 @@ update_max_nb_qp(struct scheduler_ctx *sched_ctx)\n \tsched_ctx->max_nb_queue_pairs = max_nb_qp;\n }\n \n+/* Backwards compatibility, will be deprecated */\n+int\n+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t worker_id)\n+{\n+\treturn rte_cryptodev_scheduler_worker_attach(scheduler_id, worker_id);\n+}\n+\n /** Attach a device to the scheduler. 
*/\n int\n-rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)\n+rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)\n {\n \tstruct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);\n \tstruct scheduler_ctx *sched_ctx;\n-\tstruct scheduler_slave *slave;\n+\tstruct scheduler_worker *worker;\n \tstruct rte_cryptodev_info dev_info;\n \tuint32_t i;\n \n@@ -184,30 +191,30 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)\n \t}\n \n \tsched_ctx = dev->data->dev_private;\n-\tif (sched_ctx->nb_slaves >=\n-\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {\n-\t\tCR_SCHED_LOG(ERR, \"Too many slaves attached\");\n+\tif (sched_ctx->nb_workers >=\n+\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {\n+\t\tCR_SCHED_LOG(ERR, \"Too many workers attached\");\n \t\treturn -ENOMEM;\n \t}\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++)\n-\t\tif (sched_ctx->slaves[i].dev_id == slave_id) {\n-\t\t\tCR_SCHED_LOG(ERR, \"Slave already added\");\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++)\n+\t\tif (sched_ctx->workers[i].dev_id == worker_id) {\n+\t\t\tCR_SCHED_LOG(ERR, \"Worker already added\");\n \t\t\treturn -ENOTSUP;\n \t\t}\n \n-\tslave = &sched_ctx->slaves[sched_ctx->nb_slaves];\n+\tworker = &sched_ctx->workers[sched_ctx->nb_workers];\n \n-\trte_cryptodev_info_get(slave_id, &dev_info);\n+\trte_cryptodev_info_get(worker_id, &dev_info);\n \n-\tslave->dev_id = slave_id;\n-\tslave->driver_id = dev_info.driver_id;\n-\tsched_ctx->nb_slaves++;\n+\tworker->dev_id = worker_id;\n+\tworker->driver_id = dev_info.driver_id;\n+\tsched_ctx->nb_workers++;\n \n \tif (update_scheduler_capability(sched_ctx) < 0) {\n-\t\tslave->dev_id = 0;\n-\t\tslave->driver_id = 0;\n-\t\tsched_ctx->nb_slaves--;\n+\t\tworker->dev_id = 0;\n+\t\tworker->driver_id = 0;\n+\t\tsched_ctx->nb_workers--;\n \n \t\tCR_SCHED_LOG(ERR, \"capabilities update failed\");\n \t\treturn -ENOTSUP;\n@@ -220,12 +227,19 @@ 
rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)\n \treturn 0;\n }\n \n+/* Backwards compatibility, will be deprecated */\n+int\n+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t worker_id)\n+{\n+\treturn rte_cryptodev_scheduler_worker_detach(scheduler_id, worker_id);\n+}\n+\n int\n-rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)\n+rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)\n {\n \tstruct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);\n \tstruct scheduler_ctx *sched_ctx;\n-\tuint32_t i, slave_pos;\n+\tuint32_t i, worker_pos;\n \n \tif (!dev) {\n \t\tCR_SCHED_LOG(ERR, \"Operation not supported\");\n@@ -244,26 +258,26 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)\n \n \tsched_ctx = dev->data->dev_private;\n \n-\tfor (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)\n-\t\tif (sched_ctx->slaves[slave_pos].dev_id == slave_id)\n+\tfor (worker_pos = 0; worker_pos < sched_ctx->nb_workers; worker_pos++)\n+\t\tif (sched_ctx->workers[worker_pos].dev_id == worker_id)\n \t\t\tbreak;\n-\tif (slave_pos == sched_ctx->nb_slaves) {\n-\t\tCR_SCHED_LOG(ERR, \"Cannot find slave\");\n+\tif (worker_pos == sched_ctx->nb_workers) {\n+\t\tCR_SCHED_LOG(ERR, \"Cannot find worker\");\n \t\treturn -ENOTSUP;\n \t}\n \n-\tif (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {\n-\t\tCR_SCHED_LOG(ERR, \"Failed to detach slave\");\n+\tif (sched_ctx->ops.worker_detach(dev, worker_id) < 0) {\n+\t\tCR_SCHED_LOG(ERR, \"Failed to detach worker\");\n \t\treturn -ENOTSUP;\n \t}\n \n-\tfor (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {\n-\t\tmemcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],\n-\t\t\t\tsizeof(struct scheduler_slave));\n+\tfor (i = worker_pos; i < sched_ctx->nb_workers - 1; i++) {\n+\t\tmemcpy(&sched_ctx->workers[i], &sched_ctx->workers[i+1],\n+\t\t\t\tsizeof(struct scheduler_worker));\n 
\t}\n-\tmemset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,\n-\t\t\tsizeof(struct scheduler_slave));\n-\tsched_ctx->nb_slaves--;\n+\tmemset(&sched_ctx->workers[sched_ctx->nb_workers - 1], 0,\n+\t\t\tsizeof(struct scheduler_worker));\n+\tsched_ctx->nb_workers--;\n \n \tif (update_scheduler_capability(sched_ctx) < 0) {\n \t\tCR_SCHED_LOG(ERR, \"capabilities update failed\");\n@@ -459,8 +473,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,\n \tsched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;\n \tsched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;\n \tsched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;\n-\tsched_ctx->ops.slave_attach = scheduler->ops->slave_attach;\n-\tsched_ctx->ops.slave_detach = scheduler->ops->slave_detach;\n+\tsched_ctx->ops.worker_attach = scheduler->ops->worker_attach;\n+\tsched_ctx->ops.worker_detach = scheduler->ops->worker_detach;\n \tsched_ctx->ops.option_set = scheduler->ops->option_set;\n \tsched_ctx->ops.option_get = scheduler->ops->option_get;\n \n@@ -484,12 +498,19 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,\n \treturn 0;\n }\n \n+/* Backwards compatibility, will be deprecated */\n+int\n+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *workers)\n+{\n+\treturn rte_cryptodev_scheduler_workers_get(scheduler_id, workers);\n+}\n+\n int\n-rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)\n+rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers)\n {\n \tstruct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);\n \tstruct scheduler_ctx *sched_ctx;\n-\tuint32_t nb_slaves = 0;\n+\tuint32_t nb_workers = 0;\n \n \tif (!dev) {\n \t\tCR_SCHED_LOG(ERR, \"Operation not supported\");\n@@ -503,16 +524,16 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)\n \n \tsched_ctx = dev->data->dev_private;\n \n-\tnb_slaves = sched_ctx->nb_slaves;\n+\tnb_workers = 
sched_ctx->nb_workers;\n \n-\tif (slaves && nb_slaves) {\n+\tif (workers && nb_workers) {\n \t\tuint32_t i;\n \n-\t\tfor (i = 0; i < nb_slaves; i++)\n-\t\t\tslaves[i] = sched_ctx->slaves[i].dev_id;\n+\t\tfor (i = 0; i < nb_workers; i++)\n+\t\t\tworkers[i] = sched_ctx->workers[i].dev_id;\n \t}\n \n-\treturn (int)nb_slaves;\n+\treturn (int)nb_workers;\n }\n \n int\ndiff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h\nindex 9a72a90ae..2e88c5e31 100644\n--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h\n+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h\n@@ -10,9 +10,9 @@\n  *\n  * RTE Cryptodev Scheduler Device\n  *\n- * The RTE Cryptodev Scheduler Device allows the aggregation of multiple (slave)\n+ * The RTE Cryptodev Scheduler Device allows the aggregation of multiple worker\n  * Cryptodevs into a single logical crypto device, and the scheduling the\n- * crypto operations to the slaves based on the mode of the specified mode of\n+ * crypto operations to the workers based on the mode of the specified mode of\n  * operation specified and supported. 
This implementation supports 3 modes of\n  * operation: round robin, packet-size based, and fail-over.\n  */\n@@ -24,9 +24,15 @@\n extern \"C\" {\n #endif\n \n+/* Backwards compatibility, will be removed later */\n+#ifdef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES\n+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS \\\n+\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES\n+#endif\n+\n /** Maximum number of bonded devices per device */\n-#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES\n-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES\t(8)\n+#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS\n+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS\t(8)\n #endif\n \n /** Maximum number of multi-core worker cores */\n@@ -106,34 +112,41 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,\n  *\n  * @param scheduler_id\n  *   The target scheduler device ID\n- * @param slave_id\n+ * @param worker_id\n  *   Crypto device ID to be attached\n  *\n  * @return\n- *   - 0 if the slave is attached.\n+ *   - 0 if the worker is attached.\n  *   - -ENOTSUP if the operation is not supported.\n  *   - -EBUSY if device is started.\n- *   - -ENOMEM if the scheduler's slave list is full.\n+ *   - -ENOMEM if the scheduler's worker list is full.\n  */\n int\n-rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id);\n+rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id);\n+\n+/* Backwards compatibility, will be deprecated */\n+int\n+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t worker_id);\n \n /**\n  * Detach a crypto device from the scheduler\n  *\n  * @param scheduler_id\n  *   The target scheduler device ID\n- * @param slave_id\n+ * @param worker_id\n  *   Crypto device ID to be detached\n  *\n  * @return\n- *   - 0 if the slave is detached.\n+ *   - 0 if the worker is detached.\n  *   - -ENOTSUP if the operation is not supported.\n  *   - -EBUSY if device is started.\n  */\n int\n-rte_cryptodev_scheduler_slave_detach(uint8_t 
scheduler_id, uint8_t slave_id);\n+rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id);\n \n+/* Backwards compatibility, will be deprecated */\n+int\n+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t worker_id);\n \n /**\n  * Set the scheduling mode\n@@ -199,21 +212,25 @@ int\n rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);\n \n /**\n- * Get the attached slaves' count and/or ID\n+ * Get the attached workers' count and/or ID\n  *\n  * @param scheduler_id\n  *   The target scheduler device ID\n- * @param slaves\n- *   If successful, the function will write back all slaves' device IDs to it.\n+ * @param workers\n+ *   If successful, the function will write back all workers' device IDs to it.\n  *   This parameter will either be an uint8_t array of\n- *   RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES elements or NULL.\n+ *   RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS elements or NULL.\n  *\n  * @return\n- *   - non-negative number: the number of slaves attached\n+ *   - non-negative number: the number of workers attached\n  *   - -ENOTSUP if the operation is not supported.\n  */\n int\n-rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves);\n+rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers);\n+\n+/* Backwards compatibility, will be deprecated */\n+int\n+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *workers);\n \n /**\n  * Set the mode specific option\ndiff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h\nindex c43695894..f8726c009 100644\n--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h\n+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h\n@@ -11,10 +11,10 @@\n extern \"C\" {\n #endif\n \n-typedef int (*rte_cryptodev_scheduler_slave_attach_t)(\n-\t\tstruct rte_cryptodev *dev, uint8_t slave_id);\n-typedef int 
(*rte_cryptodev_scheduler_slave_detach_t)(\n-\t\tstruct rte_cryptodev *dev, uint8_t slave_id);\n+typedef int (*rte_cryptodev_scheduler_worker_attach_t)(\n+\t\tstruct rte_cryptodev *dev, uint8_t worker_id);\n+typedef int (*rte_cryptodev_scheduler_worker_detach_t)(\n+\t\tstruct rte_cryptodev *dev, uint8_t worker_id);\n \n typedef int (*rte_cryptodev_scheduler_start_t)(struct rte_cryptodev *dev);\n typedef int (*rte_cryptodev_scheduler_stop_t)(struct rte_cryptodev *dev);\n@@ -36,8 +36,8 @@ typedef int (*rte_cryptodev_scheduler_config_option_get)(\n \t\tvoid *option);\n \n struct rte_cryptodev_scheduler_ops {\n-\trte_cryptodev_scheduler_slave_attach_t slave_attach;\n-\trte_cryptodev_scheduler_slave_attach_t slave_detach;\n+\trte_cryptodev_scheduler_worker_attach_t worker_attach;\n+\trte_cryptodev_scheduler_worker_attach_t worker_detach;\n \n \trte_cryptodev_scheduler_start_t scheduler_start;\n \trte_cryptodev_scheduler_stop_t scheduler_stop;\ndiff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c\nindex 3a023b8ad..844312dd1 100644\n--- a/drivers/crypto/scheduler/scheduler_failover.c\n+++ b/drivers/crypto/scheduler/scheduler_failover.c\n@@ -8,20 +8,20 @@\n #include \"rte_cryptodev_scheduler_operations.h\"\n #include \"scheduler_pmd_private.h\"\n \n-#define PRIMARY_SLAVE_IDX\t0\n-#define SECONDARY_SLAVE_IDX\t1\n-#define NB_FAILOVER_SLAVES\t2\n-#define SLAVE_SWITCH_MASK\t(0x01)\n+#define PRIMARY_WORKER_IDX\t0\n+#define SECONDARY_WORKER_IDX\t1\n+#define NB_FAILOVER_WORKERS\t2\n+#define WORKER_SWITCH_MASK\t(0x01)\n \n struct fo_scheduler_qp_ctx {\n-\tstruct scheduler_slave primary_slave;\n-\tstruct scheduler_slave secondary_slave;\n+\tstruct scheduler_worker primary_worker;\n+\tstruct scheduler_worker secondary_worker;\n \n \tuint8_t deq_idx;\n };\n \n static __rte_always_inline uint16_t\n-failover_slave_enqueue(struct scheduler_slave *slave,\n+failover_worker_enqueue(struct scheduler_worker *worker,\n \t\tstruct 
rte_crypto_op **ops, uint16_t nb_ops)\n {\n \tuint16_t i, processed_ops;\n@@ -29,9 +29,9 @@ failover_slave_enqueue(struct scheduler_slave *slave,\n \tfor (i = 0; i < nb_ops && i < 4; i++)\n \t\trte_prefetch0(ops[i]->sym->session);\n \n-\tprocessed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,\n-\t\t\tslave->qp_id, ops, nb_ops);\n-\tslave->nb_inflight_cops += processed_ops;\n+\tprocessed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,\n+\t\t\tworker->qp_id, ops, nb_ops);\n+\tworker->nb_inflight_cops += processed_ops;\n \n \treturn processed_ops;\n }\n@@ -46,11 +46,12 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \tif (unlikely(nb_ops == 0))\n \t\treturn 0;\n \n-\tenqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,\n+\tenqueued_ops = failover_worker_enqueue(&qp_ctx->primary_worker,\n \t\t\tops, nb_ops);\n \n \tif (enqueued_ops < nb_ops)\n-\t\tenqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,\n+\t\tenqueued_ops += failover_worker_enqueue(\n+\t\t\t\t&qp_ctx->secondary_worker,\n \t\t\t\t&ops[enqueued_ops],\n \t\t\t\tnb_ops - enqueued_ops);\n \n@@ -79,28 +80,28 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n {\n \tstruct fo_scheduler_qp_ctx *qp_ctx =\n \t\t\t((struct scheduler_qp_ctx *)qp)->private_qp_ctx;\n-\tstruct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {\n-\t\t\t&qp_ctx->primary_slave, &qp_ctx->secondary_slave};\n-\tstruct scheduler_slave *slave = slaves[qp_ctx->deq_idx];\n+\tstruct scheduler_worker *workers[NB_FAILOVER_WORKERS] = {\n+\t\t\t&qp_ctx->primary_worker, &qp_ctx->secondary_worker};\n+\tstruct scheduler_worker *worker = workers[qp_ctx->deq_idx];\n \tuint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;\n \n-\tif (slave->nb_inflight_cops) {\n-\t\tnb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,\n-\t\t\tslave->qp_id, ops, nb_ops);\n-\t\tslave->nb_inflight_cops -= nb_deq_ops;\n+\tif (worker->nb_inflight_cops) {\n+\t\tnb_deq_ops = 
rte_cryptodev_dequeue_burst(worker->dev_id,\n+\t\t\tworker->qp_id, ops, nb_ops);\n+\t\tworker->nb_inflight_cops -= nb_deq_ops;\n \t}\n \n-\tqp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;\n+\tqp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_SWITCH_MASK;\n \n \tif (nb_deq_ops == nb_ops)\n \t\treturn nb_deq_ops;\n \n-\tslave = slaves[qp_ctx->deq_idx];\n+\tworker = workers[qp_ctx->deq_idx];\n \n-\tif (slave->nb_inflight_cops) {\n-\t\tnb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,\n-\t\t\tslave->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);\n-\t\tslave->nb_inflight_cops -= nb_deq_ops2;\n+\tif (worker->nb_inflight_cops) {\n+\t\tnb_deq_ops2 = rte_cryptodev_dequeue_burst(worker->dev_id,\n+\t\t\tworker->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);\n+\t\tworker->nb_inflight_cops -= nb_deq_ops2;\n \t}\n \n \treturn nb_deq_ops + nb_deq_ops2;\n@@ -119,15 +120,15 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,\n }\n \n static int\n-slave_attach(__rte_unused struct rte_cryptodev *dev,\n-\t\t__rte_unused uint8_t slave_id)\n+worker_attach(__rte_unused struct rte_cryptodev *dev,\n+\t\t__rte_unused uint8_t worker_id)\n {\n \treturn 0;\n }\n \n static int\n-slave_detach(__rte_unused struct rte_cryptodev *dev,\n-\t\t__rte_unused uint8_t slave_id)\n+worker_detach(__rte_unused struct rte_cryptodev *dev,\n+\t\t__rte_unused uint8_t worker_id)\n {\n \treturn 0;\n }\n@@ -138,8 +139,8 @@ scheduler_start(struct rte_cryptodev *dev)\n \tstruct scheduler_ctx *sched_ctx = dev->data->dev_private;\n \tuint16_t i;\n \n-\tif (sched_ctx->nb_slaves < 2) {\n-\t\tCR_SCHED_LOG(ERR, \"Number of slaves shall no less than 2\");\n+\tif (sched_ctx->nb_workers < 2) {\n+\t\tCR_SCHED_LOG(ERR, \"Number of workers shall no less than 2\");\n \t\treturn -ENOMEM;\n \t}\n \n@@ -156,12 +157,12 @@ scheduler_start(struct rte_cryptodev *dev)\n \t\t\t((struct scheduler_qp_ctx *)\n \t\t\t\tdev->data->queue_pairs[i])->private_qp_ctx;\n 
\n-\t\trte_memcpy(&qp_ctx->primary_slave,\n-\t\t\t\t&sched_ctx->slaves[PRIMARY_SLAVE_IDX],\n-\t\t\t\tsizeof(struct scheduler_slave));\n-\t\trte_memcpy(&qp_ctx->secondary_slave,\n-\t\t\t\t&sched_ctx->slaves[SECONDARY_SLAVE_IDX],\n-\t\t\t\tsizeof(struct scheduler_slave));\n+\t\trte_memcpy(&qp_ctx->primary_worker,\n+\t\t\t\t&sched_ctx->workers[PRIMARY_WORKER_IDX],\n+\t\t\t\tsizeof(struct scheduler_worker));\n+\t\trte_memcpy(&qp_ctx->secondary_worker,\n+\t\t\t\t&sched_ctx->workers[SECONDARY_WORKER_IDX],\n+\t\t\t\tsizeof(struct scheduler_worker));\n \t}\n \n \treturn 0;\n@@ -198,8 +199,8 @@ scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)\n }\n \n static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {\n-\tslave_attach,\n-\tslave_detach,\n+\tworker_attach,\n+\tworker_detach,\n \tscheduler_start,\n \tscheduler_stop,\n \tscheduler_config_qp,\n@@ -210,8 +211,8 @@ static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {\n \n static struct rte_cryptodev_scheduler fo_scheduler = {\n \t\t.name = \"failover-scheduler\",\n-\t\t.description = \"scheduler which enqueues to the primary slave, \"\n-\t\t\t\t\"and only then enqueues to the secondary slave \"\n+\t\t.description = \"scheduler which enqueues to the primary worker, \"\n+\t\t\t\t\"and only then enqueues to the secondary worker \"\n \t\t\t\t\"upon failing on enqueuing to primary\",\n \t\t.mode = CDEV_SCHED_MODE_FAILOVER,\n \t\t.ops = &scheduler_fo_ops\ndiff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c\nindex 2d6790bb3..1e2e8dbf9 100644\n--- a/drivers/crypto/scheduler/scheduler_multicore.c\n+++ b/drivers/crypto/scheduler/scheduler_multicore.c\n@@ -26,8 +26,8 @@ struct mc_scheduler_ctx {\n };\n \n struct mc_scheduler_qp_ctx {\n-\tstruct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];\n-\tuint32_t nb_slaves;\n+\tstruct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];\n+\tuint32_t nb_workers;\n \n 
\tuint32_t last_enq_worker_idx;\n \tuint32_t last_deq_worker_idx;\n@@ -132,15 +132,15 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,\n }\n \n static int\n-slave_attach(__rte_unused struct rte_cryptodev *dev,\n-\t\t__rte_unused uint8_t slave_id)\n+worker_attach(__rte_unused struct rte_cryptodev *dev,\n+\t\t__rte_unused uint8_t worker_id)\n {\n \treturn 0;\n }\n \n static int\n-slave_detach(__rte_unused struct rte_cryptodev *dev,\n-\t\t__rte_unused uint8_t slave_id)\n+worker_detach(__rte_unused struct rte_cryptodev *dev,\n+\t\t__rte_unused uint8_t worker_id)\n {\n \treturn 0;\n }\n@@ -154,7 +154,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev)\n \tstruct rte_ring *deq_ring;\n \tuint32_t core_id = rte_lcore_id();\n \tint i, worker_idx = -1;\n-\tstruct scheduler_slave *slave;\n+\tstruct scheduler_worker *worker;\n \tstruct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];\n \tstruct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];\n \tuint16_t processed_ops;\n@@ -177,15 +177,16 @@ mc_scheduler_worker(struct rte_cryptodev *dev)\n \t\treturn -1;\n \t}\n \n-\tslave = &sched_ctx->slaves[worker_idx];\n+\tworker = &sched_ctx->workers[worker_idx];\n \tenq_ring = mc_ctx->sched_enq_ring[worker_idx];\n \tdeq_ring = mc_ctx->sched_deq_ring[worker_idx];\n \n \twhile (!mc_ctx->stop_signal) {\n \t\tif (pending_enq_ops) {\n \t\t\tprocessed_ops =\n-\t\t\t\trte_cryptodev_enqueue_burst(slave->dev_id,\n-\t\t\t\t\tslave->qp_id, &enq_ops[pending_enq_ops_idx],\n+\t\t\t\trte_cryptodev_enqueue_burst(worker->dev_id,\n+\t\t\t\t\tworker->qp_id,\n+\t\t\t\t\t&enq_ops[pending_enq_ops_idx],\n \t\t\t\t\tpending_enq_ops);\n \t\t\tpending_enq_ops -= processed_ops;\n \t\t\tpending_enq_ops_idx += processed_ops;\n@@ -195,8 +196,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev)\n \t\t\t\t\t\t\tMC_SCHED_BUFFER_SIZE, NULL);\n \t\t\tif (processed_ops) {\n \t\t\t\tpending_enq_ops_idx = rte_cryptodev_enqueue_burst(\n-\t\t\t\t\t\t\tslave->dev_id, slave->qp_id,\n-\t\t\t\t\t\t\tenq_ops, 
processed_ops);\n+\t\t\t\t\t\tworker->dev_id, worker->qp_id,\n+\t\t\t\t\t\tenq_ops, processed_ops);\n \t\t\t\tpending_enq_ops = processed_ops - pending_enq_ops_idx;\n \t\t\t\tinflight_ops += pending_enq_ops_idx;\n \t\t\t}\n@@ -209,8 +210,9 @@ mc_scheduler_worker(struct rte_cryptodev *dev)\n \t\t\tpending_deq_ops -= processed_ops;\n \t\t\tpending_deq_ops_idx += processed_ops;\n \t\t} else if (inflight_ops) {\n-\t\t\tprocessed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,\n-\t\t\t\t\tslave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);\n+\t\t\tprocessed_ops = rte_cryptodev_dequeue_burst(\n+\t\t\t\t\tworker->dev_id, worker->qp_id, deq_ops,\n+\t\t\t\t\tMC_SCHED_BUFFER_SIZE);\n \t\t\tif (processed_ops) {\n \t\t\t\tinflight_ops -= processed_ops;\n \t\t\t\tif (reordering_enabled) {\n@@ -264,16 +266,16 @@ scheduler_start(struct rte_cryptodev *dev)\n \t\t\t\tqp_ctx->private_qp_ctx;\n \t\tuint32_t j;\n \n-\t\tmemset(mc_qp_ctx->slaves, 0,\n-\t\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *\n-\t\t\t\tsizeof(struct scheduler_slave));\n-\t\tfor (j = 0; j < sched_ctx->nb_slaves; j++) {\n-\t\t\tmc_qp_ctx->slaves[j].dev_id =\n-\t\t\t\t\tsched_ctx->slaves[j].dev_id;\n-\t\t\tmc_qp_ctx->slaves[j].qp_id = i;\n+\t\tmemset(mc_qp_ctx->workers, 0,\n+\t\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *\n+\t\t\t\tsizeof(struct scheduler_worker));\n+\t\tfor (j = 0; j < sched_ctx->nb_workers; j++) {\n+\t\t\tmc_qp_ctx->workers[j].dev_id =\n+\t\t\t\t\tsched_ctx->workers[j].dev_id;\n+\t\t\tmc_qp_ctx->workers[j].qp_id = i;\n \t\t}\n \n-\t\tmc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;\n+\t\tmc_qp_ctx->nb_workers = sched_ctx->nb_workers;\n \n \t\tmc_qp_ctx->last_enq_worker_idx = 0;\n \t\tmc_qp_ctx->last_deq_worker_idx = 0;\n@@ -347,7 +349,7 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)\n \t\tmc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);\n \t\tif (!mc_ctx->sched_enq_ring[i]) {\n \t\t\tmc_ctx->sched_enq_ring[i] = 
rte_ring_create(r_name,\n-\t\t\t\t\t\tPER_SLAVE_BUFF_SIZE,\n+\t\t\t\t\t\tPER_WORKER_BUFF_SIZE,\n \t\t\t\t\t\trte_socket_id(),\n \t\t\t\t\t\tRING_F_SC_DEQ | RING_F_SP_ENQ);\n \t\t\tif (!mc_ctx->sched_enq_ring[i]) {\n@@ -361,7 +363,7 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)\n \t\tmc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);\n \t\tif (!mc_ctx->sched_deq_ring[i]) {\n \t\t\tmc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,\n-\t\t\t\t\t\tPER_SLAVE_BUFF_SIZE,\n+\t\t\t\t\t\tPER_WORKER_BUFF_SIZE,\n \t\t\t\t\t\trte_socket_id(),\n \t\t\t\t\t\tRING_F_SC_DEQ | RING_F_SP_ENQ);\n \t\t\tif (!mc_ctx->sched_deq_ring[i]) {\n@@ -387,8 +389,8 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)\n }\n \n static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {\n-\tslave_attach,\n-\tslave_detach,\n+\tworker_attach,\n+\tworker_detach,\n \tscheduler_start,\n \tscheduler_stop,\n \tscheduler_config_qp,\ndiff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c\nindex 45c8dceb4..57e330a74 100644\n--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c\n+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c\n@@ -9,10 +9,10 @@\n #include \"scheduler_pmd_private.h\"\n \n #define DEF_PKT_SIZE_THRESHOLD\t\t\t(0xffffff80)\n-#define SLAVE_IDX_SWITCH_MASK\t\t\t(0x01)\n-#define PRIMARY_SLAVE_IDX\t\t\t0\n-#define SECONDARY_SLAVE_IDX\t\t\t1\n-#define NB_PKT_SIZE_SLAVES\t\t\t2\n+#define WORKER_IDX_SWITCH_MASK\t\t\t(0x01)\n+#define PRIMARY_WORKER_IDX\t\t\t0\n+#define SECONDARY_WORKER_IDX\t\t\t1\n+#define NB_PKT_SIZE_WORKERS\t\t\t2\n \n /** pkt size based scheduler context */\n struct psd_scheduler_ctx {\n@@ -21,15 +21,15 @@ struct psd_scheduler_ctx {\n \n /** pkt size based scheduler queue pair context */\n struct psd_scheduler_qp_ctx {\n-\tstruct scheduler_slave primary_slave;\n-\tstruct scheduler_slave secondary_slave;\n+\tstruct scheduler_worker primary_worker;\n+\tstruct scheduler_worker 
secondary_worker;\n \tuint32_t threshold;\n \tuint8_t deq_idx;\n } __rte_cache_aligned;\n \n /** scheduling operation variables' wrapping */\n struct psd_schedule_op {\n-\tuint8_t slave_idx;\n+\tuint8_t worker_idx;\n \tuint16_t pos;\n };\n \n@@ -38,13 +38,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n {\n \tstruct scheduler_qp_ctx *qp_ctx = qp;\n \tstruct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;\n-\tstruct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];\n-\tuint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {\n-\t\t\tpsd_qp_ctx->primary_slave.nb_inflight_cops,\n-\t\t\tpsd_qp_ctx->secondary_slave.nb_inflight_cops\n+\tstruct rte_crypto_op *sched_ops[NB_PKT_SIZE_WORKERS][nb_ops];\n+\tuint32_t in_flight_ops[NB_PKT_SIZE_WORKERS] = {\n+\t\t\tpsd_qp_ctx->primary_worker.nb_inflight_cops,\n+\t\t\tpsd_qp_ctx->secondary_worker.nb_inflight_cops\n \t};\n-\tstruct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {\n-\t\t{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}\n+\tstruct psd_schedule_op enq_ops[NB_PKT_SIZE_WORKERS] = {\n+\t\t{PRIMARY_WORKER_IDX, 0}, {SECONDARY_WORKER_IDX, 0}\n \t};\n \tstruct psd_schedule_op *p_enq_op;\n \tuint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;\n@@ -80,13 +80,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t/* stop schedule cops before the queue is full, this shall\n \t\t * prevent the failed enqueue\n \t\t */\n-\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==\n+\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==\n \t\t\t\tqp_ctx->max_nb_objs) {\n \t\t\ti = nb_ops;\n \t\t\tbreak;\n \t\t}\n \n-\t\tsched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];\n+\t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];\n \t\tp_enq_op->pos++;\n \n \t\tjob_len = ops[i+1]->sym->cipher.data.length;\n@@ -94,13 +94,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\t\tops[i+1]->sym->auth.data.length;\n 
\t\tp_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];\n \n-\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==\n+\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==\n \t\t\t\tqp_ctx->max_nb_objs) {\n \t\t\ti = nb_ops;\n \t\t\tbreak;\n \t\t}\n \n-\t\tsched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];\n+\t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+1];\n \t\tp_enq_op->pos++;\n \n \t\tjob_len = ops[i+2]->sym->cipher.data.length;\n@@ -108,13 +108,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\t\tops[i+2]->sym->auth.data.length;\n \t\tp_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];\n \n-\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==\n+\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==\n \t\t\t\tqp_ctx->max_nb_objs) {\n \t\t\ti = nb_ops;\n \t\t\tbreak;\n \t\t}\n \n-\t\tsched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];\n+\t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+2];\n \t\tp_enq_op->pos++;\n \n \t\tjob_len = ops[i+3]->sym->cipher.data.length;\n@@ -122,13 +122,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\t\tops[i+3]->sym->auth.data.length;\n \t\tp_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];\n \n-\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==\n+\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==\n \t\t\t\tqp_ctx->max_nb_objs) {\n \t\t\ti = nb_ops;\n \t\t\tbreak;\n \t\t}\n \n-\t\tsched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];\n+\t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+3];\n \t\tp_enq_op->pos++;\n \t}\n \n@@ -138,34 +138,34 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\t\tops[i]->sym->auth.data.length;\n \t\tp_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];\n \n-\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==\n+\t\tif (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] 
==\n \t\t\t\tqp_ctx->max_nb_objs) {\n \t\t\ti = nb_ops;\n \t\t\tbreak;\n \t\t}\n \n-\t\tsched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];\n+\t\tsched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];\n \t\tp_enq_op->pos++;\n \t}\n \n \tprocessed_ops_pri = rte_cryptodev_enqueue_burst(\n-\t\t\tpsd_qp_ctx->primary_slave.dev_id,\n-\t\t\tpsd_qp_ctx->primary_slave.qp_id,\n-\t\t\tsched_ops[PRIMARY_SLAVE_IDX],\n-\t\t\tenq_ops[PRIMARY_SLAVE_IDX].pos);\n-\t/* enqueue shall not fail as the slave queue is monitored */\n-\tRTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);\n+\t\t\tpsd_qp_ctx->primary_worker.dev_id,\n+\t\t\tpsd_qp_ctx->primary_worker.qp_id,\n+\t\t\tsched_ops[PRIMARY_WORKER_IDX],\n+\t\t\tenq_ops[PRIMARY_WORKER_IDX].pos);\n+\t/* enqueue shall not fail as the worker queue is monitored */\n+\tRTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_WORKER_IDX].pos);\n \n-\tpsd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;\n+\tpsd_qp_ctx->primary_worker.nb_inflight_cops += processed_ops_pri;\n \n \tprocessed_ops_sec = rte_cryptodev_enqueue_burst(\n-\t\t\tpsd_qp_ctx->secondary_slave.dev_id,\n-\t\t\tpsd_qp_ctx->secondary_slave.qp_id,\n-\t\t\tsched_ops[SECONDARY_SLAVE_IDX],\n-\t\t\tenq_ops[SECONDARY_SLAVE_IDX].pos);\n-\tRTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);\n+\t\t\tpsd_qp_ctx->secondary_worker.dev_id,\n+\t\t\tpsd_qp_ctx->secondary_worker.qp_id,\n+\t\t\tsched_ops[SECONDARY_WORKER_IDX],\n+\t\t\tenq_ops[SECONDARY_WORKER_IDX].pos);\n+\tRTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_WORKER_IDX].pos);\n \n-\tpsd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;\n+\tpsd_qp_ctx->secondary_worker.nb_inflight_cops += processed_ops_sec;\n \n \treturn processed_ops_pri + processed_ops_sec;\n }\n@@ -191,33 +191,33 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n {\n \tstruct psd_scheduler_qp_ctx *qp_ctx =\n \t\t\t((struct scheduler_qp_ctx *)qp)->private_qp_ctx;\n-\tstruct 
scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {\n-\t\t\t&qp_ctx->primary_slave, &qp_ctx->secondary_slave};\n-\tstruct scheduler_slave *slave = slaves[qp_ctx->deq_idx];\n+\tstruct scheduler_worker *workers[NB_PKT_SIZE_WORKERS] = {\n+\t\t\t&qp_ctx->primary_worker, &qp_ctx->secondary_worker};\n+\tstruct scheduler_worker *worker = workers[qp_ctx->deq_idx];\n \tuint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;\n \n-\tif (slave->nb_inflight_cops) {\n-\t\tnb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,\n-\t\t\tslave->qp_id, ops, nb_ops);\n-\t\tslave->nb_inflight_cops -= nb_deq_ops_pri;\n+\tif (worker->nb_inflight_cops) {\n+\t\tnb_deq_ops_pri = rte_cryptodev_dequeue_burst(worker->dev_id,\n+\t\t\tworker->qp_id, ops, nb_ops);\n+\t\tworker->nb_inflight_cops -= nb_deq_ops_pri;\n \t}\n \n-\tqp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;\n+\tqp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_IDX_SWITCH_MASK;\n \n \tif (nb_deq_ops_pri == nb_ops)\n \t\treturn nb_deq_ops_pri;\n \n-\tslave = slaves[qp_ctx->deq_idx];\n+\tworker = workers[qp_ctx->deq_idx];\n \n-\tif (slave->nb_inflight_cops) {\n-\t\tnb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,\n-\t\t\t\tslave->qp_id, &ops[nb_deq_ops_pri],\n+\tif (worker->nb_inflight_cops) {\n+\t\tnb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id,\n+\t\t\t\tworker->qp_id, &ops[nb_deq_ops_pri],\n \t\t\t\tnb_ops - nb_deq_ops_pri);\n-\t\tslave->nb_inflight_cops -= nb_deq_ops_sec;\n+\t\tworker->nb_inflight_cops -= nb_deq_ops_sec;\n \n-\t\tif (!slave->nb_inflight_cops)\n+\t\tif (!worker->nb_inflight_cops)\n \t\t\tqp_ctx->deq_idx = (~qp_ctx->deq_idx) &\n-\t\t\t\t\tSLAVE_IDX_SWITCH_MASK;\n+\t\t\t\t\tWORKER_IDX_SWITCH_MASK;\n \t}\n \n \treturn nb_deq_ops_pri + nb_deq_ops_sec;\n@@ -236,15 +236,15 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,\n }\n \n static int\n-slave_attach(__rte_unused struct rte_cryptodev *dev,\n-\t\t__rte_unused uint8_t slave_id)\n+worker_attach(__rte_unused struct 
rte_cryptodev *dev,\n+\t\t__rte_unused uint8_t worker_id)\n {\n \treturn 0;\n }\n \n static int\n-slave_detach(__rte_unused struct rte_cryptodev *dev,\n-\t\t__rte_unused uint8_t slave_id)\n+worker_detach(__rte_unused struct rte_cryptodev *dev,\n+\t\t__rte_unused uint8_t worker_id)\n {\n \treturn 0;\n }\n@@ -256,9 +256,9 @@ scheduler_start(struct rte_cryptodev *dev)\n \tstruct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;\n \tuint16_t i;\n \n-\t/* for packet size based scheduler, nb_slaves have to >= 2 */\n-\tif (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {\n-\t\tCR_SCHED_LOG(ERR, \"not enough slaves to start\");\n+\t/* for packet size based scheduler, nb_workers have to >= 2 */\n+\tif (sched_ctx->nb_workers < NB_PKT_SIZE_WORKERS) {\n+\t\tCR_SCHED_LOG(ERR, \"not enough workers to start\");\n \t\treturn -1;\n \t}\n \n@@ -267,15 +267,15 @@ scheduler_start(struct rte_cryptodev *dev)\n \t\tstruct psd_scheduler_qp_ctx *ps_qp_ctx =\n \t\t\t\tqp_ctx->private_qp_ctx;\n \n-\t\tps_qp_ctx->primary_slave.dev_id =\n-\t\t\t\tsched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;\n-\t\tps_qp_ctx->primary_slave.qp_id = i;\n-\t\tps_qp_ctx->primary_slave.nb_inflight_cops = 0;\n+\t\tps_qp_ctx->primary_worker.dev_id =\n+\t\t\t\tsched_ctx->workers[PRIMARY_WORKER_IDX].dev_id;\n+\t\tps_qp_ctx->primary_worker.qp_id = i;\n+\t\tps_qp_ctx->primary_worker.nb_inflight_cops = 0;\n \n-\t\tps_qp_ctx->secondary_slave.dev_id =\n-\t\t\t\tsched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;\n-\t\tps_qp_ctx->secondary_slave.qp_id = i;\n-\t\tps_qp_ctx->secondary_slave.nb_inflight_cops = 0;\n+\t\tps_qp_ctx->secondary_worker.dev_id =\n+\t\t\t\tsched_ctx->workers[SECONDARY_WORKER_IDX].dev_id;\n+\t\tps_qp_ctx->secondary_worker.qp_id = i;\n+\t\tps_qp_ctx->secondary_worker.nb_inflight_cops = 0;\n \n \t\tps_qp_ctx->threshold = psd_ctx->threshold;\n \t}\n@@ -300,9 +300,9 @@ scheduler_stop(struct rte_cryptodev *dev)\n \t\tstruct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];\n \t\tstruct psd_scheduler_qp_ctx 
*ps_qp_ctx = qp_ctx->private_qp_ctx;\n \n-\t\tif (ps_qp_ctx->primary_slave.nb_inflight_cops +\n-\t\t\t\tps_qp_ctx->secondary_slave.nb_inflight_cops) {\n-\t\t\tCR_SCHED_LOG(ERR, \"Some crypto ops left in slave queue\");\n+\t\tif (ps_qp_ctx->primary_worker.nb_inflight_cops +\n+\t\t\t\tps_qp_ctx->secondary_worker.nb_inflight_cops) {\n+\t\t\tCR_SCHED_LOG(ERR, \"Some crypto ops left in worker queue\");\n \t\t\treturn -1;\n \t\t}\n \t}\n@@ -399,8 +399,8 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,\n }\n \n static struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {\n-\tslave_attach,\n-\tslave_detach,\n+\tworker_attach,\n+\tworker_detach,\n \tscheduler_start,\n \tscheduler_stop,\n \tscheduler_config_qp,\ndiff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c\nindex a1632a2b9..632197833 100644\n--- a/drivers/crypto/scheduler/scheduler_pmd.c\n+++ b/drivers/crypto/scheduler/scheduler_pmd.c\n@@ -18,18 +18,18 @@ uint8_t cryptodev_scheduler_driver_id;\n \n struct scheduler_init_params {\n \tstruct rte_cryptodev_pmd_init_params def_p;\n-\tuint32_t nb_slaves;\n+\tuint32_t nb_workers;\n \tenum rte_cryptodev_scheduler_mode mode;\n \tchar mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];\n \tuint32_t enable_ordering;\n \tuint16_t wc_pool[RTE_MAX_LCORE];\n \tuint16_t nb_wc;\n-\tchar slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]\n+\tchar worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]\n \t\t\t[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];\n };\n \n #define RTE_CRYPTODEV_VDEV_NAME\t\t\t(\"name\")\n-#define RTE_CRYPTODEV_VDEV_SLAVE\t\t(\"slave\")\n+#define RTE_CRYPTODEV_VDEV_WORKER\t\t(\"worker\")\n #define RTE_CRYPTODEV_VDEV_MODE\t\t\t(\"mode\")\n #define RTE_CRYPTODEV_VDEV_MODE_PARAM\t\t(\"mode_param\")\n #define RTE_CRYPTODEV_VDEV_ORDERING\t\t(\"ordering\")\n@@ -40,7 +40,7 @@ struct scheduler_init_params {\n \n static const char * const scheduler_valid_params[] = {\n 
\tRTE_CRYPTODEV_VDEV_NAME,\n-\tRTE_CRYPTODEV_VDEV_SLAVE,\n+\tRTE_CRYPTODEV_VDEV_WORKER,\n \tRTE_CRYPTODEV_VDEV_MODE,\n \tRTE_CRYPTODEV_VDEV_MODE_PARAM,\n \tRTE_CRYPTODEV_VDEV_ORDERING,\n@@ -193,31 +193,31 @@ cryptodev_scheduler_create(const char *name,\n \t\tbreak;\n \t}\n \n-\tfor (i = 0; i < init_params->nb_slaves; i++) {\n-\t\tsched_ctx->init_slave_names[sched_ctx->nb_init_slaves] =\n+\tfor (i = 0; i < init_params->nb_workers; i++) {\n+\t\tsched_ctx->init_worker_names[sched_ctx->nb_init_workers] =\n \t\t\trte_zmalloc_socket(\n \t\t\t\tNULL,\n \t\t\t\tRTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0,\n \t\t\t\tSOCKET_ID_ANY);\n \n-\t\tif (!sched_ctx->init_slave_names[\n-\t\t\t\tsched_ctx->nb_init_slaves]) {\n+\t\tif (!sched_ctx->init_worker_names[\n+\t\t\t\tsched_ctx->nb_init_workers]) {\n \t\t\tCR_SCHED_LOG(ERR, \"driver %s: Insufficient memory\",\n \t\t\t\t\tname);\n \t\t\treturn -ENOMEM;\n \t\t}\n \n-\t\tstrncpy(sched_ctx->init_slave_names[\n-\t\t\t\t\tsched_ctx->nb_init_slaves],\n-\t\t\t\tinit_params->slave_names[i],\n+\t\tstrncpy(sched_ctx->init_worker_names[\n+\t\t\t\t\tsched_ctx->nb_init_workers],\n+\t\t\t\tinit_params->worker_names[i],\n \t\t\t\tRTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);\n \n-\t\tsched_ctx->nb_init_slaves++;\n+\t\tsched_ctx->nb_init_workers++;\n \t}\n \n \t/*\n \t * Initialize capabilities structure as an empty structure,\n-\t * in case device information is requested when no slaves are attached\n+\t * in case device information is requested when no workers are attached\n \t */\n \tsched_ctx->capabilities = rte_zmalloc_socket(NULL,\n \t\t\tsizeof(struct rte_cryptodev_capabilities),\n@@ -249,12 +249,12 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev)\n \n \tsched_ctx = dev->data->dev_private;\n \n-\tif (sched_ctx->nb_slaves) {\n+\tif (sched_ctx->nb_workers) {\n \t\tuint32_t i;\n \n-\t\tfor (i = 0; i < sched_ctx->nb_slaves; 
i++)\n-\t\t\trte_cryptodev_scheduler_slave_detach(dev->data->dev_id,\n-\t\t\t\t\tsched_ctx->slaves[i].dev_id);\n+\t\tfor (i = 0; i < sched_ctx->nb_workers; i++)\n+\t\t\trte_cryptodev_scheduler_worker_detach(dev->data->dev_id,\n+\t\t\t\t\tsched_ctx->workers[i].dev_id);\n \t}\n \n \treturn rte_cryptodev_pmd_destroy(dev);\n@@ -374,19 +374,19 @@ parse_name_arg(const char *key __rte_unused,\n \treturn 0;\n }\n \n-/** Parse slave */\n+/** Parse worker */\n static int\n-parse_slave_arg(const char *key __rte_unused,\n+parse_worker_arg(const char *key __rte_unused,\n \t\tconst char *value, void *extra_args)\n {\n \tstruct scheduler_init_params *param = extra_args;\n \n-\tif (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {\n-\t\tCR_SCHED_LOG(ERR, \"Too many slaves.\");\n+\tif (param->nb_workers >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {\n+\t\tCR_SCHED_LOG(ERR, \"Too many workers.\");\n \t\treturn -ENOMEM;\n \t}\n \n-\tstrncpy(param->slave_names[param->nb_slaves++], value,\n+\tstrncpy(param->worker_names[param->nb_workers++], value,\n \t\t\tRTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);\n \n \treturn 0;\n@@ -498,8 +498,8 @@ scheduler_parse_init_params(struct scheduler_init_params *params,\n \t\tif (ret < 0)\n \t\t\tgoto free_kvlist;\n \n-\t\tret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SLAVE,\n-\t\t\t\t&parse_slave_arg, params);\n+\t\tret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_WORKER,\n+\t\t\t\t&parse_worker_arg, params);\n \t\tif (ret < 0)\n \t\t\tgoto free_kvlist;\n \n@@ -534,10 +534,10 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)\n \t\t\trte_socket_id(),\n \t\t\tRTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS\n \t\t},\n-\t\t.nb_slaves = 0,\n+\t\t.nb_workers = 0,\n \t\t.mode = CDEV_SCHED_MODE_NOT_SET,\n \t\t.enable_ordering = 0,\n-\t\t.slave_names = { {0} }\n+\t\t.worker_names = { {0} }\n \t};\n \tconst char *name;\n \n@@ -566,7 +566,7 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,\n 
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,\n \t\"max_nb_queue_pairs=<int> \"\n \t\"socket_id=<int> \"\n-\t\"slave=<name>\");\n+\t\"worker=<name>\");\n RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,\n \t\tcryptodev_scheduler_pmd_drv.driver,\n \t\tcryptodev_scheduler_driver_id);\ndiff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c\nindex 14e5a3712..cb125e802 100644\n--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c\n+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c\n@@ -12,43 +12,43 @@\n \n #include \"scheduler_pmd_private.h\"\n \n-/** attaching the slaves predefined by scheduler's EAL options */\n+/** attaching the workers predefined by scheduler's EAL options */\n static int\n-scheduler_attach_init_slave(struct rte_cryptodev *dev)\n+scheduler_attach_init_worker(struct rte_cryptodev *dev)\n {\n \tstruct scheduler_ctx *sched_ctx = dev->data->dev_private;\n \tuint8_t scheduler_id = dev->data->dev_id;\n \tint i;\n \n-\tfor (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {\n-\t\tconst char *dev_name = sched_ctx->init_slave_names[i];\n-\t\tstruct rte_cryptodev *slave_dev =\n+\tfor (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {\n+\t\tconst char *dev_name = sched_ctx->init_worker_names[i];\n+\t\tstruct rte_cryptodev *worker_dev =\n \t\t\t\trte_cryptodev_pmd_get_named_dev(dev_name);\n \t\tint status;\n \n-\t\tif (!slave_dev) {\n-\t\t\tCR_SCHED_LOG(ERR, \"Failed to locate slave dev %s\",\n+\t\tif (!worker_dev) {\n+\t\t\tCR_SCHED_LOG(ERR, \"Failed to locate worker dev %s\",\n \t\t\t\t\tdev_name);\n \t\t\treturn -EINVAL;\n \t\t}\n \n-\t\tstatus = rte_cryptodev_scheduler_slave_attach(\n-\t\t\t\tscheduler_id, slave_dev->data->dev_id);\n+\t\tstatus = rte_cryptodev_scheduler_worker_attach(\n+\t\t\t\tscheduler_id, worker_dev->data->dev_id);\n \n \t\tif (status < 0) {\n-\t\t\tCR_SCHED_LOG(ERR, \"Failed to attach slave cryptodev %u\",\n-\t\t\t\t\tslave_dev->data->dev_id);\n+\t\t\tCR_SCHED_LOG(ERR, 
\"Failed to attach worker cryptodev %u\",\n+\t\t\t\t\tworker_dev->data->dev_id);\n \t\t\treturn status;\n \t\t}\n \n-\t\tCR_SCHED_LOG(INFO, \"Scheduler %s attached slave %s\",\n+\t\tCR_SCHED_LOG(INFO, \"Scheduler %s attached worker %s\",\n \t\t\t\tdev->data->name,\n-\t\t\t\tsched_ctx->init_slave_names[i]);\n+\t\t\t\tsched_ctx->init_worker_names[i]);\n \n-\t\trte_free(sched_ctx->init_slave_names[i]);\n-\t\tsched_ctx->init_slave_names[i] = NULL;\n+\t\trte_free(sched_ctx->init_worker_names[i]);\n+\t\tsched_ctx->init_worker_names[i] = NULL;\n \n-\t\tsched_ctx->nb_init_slaves -= 1;\n+\t\tsched_ctx->nb_init_workers -= 1;\n \t}\n \n \treturn 0;\n@@ -62,17 +62,17 @@ scheduler_pmd_config(struct rte_cryptodev *dev,\n \tuint32_t i;\n \tint ret;\n \n-\t/* although scheduler_attach_init_slave presents multiple times,\n+\t/* although scheduler_attach_init_worker presents multiple times,\n \t * there will be only 1 meaningful execution.\n \t */\n-\tret = scheduler_attach_init_slave(dev);\n+\tret = scheduler_attach_init_worker(dev);\n \tif (ret < 0)\n \t\treturn ret;\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n \n-\t\tret = rte_cryptodev_configure(slave_dev_id, config);\n+\t\tret = rte_cryptodev_configure(worker_dev_id, config);\n \t\tif (ret < 0)\n \t\t\tbreak;\n \t}\n@@ -89,7 +89,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)\n \tif (sched_ctx->reordering_enabled) {\n \t\tchar order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];\n \t\tuint32_t buff_size = rte_align32pow2(\n-\t\t\tsched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);\n+\t\t\tsched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);\n \n \t\tif (qp_ctx->order_ring) {\n \t\t\trte_ring_free(qp_ctx->order_ring);\n@@ -135,10 +135,10 @@ scheduler_pmd_start(struct rte_cryptodev *dev)\n \tif (dev->data->dev_started)\n \t\treturn 0;\n \n-\t/* although 
scheduler_attach_init_slave presents multiple times,\n+\t/* although scheduler_attach_init_worker presents multiple times,\n \t * there will be only 1 meaningful execution.\n \t */\n-\tret = scheduler_attach_init_slave(dev);\n+\tret = scheduler_attach_init_worker(dev);\n \tif (ret < 0)\n \t\treturn ret;\n \n@@ -155,18 +155,18 @@ scheduler_pmd_start(struct rte_cryptodev *dev)\n \t\treturn -1;\n \t}\n \n-\tif (!sched_ctx->nb_slaves) {\n-\t\tCR_SCHED_LOG(ERR, \"No slave in the scheduler\");\n+\tif (!sched_ctx->nb_workers) {\n+\t\tCR_SCHED_LOG(ERR, \"No worker in the scheduler\");\n \t\treturn -1;\n \t}\n \n-\tRTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);\n+\tRTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.worker_attach, -ENOTSUP);\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n \n-\t\tif ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {\n-\t\t\tCR_SCHED_LOG(ERR, \"Failed to attach slave\");\n+\t\tif ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {\n+\t\t\tCR_SCHED_LOG(ERR, \"Failed to attach worker\");\n \t\t\treturn -ENOTSUP;\n \t\t}\n \t}\n@@ -178,16 +178,16 @@ scheduler_pmd_start(struct rte_cryptodev *dev)\n \t\treturn -1;\n \t}\n \n-\t/* start all slaves */\n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n-\t\tstruct rte_cryptodev *slave_dev =\n-\t\t\t\trte_cryptodev_pmd_get_dev(slave_dev_id);\n+\t/* start all workers */\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n+\t\tstruct rte_cryptodev *worker_dev =\n+\t\t\t\trte_cryptodev_pmd_get_dev(worker_dev_id);\n \n-\t\tret = (*slave_dev->dev_ops->dev_start)(slave_dev);\n+\t\tret = (*worker_dev->dev_ops->dev_start)(worker_dev);\n \t\tif (ret < 0) {\n-\t\t\tCR_SCHED_LOG(ERR, \"Failed to start slave 
dev %u\",\n-\t\t\t\t\tslave_dev_id);\n+\t\t\tCR_SCHED_LOG(ERR, \"Failed to start worker dev %u\",\n+\t\t\t\t\tworker_dev_id);\n \t\t\treturn ret;\n \t\t}\n \t}\n@@ -205,23 +205,23 @@ scheduler_pmd_stop(struct rte_cryptodev *dev)\n \tif (!dev->data->dev_started)\n \t\treturn;\n \n-\t/* stop all slaves first */\n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n-\t\tstruct rte_cryptodev *slave_dev =\n-\t\t\t\trte_cryptodev_pmd_get_dev(slave_dev_id);\n+\t/* stop all workers first */\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n+\t\tstruct rte_cryptodev *worker_dev =\n+\t\t\t\trte_cryptodev_pmd_get_dev(worker_dev_id);\n \n-\t\t(*slave_dev->dev_ops->dev_stop)(slave_dev);\n+\t\t(*worker_dev->dev_ops->dev_stop)(worker_dev);\n \t}\n \n \tif (*sched_ctx->ops.scheduler_stop)\n \t\t(*sched_ctx->ops.scheduler_stop)(dev);\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n \n-\t\tif (*sched_ctx->ops.slave_detach)\n-\t\t\t(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);\n+\t\tif (*sched_ctx->ops.worker_detach)\n+\t\t\t(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);\n \t}\n }\n \n@@ -237,13 +237,13 @@ scheduler_pmd_close(struct rte_cryptodev *dev)\n \tif (dev->data->dev_started)\n \t\treturn -EBUSY;\n \n-\t/* close all slaves first */\n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n-\t\tstruct rte_cryptodev *slave_dev =\n-\t\t\t\trte_cryptodev_pmd_get_dev(slave_dev_id);\n+\t/* close all workers first */\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n+\t\tstruct rte_cryptodev *worker_dev =\n+\t\t\t\trte_cryptodev_pmd_get_dev(worker_dev_id);\n \n-\t\tret = 
(*slave_dev->dev_ops->dev_close)(slave_dev);\n+\t\tret = (*worker_dev->dev_ops->dev_close)(worker_dev);\n \t\tif (ret < 0)\n \t\t\treturn ret;\n \t}\n@@ -283,19 +283,19 @@ scheduler_pmd_stats_get(struct rte_cryptodev *dev,\n \tstruct scheduler_ctx *sched_ctx = dev->data->dev_private;\n \tuint32_t i;\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n-\t\tstruct rte_cryptodev *slave_dev =\n-\t\t\t\trte_cryptodev_pmd_get_dev(slave_dev_id);\n-\t\tstruct rte_cryptodev_stats slave_stats = {0};\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n+\t\tstruct rte_cryptodev *worker_dev =\n+\t\t\t\trte_cryptodev_pmd_get_dev(worker_dev_id);\n+\t\tstruct rte_cryptodev_stats worker_stats = {0};\n \n-\t\t(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);\n+\t\t(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);\n \n-\t\tstats->enqueued_count += slave_stats.enqueued_count;\n-\t\tstats->dequeued_count += slave_stats.dequeued_count;\n+\t\tstats->enqueued_count += worker_stats.enqueued_count;\n+\t\tstats->dequeued_count += worker_stats.dequeued_count;\n \n-\t\tstats->enqueue_err_count += slave_stats.enqueue_err_count;\n-\t\tstats->dequeue_err_count += slave_stats.dequeue_err_count;\n+\t\tstats->enqueue_err_count += worker_stats.enqueue_err_count;\n+\t\tstats->dequeue_err_count += worker_stats.dequeue_err_count;\n \t}\n }\n \n@@ -306,12 +306,12 @@ scheduler_pmd_stats_reset(struct rte_cryptodev *dev)\n \tstruct scheduler_ctx *sched_ctx = dev->data->dev_private;\n \tuint32_t i;\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n-\t\tstruct rte_cryptodev *slave_dev =\n-\t\t\t\trte_cryptodev_pmd_get_dev(slave_dev_id);\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n+\t\tstruct rte_cryptodev *worker_dev 
=\n+\t\t\t\trte_cryptodev_pmd_get_dev(worker_dev_id);\n \n-\t\t(*slave_dev->dev_ops->stats_reset)(slave_dev);\n+\t\t(*worker_dev->dev_ops->stats_reset)(worker_dev);\n \t}\n }\n \n@@ -329,32 +329,32 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,\n \tif (!dev_info)\n \t\treturn;\n \n-\t/* although scheduler_attach_init_slave presents multiple times,\n+\t/* although scheduler_attach_init_worker presents multiple times,\n \t * there will be only 1 meaningful execution.\n \t */\n-\tscheduler_attach_init_slave(dev);\n+\tscheduler_attach_init_worker(dev);\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n-\t\tstruct rte_cryptodev_info slave_info;\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n+\t\tstruct rte_cryptodev_info worker_info;\n \n-\t\trte_cryptodev_info_get(slave_dev_id, &slave_info);\n-\t\tuint32_t dev_max_sess = slave_info.sym.max_nb_sessions;\n+\t\trte_cryptodev_info_get(worker_dev_id, &worker_info);\n+\t\tuint32_t dev_max_sess = worker_info.sym.max_nb_sessions;\n \t\tif (dev_max_sess != 0) {\n \t\t\tif (max_nb_sess == 0 ||\tdev_max_sess < max_nb_sess)\n-\t\t\t\tmax_nb_sess = slave_info.sym.max_nb_sessions;\n+\t\t\t\tmax_nb_sess = worker_info.sym.max_nb_sessions;\n \t\t}\n \n-\t\t/* Get the max headroom requirement among slave PMDs */\n-\t\theadroom_sz = slave_info.min_mbuf_headroom_req >\n+\t\t/* Get the max headroom requirement among worker PMDs */\n+\t\theadroom_sz = worker_info.min_mbuf_headroom_req >\n \t\t\t\theadroom_sz ?\n-\t\t\t\tslave_info.min_mbuf_headroom_req :\n+\t\t\t\tworker_info.min_mbuf_headroom_req :\n \t\t\t\theadroom_sz;\n \n-\t\t/* Get the max tailroom requirement among slave PMDs */\n-\t\ttailroom_sz = slave_info.min_mbuf_tailroom_req >\n+\t\t/* Get the max tailroom requirement among worker PMDs */\n+\t\ttailroom_sz = worker_info.min_mbuf_tailroom_req >\n \t\t\t\ttailroom_sz 
?\n-\t\t\t\tslave_info.min_mbuf_tailroom_req :\n+\t\t\t\tworker_info.min_mbuf_tailroom_req :\n \t\t\t\ttailroom_sz;\n \t}\n \n@@ -409,15 +409,15 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n \tif (dev->data->queue_pairs[qp_id] != NULL)\n \t\tscheduler_pmd_qp_release(dev, qp_id);\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_id = sched_ctx->slaves[i].dev_id;\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_id = sched_ctx->workers[i].dev_id;\n \n \t\t/*\n-\t\t * All slaves will share the same session mempool\n+\t\t * All workers will share the same session mempool\n \t\t * for session-less operations, so the objects\n \t\t * must be big enough for all the drivers used.\n \t\t */\n-\t\tret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,\n+\t\tret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,\n \t\t\t\tqp_conf, socket_id);\n \t\tif (ret < 0)\n \t\t\treturn ret;\n@@ -434,12 +434,12 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n \n \tdev->data->queue_pairs[qp_id] = qp_ctx;\n \n-\t/* although scheduler_attach_init_slave presents multiple times,\n+\t/* although scheduler_attach_init_worker presents multiple times,\n \t * there will be only 1 meaningful execution.\n \t */\n-\tret = scheduler_attach_init_slave(dev);\n+\tret = scheduler_attach_init_worker(dev);\n \tif (ret < 0) {\n-\t\tCR_SCHED_LOG(ERR, \"Failed to attach slave\");\n+\t\tCR_SCHED_LOG(ERR, \"Failed to attach worker\");\n \t\tscheduler_pmd_qp_release(dev, qp_id);\n \t\treturn ret;\n \t}\n@@ -461,10 +461,10 @@ scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)\n \tuint8_t i = 0;\n \tuint32_t max_priv_sess_size = 0;\n \n-\t/* Check what is the maximum private session size for all slaves */\n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tuint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;\n-\t\tstruct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];\n+\t/* Check what is the 
maximum private session size for all workers */\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tuint8_t worker_dev_id = sched_ctx->workers[i].dev_id;\n+\t\tstruct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];\n \t\tuint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);\n \n \t\tif (max_priv_sess_size < priv_sess_size)\n@@ -484,10 +484,10 @@ scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,\n \tuint32_t i;\n \tint ret;\n \n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tstruct scheduler_slave *slave = &sched_ctx->slaves[i];\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tstruct scheduler_worker *worker = &sched_ctx->workers[i];\n \n-\t\tret = rte_cryptodev_sym_session_init(slave->dev_id, sess,\n+\t\tret = rte_cryptodev_sym_session_init(worker->dev_id, sess,\n \t\t\t\t\txform, mempool);\n \t\tif (ret < 0) {\n \t\t\tCR_SCHED_LOG(ERR, \"unable to config sym session\");\n@@ -506,11 +506,11 @@ scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,\n \tstruct scheduler_ctx *sched_ctx = dev->data->dev_private;\n \tuint32_t i;\n \n-\t/* Clear private data of slaves */\n-\tfor (i = 0; i < sched_ctx->nb_slaves; i++) {\n-\t\tstruct scheduler_slave *slave = &sched_ctx->slaves[i];\n+\t/* Clear private data of workers */\n+\tfor (i = 0; i < sched_ctx->nb_workers; i++) {\n+\t\tstruct scheduler_worker *worker = &sched_ctx->workers[i];\n \n-\t\trte_cryptodev_sym_session_clear(slave->dev_id, sess);\n+\t\trte_cryptodev_sym_session_clear(worker->dev_id, sess);\n \t}\n }\n \ndiff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h\nindex e1531d1da..adb4eb063 100644\n--- a/drivers/crypto/scheduler/scheduler_pmd_private.h\n+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h\n@@ -10,7 +10,7 @@\n #define CRYPTODEV_NAME_SCHEDULER_PMD\tcrypto_scheduler\n /**< Scheduler Crypto PMD device name */\n \n-#define PER_SLAVE_BUFF_SIZE\t\t\t(256)\n+#define 
PER_WORKER_BUFF_SIZE\t\t\t(256)\n \n extern int scheduler_logtype_driver;\n \n@@ -18,7 +18,7 @@ extern int scheduler_logtype_driver;\n \trte_log(RTE_LOG_ ## level, scheduler_logtype_driver,\t\t\\\n \t\t\t\"%s() line %u: \"fmt \"\\n\", __func__, __LINE__, ##args)\n \n-struct scheduler_slave {\n+struct scheduler_worker {\n \tuint8_t dev_id;\n \tuint16_t qp_id;\n \tuint32_t nb_inflight_cops;\n@@ -35,8 +35,8 @@ struct scheduler_ctx {\n \n \tuint32_t max_nb_queue_pairs;\n \n-\tstruct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];\n-\tuint32_t nb_slaves;\n+\tstruct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];\n+\tuint32_t nb_workers;\n \n \tenum rte_cryptodev_scheduler_mode mode;\n \n@@ -49,8 +49,8 @@ struct scheduler_ctx {\n \tuint16_t wc_pool[RTE_MAX_LCORE];\n \tuint16_t nb_wc;\n \n-\tchar *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];\n-\tint nb_init_slaves;\n+\tchar *init_worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];\n+\tint nb_init_workers;\n } __rte_cache_aligned;\n \n struct scheduler_qp_ctx {\ndiff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c\nindex 9b891d978..bc4a63210 100644\n--- a/drivers/crypto/scheduler/scheduler_roundrobin.c\n+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c\n@@ -9,11 +9,11 @@\n #include \"scheduler_pmd_private.h\"\n \n struct rr_scheduler_qp_ctx {\n-\tstruct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];\n-\tuint32_t nb_slaves;\n+\tstruct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];\n+\tuint32_t nb_workers;\n \n-\tuint32_t last_enq_slave_idx;\n-\tuint32_t last_deq_slave_idx;\n+\tuint32_t last_enq_worker_idx;\n+\tuint32_t last_deq_worker_idx;\n };\n \n static uint16_t\n@@ -21,8 +21,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n {\n \tstruct rr_scheduler_qp_ctx *rr_qp_ctx =\n \t\t\t((struct scheduler_qp_ctx *)qp)->private_qp_ctx;\n-\tuint32_t slave_idx = 
rr_qp_ctx->last_enq_slave_idx;\n-\tstruct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];\n+\tuint32_t worker_idx = rr_qp_ctx->last_enq_worker_idx;\n+\tstruct scheduler_worker *worker = &rr_qp_ctx->workers[worker_idx];\n \tuint16_t i, processed_ops;\n \n \tif (unlikely(nb_ops == 0))\n@@ -31,13 +31,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n \tfor (i = 0; i < nb_ops && i < 4; i++)\n \t\trte_prefetch0(ops[i]->sym->session);\n \n-\tprocessed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,\n-\t\t\tslave->qp_id, ops, nb_ops);\n+\tprocessed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,\n+\t\t\tworker->qp_id, ops, nb_ops);\n \n-\tslave->nb_inflight_cops += processed_ops;\n+\tworker->nb_inflight_cops += processed_ops;\n \n-\trr_qp_ctx->last_enq_slave_idx += 1;\n-\trr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;\n+\trr_qp_ctx->last_enq_worker_idx += 1;\n+\trr_qp_ctx->last_enq_worker_idx %= rr_qp_ctx->nb_workers;\n \n \treturn processed_ops;\n }\n@@ -64,34 +64,35 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)\n {\n \tstruct rr_scheduler_qp_ctx *rr_qp_ctx =\n \t\t\t((struct scheduler_qp_ctx *)qp)->private_qp_ctx;\n-\tstruct scheduler_slave *slave;\n-\tuint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;\n+\tstruct scheduler_worker *worker;\n+\tuint32_t last_worker_idx = rr_qp_ctx->last_deq_worker_idx;\n \tuint16_t nb_deq_ops;\n \n-\tif (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {\n+\tif (unlikely(rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops\n+\t\t\t== 0)) {\n \t\tdo {\n-\t\t\tlast_slave_idx += 1;\n+\t\t\tlast_worker_idx += 1;\n \n-\t\t\tif (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))\n-\t\t\t\tlast_slave_idx = 0;\n+\t\t\tif (unlikely(last_worker_idx >= rr_qp_ctx->nb_workers))\n+\t\t\t\tlast_worker_idx = 0;\n \t\t\t/* looped back, means no inflight cops in the queue */\n-\t\t\tif (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)\n+\t\t\tif 
(last_worker_idx == rr_qp_ctx->last_deq_worker_idx)\n \t\t\t\treturn 0;\n-\t\t} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops\n+\t\t} while (rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops\n \t\t\t\t== 0);\n \t}\n \n-\tslave = &rr_qp_ctx->slaves[last_slave_idx];\n+\tworker = &rr_qp_ctx->workers[last_worker_idx];\n \n-\tnb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,\n-\t\t\tslave->qp_id, ops, nb_ops);\n+\tnb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,\n+\t\t\tworker->qp_id, ops, nb_ops);\n \n-\tlast_slave_idx += 1;\n-\tlast_slave_idx %= rr_qp_ctx->nb_slaves;\n+\tlast_worker_idx += 1;\n+\tlast_worker_idx %= rr_qp_ctx->nb_workers;\n \n-\trr_qp_ctx->last_deq_slave_idx = last_slave_idx;\n+\trr_qp_ctx->last_deq_worker_idx = last_worker_idx;\n \n-\tslave->nb_inflight_cops -= nb_deq_ops;\n+\tworker->nb_inflight_cops -= nb_deq_ops;\n \n \treturn nb_deq_ops;\n }\n@@ -109,15 +110,15 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,\n }\n \n static int\n-slave_attach(__rte_unused struct rte_cryptodev *dev,\n-\t\t__rte_unused uint8_t slave_id)\n+worker_attach(__rte_unused struct rte_cryptodev *dev,\n+\t\t__rte_unused uint8_t worker_id)\n {\n \treturn 0;\n }\n \n static int\n-slave_detach(__rte_unused struct rte_cryptodev *dev,\n-\t\t__rte_unused uint8_t slave_id)\n+worker_detach(__rte_unused struct rte_cryptodev *dev,\n+\t\t__rte_unused uint8_t worker_id)\n {\n \treturn 0;\n }\n@@ -142,19 +143,19 @@ scheduler_start(struct rte_cryptodev *dev)\n \t\t\t\tqp_ctx->private_qp_ctx;\n \t\tuint32_t j;\n \n-\t\tmemset(rr_qp_ctx->slaves, 0,\n-\t\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *\n-\t\t\t\tsizeof(struct scheduler_slave));\n-\t\tfor (j = 0; j < sched_ctx->nb_slaves; j++) {\n-\t\t\trr_qp_ctx->slaves[j].dev_id =\n-\t\t\t\t\tsched_ctx->slaves[j].dev_id;\n-\t\t\trr_qp_ctx->slaves[j].qp_id = i;\n+\t\tmemset(rr_qp_ctx->workers, 0,\n+\t\t\t\tRTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *\n+\t\t\t\tsizeof(struct scheduler_worker));\n+\t\tfor 
(j = 0; j < sched_ctx->nb_workers; j++) {\n+\t\t\trr_qp_ctx->workers[j].dev_id =\n+\t\t\t\t\tsched_ctx->workers[j].dev_id;\n+\t\t\trr_qp_ctx->workers[j].qp_id = i;\n \t\t}\n \n-\t\trr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;\n+\t\trr_qp_ctx->nb_workers = sched_ctx->nb_workers;\n \n-\t\trr_qp_ctx->last_enq_slave_idx = 0;\n-\t\trr_qp_ctx->last_deq_slave_idx = 0;\n+\t\trr_qp_ctx->last_enq_worker_idx = 0;\n+\t\trr_qp_ctx->last_deq_worker_idx = 0;\n \t}\n \n \treturn 0;\n@@ -191,8 +192,8 @@ scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)\n }\n \n static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {\n-\tslave_attach,\n-\tslave_detach,\n+\tworker_attach,\n+\tworker_detach,\n \tscheduler_start,\n \tscheduler_stop,\n \tscheduler_config_qp,\n@@ -204,7 +205,7 @@ static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {\n static struct rte_cryptodev_scheduler scheduler = {\n \t\t.name = \"roundrobin-scheduler\",\n \t\t.description = \"scheduler which will round robin burst across \"\n-\t\t\t\t\"slave crypto devices\",\n+\t\t\t\t\"worker crypto devices\",\n \t\t.mode = CDEV_SCHED_MODE_ROUNDROBIN,\n \t\t.ops = &scheduler_rr_ops\n };\n",
    "prefixes": []
}