get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/100011/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 100011,
    "url": "http://patches.dpdk.org/api/patches/100011/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210929163035.608387-5-ciara.power@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210929163035.608387-5-ciara.power@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210929163035.608387-5-ciara.power@intel.com",
    "date": "2021-09-29T16:30:29",
    "name": "[v3,04/10] drivers/crypto: move aesni-gcm PMD to IPsec-mb framework",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a26162a2004929030361a942082957d2521cf9c1",
    "submitter": {
        "id": 978,
        "url": "http://patches.dpdk.org/api/people/978/?format=api",
        "name": "Power, Ciara",
        "email": "ciara.power@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210929163035.608387-5-ciara.power@intel.com/mbox/",
    "series": [
        {
            "id": 19269,
            "url": "http://patches.dpdk.org/api/series/19269/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=19269",
            "date": "2021-09-29T16:30:25",
            "name": "drivers/crypto: introduce ipsec_mb framework",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/19269/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/100011/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/100011/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D6ADEA0547;\n\tWed, 29 Sep 2021 18:31:11 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 5F4764111A;\n\tWed, 29 Sep 2021 18:30:52 +0200 (CEST)",
            "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id 495FF4111A\n for <dev@dpdk.org>; Wed, 29 Sep 2021 18:30:50 +0200 (CEST)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 29 Sep 2021 09:30:49 -0700",
            "from silpixa00400355.ir.intel.com (HELO\n silpixa00400355.ger.corp.intel.com) ([10.237.222.87])\n by orsmga002.jf.intel.com with ESMTP; 29 Sep 2021 09:30:47 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10122\"; a=\"285997962\"",
            "E=Sophos;i=\"5.85,332,1624345200\"; d=\"scan'208\";a=\"285997962\"",
            "E=Sophos;i=\"5.85,332,1624345200\"; d=\"scan'208\";a=\"457092763\""
        ],
        "X-ExtLoop1": "1",
        "From": "Ciara Power <ciara.power@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "roy.fan.zhang@intel.com, piotrx.bronowski@intel.com, gakhil@marvell.com,\n Ciara Power <ciara.power@intel.com>, Thomas Monjalon <thomas@monjalon.net>,\n Pablo de Lara <pablo.de.lara.guarch@intel.com>,\n Ray Kinsella <mdr@ashroe.eu>",
        "Date": "Wed, 29 Sep 2021 16:30:29 +0000",
        "Message-Id": "<20210929163035.608387-5-ciara.power@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20210929163035.608387-1-ciara.power@intel.com>",
        "References": "<20210727083832.291687-1-roy.fan.zhang@intel.com>\n <20210929163035.608387-1-ciara.power@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v3 04/10] drivers/crypto: move aesni-gcm PMD to\n IPsec-mb framework",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Piotr Bronowski <piotrx.bronowski@intel.com>\n\nThis patch removes the crypto/aesni_gcm folder and gathers all\naesni-gcm PMD implementation specific details into a single file,\npmd_aesni_gcm.c in the crypto/ipsec_mb folder.\nA redundant check for iv length is removed.\n\nGCM ops are stored in the queue pair for multi process support, they\nare updated during queue pair setup for both primary and secondary\nprocesses.\n\nGCM ops are also set per lcore for the CPU crypto mode.\n\nSigned-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>\nSigned-off-by: Ciara Power <ciara.power@intel.com>\n\n---\nv3:\n  - Moved session GCM ops to queue pair.\n  - Added GCM ops per lcore.\n  - Fixed some formatting.\nv2:\n  - Fixed enum for GCM key length.\n  - Updated maintainers file.\n---\n MAINTAINERS                                   |    9 +-\n doc/guides/cryptodevs/aesni_gcm.rst           |    4 +-\n drivers/crypto/aesni_gcm/aesni_gcm_ops.h      |  104 --\n drivers/crypto/aesni_gcm/aesni_gcm_pmd.c      |  984 ----------------\n drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c  |  333 ------\n .../crypto/aesni_gcm/aesni_gcm_pmd_private.h  |  123 --\n drivers/crypto/aesni_gcm/meson.build          |   24 -\n drivers/crypto/aesni_gcm/version.map          |    3 -\n drivers/crypto/ipsec_mb/meson.build           |    3 +-\n drivers/crypto/ipsec_mb/pmd_aesni_gcm.c       | 1003 +++++++++++++++++\n .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |    7 +\n drivers/crypto/meson.build                    |    1 -\n 12 files changed, 1017 insertions(+), 1581 deletions(-)\n delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h\n delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c\n delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c\n delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h\n delete mode 100644 drivers/crypto/aesni_gcm/meson.build\n delete mode 100644 drivers/crypto/aesni_gcm/version.map\n create mode 100644 
drivers/crypto/ipsec_mb/pmd_aesni_gcm.c",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex 7b00cd8791..6247e50687 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -1042,13 +1042,6 @@ M: Fan Zhang <roy.fan.zhang@intel.com>\n F: drivers/crypto/scheduler/\n F: doc/guides/cryptodevs/scheduler.rst\n \n-Intel AES-NI GCM\n-M: Declan Doherty <declan.doherty@intel.com>\n-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>\n-F: drivers/crypto/aesni_gcm/\n-F: doc/guides/cryptodevs/aesni_gcm.rst\n-F: doc/guides/cryptodevs/features/aesni_gcm.ini\n-\n Intel QuickAssist\n M: John Griffin <john.griffin@intel.com>\n M: Fiona Trahe <fiona.trahe@intel.com>\n@@ -1062,7 +1055,9 @@ IPsec MB\n M: Fan Zhang <roy.fan.zhang@intel.com>\n M: Pablo de Lara <pablo.de.lara.guarch@intel.com>\n F: drivers/crypto/ipsec_mb/\n+F: doc/guides/cryptodevs/aesni_gcm.rst\n F: doc/guides/cryptodevs/aesni_mb.rst\n+F: doc/guides/cryptodevs/features/aesni_gcm.ini\n F: doc/guides/cryptodevs/features/aesni_mb.ini\n \n KASUMI\ndiff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst\nindex 11b23958d5..bbe9d99840 100644\n--- a/doc/guides/cryptodevs/aesni_gcm.rst\n+++ b/doc/guides/cryptodevs/aesni_gcm.rst\n@@ -83,7 +83,9 @@ and the external crypto libraries supported by them:\n    17.02 - 17.05  ISA-L Crypto v2.18\n    17.08 - 18.02  Multi-buffer library 0.46 - 0.48\n    18.05 - 19.02  Multi-buffer library 0.49 - 0.52\n-   19.05+         Multi-buffer library 0.52 - 1.0*\n+   19.05 - 20.08  Multi-buffer library 0.52 - 0.55\n+   20.11 - 21.08  Multi-buffer library 0.53 - 1.0*\n+   21.11+         Multi-buffer library 1.0*\n    =============  ================================\n \n \\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.\ndiff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h\ndeleted file mode 100644\nindex 8a0d074b6e..0000000000\n--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h\n+++ /dev/null\n@@ -1,104 +0,0 @@\n-/* 
SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2016-2020 Intel Corporation\n- */\n-\n-#ifndef _AESNI_GCM_OPS_H_\n-#define _AESNI_GCM_OPS_H_\n-\n-#ifndef LINUX\n-#define LINUX\n-#endif\n-\n-#include <intel-ipsec-mb.h>\n-\n-/** Supported vector modes */\n-enum aesni_gcm_vector_mode {\n-\tRTE_AESNI_GCM_NOT_SUPPORTED = 0,\n-\tRTE_AESNI_GCM_SSE,\n-\tRTE_AESNI_GCM_AVX,\n-\tRTE_AESNI_GCM_AVX2,\n-\tRTE_AESNI_GCM_AVX512,\n-\tRTE_AESNI_GCM_VECTOR_NUM\n-};\n-\n-enum aesni_gcm_key {\n-\tGCM_KEY_128 = 0,\n-\tGCM_KEY_192,\n-\tGCM_KEY_256,\n-\tGCM_KEY_NUM\n-};\n-\n-typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,\n-\t\tstruct gcm_context_data *gcm_ctx_data, uint8_t *out,\n-\t\tconst uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,\n-\t\tconst uint8_t *aad, uint64_t aad_len,\n-\t\tuint8_t *auth_tag, uint64_t auth_tag_len);\n-\n-typedef void (*aesni_gcm_pre_t)(const void *key, struct gcm_key_data *gcm_data);\n-\n-typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,\n-\t\tstruct gcm_context_data *gcm_ctx_data,\n-\t\tconst uint8_t *iv,\n-\t\tuint8_t const *aad,\n-\t\tuint64_t aad_len);\n-\n-typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,\n-\t\tstruct gcm_context_data *gcm_ctx_data,\n-\t\tuint8_t *out,\n-\t\tconst uint8_t *in,\n-\t\tuint64_t plaintext_len);\n-\n-typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,\n-\t\tstruct gcm_context_data *gcm_ctx_data,\n-\t\tuint8_t *auth_tag,\n-\t\tuint64_t auth_tag_len);\n-\n-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM\n-typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,\n-\t\tstruct gcm_context_data *gcm_ctx_data,\n-\t\tconst uint8_t *iv,\n-\t\tconst uint64_t iv_len);\n-\n-typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,\n-\t\tstruct gcm_context_data *gcm_ctx_data,\n-\t\tconst uint8_t *in,\n-\t\tconst uint64_t plaintext_len);\n-\n-typedef void (*aesni_gmac_finalize_t)(const struct 
gcm_key_data *gcm_key_data,\n-\t\tstruct gcm_context_data *gcm_ctx_data,\n-\t\tuint8_t *auth_tag,\n-\t\tconst uint64_t auth_tag_len);\n-#endif\n-\n-/** GCM library function pointer table */\n-struct aesni_gcm_ops {\n-\taesni_gcm_t enc;        /**< GCM encode function pointer */\n-\taesni_gcm_t dec;        /**< GCM decode function pointer */\n-\taesni_gcm_pre_t pre;    /**< GCM pre-compute */\n-\taesni_gcm_init_t init;\n-\taesni_gcm_update_t update_enc;\n-\taesni_gcm_update_t update_dec;\n-\taesni_gcm_finalize_t finalize_enc;\n-\taesni_gcm_finalize_t finalize_dec;\n-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM\n-\taesni_gmac_init_t gmac_init;\n-\taesni_gmac_update_t gmac_update;\n-\taesni_gmac_finalize_t gmac_finalize;\n-#endif\n-};\n-\n-/** GCM per-session operation handlers */\n-struct aesni_gcm_session_ops {\n-\taesni_gcm_t cipher;\n-\taesni_gcm_pre_t pre;\n-\taesni_gcm_init_t init;\n-\taesni_gcm_update_t update;\n-\taesni_gcm_finalize_t finalize;\n-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM\n-\taesni_gmac_init_t gmac_init;\n-\taesni_gmac_update_t gmac_update;\n-\taesni_gmac_finalize_t gmac_finalize;\n-#endif\n-};\n-\n-#endif /* _AESNI_GCM_OPS_H_ */\ndiff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c\ndeleted file mode 100644\nindex 330aad8157..0000000000\n--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c\n+++ /dev/null\n@@ -1,984 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2016-2020 Intel Corporation\n- */\n-\n-#include <rte_common.h>\n-#include <rte_hexdump.h>\n-#include <rte_cryptodev.h>\n-#include <cryptodev_pmd.h>\n-#include <rte_bus_vdev.h>\n-#include <rte_malloc.h>\n-#include <rte_cpuflags.h>\n-#include <rte_byteorder.h>\n-\n-#include \"aesni_gcm_pmd_private.h\"\n-\n-static uint8_t cryptodev_driver_id;\n-\n-/* setup session handlers */\n-static void\n-set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)\n-{\n-\ts->ops.pre = gcm_ops->pre;\n-\ts->ops.init = 
gcm_ops->init;\n-\n-\tswitch (s->op) {\n-\tcase AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:\n-\t\ts->ops.cipher = gcm_ops->enc;\n-\t\ts->ops.update = gcm_ops->update_enc;\n-\t\ts->ops.finalize = gcm_ops->finalize_enc;\n-\t\tbreak;\n-\tcase AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:\n-\t\ts->ops.cipher = gcm_ops->dec;\n-\t\ts->ops.update = gcm_ops->update_dec;\n-\t\ts->ops.finalize = gcm_ops->finalize_dec;\n-\t\tbreak;\n-\tcase AESNI_GMAC_OP_GENERATE:\n-\tcase AESNI_GMAC_OP_VERIFY:\n-\t\ts->ops.finalize = gcm_ops->finalize_enc;\n-\t\tbreak;\n-\t}\n-}\n-\n-/** Parse crypto xform chain and set private session parameters */\n-int\n-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,\n-\t\tstruct aesni_gcm_session *sess,\n-\t\tconst struct rte_crypto_sym_xform *xform)\n-{\n-\tconst struct rte_crypto_sym_xform *auth_xform;\n-\tconst struct rte_crypto_sym_xform *aead_xform;\n-\tuint8_t key_length;\n-\tconst uint8_t *key;\n-\n-\t/* AES-GMAC */\n-\tif (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {\n-\t\tauth_xform = xform;\n-\t\tif (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {\n-\t\t\tAESNI_GCM_LOG(ERR, \"Only AES GMAC is supported as an \"\n-\t\t\t\t\"authentication only algorithm\");\n-\t\t\treturn -ENOTSUP;\n-\t\t}\n-\t\t/* Set IV parameters */\n-\t\tsess->iv.offset = auth_xform->auth.iv.offset;\n-\t\tsess->iv.length = auth_xform->auth.iv.length;\n-\n-\t\t/* Select Crypto operation */\n-\t\tif (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)\n-\t\t\tsess->op = AESNI_GMAC_OP_GENERATE;\n-\t\telse\n-\t\t\tsess->op = AESNI_GMAC_OP_VERIFY;\n-\n-\t\tkey_length = auth_xform->auth.key.length;\n-\t\tkey = auth_xform->auth.key.data;\n-\t\tsess->req_digest_length = auth_xform->auth.digest_length;\n-\n-\t/* AES-GCM */\n-\t} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {\n-\t\taead_xform = xform;\n-\n-\t\tif (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {\n-\t\t\tAESNI_GCM_LOG(ERR, \"The only combined operation \"\n-\t\t\t\t\t\t\"supported is AES 
GCM\");\n-\t\t\treturn -ENOTSUP;\n-\t\t}\n-\n-\t\t/* Set IV parameters */\n-\t\tsess->iv.offset = aead_xform->aead.iv.offset;\n-\t\tsess->iv.length = aead_xform->aead.iv.length;\n-\n-\t\t/* Select Crypto operation */\n-\t\tif (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)\n-\t\t\tsess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;\n-\t\t/* op == RTE_CRYPTO_AEAD_OP_DECRYPT */\n-\t\telse\n-\t\t\tsess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;\n-\n-\t\tkey_length = aead_xform->aead.key.length;\n-\t\tkey = aead_xform->aead.key.data;\n-\n-\t\tsess->aad_length = aead_xform->aead.aad_length;\n-\t\tsess->req_digest_length = aead_xform->aead.digest_length;\n-\t} else {\n-\t\tAESNI_GCM_LOG(ERR, \"Wrong xform type, has to be AEAD or authentication\");\n-\t\treturn -ENOTSUP;\n-\t}\n-\n-\t/* IV check */\n-\tif (sess->iv.length != 16 && sess->iv.length != 12 &&\n-\t\t\tsess->iv.length != 0) {\n-\t\tAESNI_GCM_LOG(ERR, \"Wrong IV length\");\n-\t\treturn -EINVAL;\n-\t}\n-\n-\t/* Check key length and calculate GCM pre-compute. 
*/\n-\tswitch (key_length) {\n-\tcase 16:\n-\t\tsess->key = GCM_KEY_128;\n-\t\tbreak;\n-\tcase 24:\n-\t\tsess->key = GCM_KEY_192;\n-\t\tbreak;\n-\tcase 32:\n-\t\tsess->key = GCM_KEY_256;\n-\t\tbreak;\n-\tdefault:\n-\t\tAESNI_GCM_LOG(ERR, \"Invalid key length\");\n-\t\treturn -EINVAL;\n-\t}\n-\n-\t/* setup session handlers */\n-\tset_func_ops(sess, &gcm_ops[sess->key]);\n-\n-\t/* pre-generate key */\n-\tgcm_ops[sess->key].pre(key, &sess->gdata_key);\n-\n-\t/* Digest check */\n-\tif (sess->req_digest_length > 16) {\n-\t\tAESNI_GCM_LOG(ERR, \"Invalid digest length\");\n-\t\treturn -EINVAL;\n-\t}\n-\t/*\n-\t * Multi-buffer lib supports digest sizes from 4 to 16 bytes\n-\t * in version 0.50 and sizes of 8, 12 and 16 bytes,\n-\t * in version 0.49.\n-\t * If size requested is different, generate the full digest\n-\t * (16 bytes) in a temporary location and then memcpy\n-\t * the requested number of bytes.\n-\t */\n-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)\n-\tif (sess->req_digest_length < 4)\n-#else\n-\tif (sess->req_digest_length != 16 &&\n-\t\t\tsess->req_digest_length != 12 &&\n-\t\t\tsess->req_digest_length != 8)\n-#endif\n-\t\tsess->gen_digest_length = 16;\n-\telse\n-\t\tsess->gen_digest_length = sess->req_digest_length;\n-\n-\treturn 0;\n-}\n-\n-/** Get gcm session */\n-static struct aesni_gcm_session *\n-aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)\n-{\n-\tstruct aesni_gcm_session *sess = NULL;\n-\tstruct rte_crypto_sym_op *sym_op = op->sym;\n-\n-\tif (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n-\t\tif (likely(sym_op->session != NULL))\n-\t\t\tsess = (struct aesni_gcm_session *)\n-\t\t\t\t\tget_sym_session_private_data(\n-\t\t\t\t\tsym_op->session,\n-\t\t\t\t\tcryptodev_driver_id);\n-\t} else  {\n-\t\tvoid *_sess;\n-\t\tvoid *_sess_private_data = NULL;\n-\n-\t\tif (rte_mempool_get(qp->sess_mp, (void **)&_sess))\n-\t\t\treturn NULL;\n-\n-\t\tif (rte_mempool_get(qp->sess_mp_priv,\n-\t\t\t\t(void 
**)&_sess_private_data))\n-\t\t\treturn NULL;\n-\n-\t\tsess = (struct aesni_gcm_session *)_sess_private_data;\n-\n-\t\tif (unlikely(aesni_gcm_set_session_parameters(qp->ops,\n-\t\t\t\tsess, sym_op->xform) != 0)) {\n-\t\t\trte_mempool_put(qp->sess_mp, _sess);\n-\t\t\trte_mempool_put(qp->sess_mp_priv, _sess_private_data);\n-\t\t\tsess = NULL;\n-\t\t}\n-\t\tsym_op->session = (struct rte_cryptodev_sym_session *)_sess;\n-\t\tset_sym_session_private_data(sym_op->session,\n-\t\t\t\tcryptodev_driver_id, _sess_private_data);\n-\t}\n-\n-\tif (unlikely(sess == NULL))\n-\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;\n-\n-\treturn sess;\n-}\n-\n-/**\n- * Process a crypto operation, calling\n- * the GCM API from the multi buffer library.\n- *\n- * @param\tqp\t\tqueue pair\n- * @param\top\t\tsymmetric crypto operation\n- * @param\tsession\t\tGCM session\n- *\n- * @return\n- *\n- */\n-static int\n-process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,\n-\t\tstruct aesni_gcm_session *session)\n-{\n-\tuint8_t *src, *dst;\n-\tuint8_t *iv_ptr;\n-\tstruct rte_crypto_sym_op *sym_op = op->sym;\n-\tstruct rte_mbuf *m_src = sym_op->m_src;\n-\tuint32_t offset, data_offset, data_length;\n-\tuint32_t part_len, total_len, data_len;\n-\tuint8_t *tag;\n-\tunsigned int oop = 0;\n-\n-\tif (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||\n-\t\t\tsession->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {\n-\t\toffset = sym_op->aead.data.offset;\n-\t\tdata_offset = offset;\n-\t\tdata_length = sym_op->aead.data.length;\n-\t} else {\n-\t\toffset = sym_op->auth.data.offset;\n-\t\tdata_offset = offset;\n-\t\tdata_length = sym_op->auth.data.length;\n-\t}\n-\n-\tRTE_ASSERT(m_src != NULL);\n-\n-\twhile (offset >= m_src->data_len && data_length != 0) {\n-\t\toffset -= m_src->data_len;\n-\t\tm_src = m_src->next;\n-\n-\t\tRTE_ASSERT(m_src != NULL);\n-\t}\n-\n-\tsrc = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);\n-\n-\tdata_len = m_src->data_len - offset;\n-\tpart_len = 
(data_len < data_length) ? data_len :\n-\t\t\tdata_length;\n-\n-\tRTE_ASSERT((sym_op->m_dst == NULL) ||\n-\t\t\t((sym_op->m_dst != NULL) &&\n-\t\t\t\t\trte_pktmbuf_is_contiguous(sym_op->m_dst)));\n-\n-\t/* In-place */\n-\tif (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))\n-\t\tdst = src;\n-\t/* Out-of-place */\n-\telse {\n-\t\toop = 1;\n-\t\t/* Segmented destination buffer is not supported if operation is\n-\t\t * Out-of-place */\n-\t\tRTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));\n-\t\tdst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,\n-\t\t\t\t\tdata_offset);\n-\t}\n-\n-\tiv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,\n-\t\t\t\tsession->iv.offset);\n-\n-\tif (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {\n-\t\tqp->ops[session->key].init(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\tiv_ptr,\n-\t\t\t\tsym_op->aead.aad.data,\n-\t\t\t\t(uint64_t)session->aad_length);\n-\n-\t\tqp->ops[session->key].update_enc(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx, dst, src,\n-\t\t\t\t(uint64_t)part_len);\n-\t\ttotal_len = data_length - part_len;\n-\n-\t\twhile (total_len) {\n-\t\t\tm_src = m_src->next;\n-\n-\t\t\tRTE_ASSERT(m_src != NULL);\n-\n-\t\t\tsrc = rte_pktmbuf_mtod(m_src, uint8_t *);\n-\t\t\tif (oop)\n-\t\t\t\tdst += part_len;\n-\t\t\telse\n-\t\t\t\tdst = src;\n-\t\t\tpart_len = (m_src->data_len < total_len) ?\n-\t\t\t\t\tm_src->data_len : total_len;\n-\n-\t\t\tqp->ops[session->key].update_enc(&session->gdata_key,\n-\t\t\t\t\t&qp->gdata_ctx, dst, src,\n-\t\t\t\t\t(uint64_t)part_len);\n-\t\t\ttotal_len -= part_len;\n-\t\t}\n-\n-\t\tif (session->req_digest_length != session->gen_digest_length)\n-\t\t\ttag = qp->temp_digest;\n-\t\telse\n-\t\t\ttag = sym_op->aead.digest.data;\n-\n-\t\tqp->ops[session->key].finalize_enc(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\ttag,\n-\t\t\t\tsession->gen_digest_length);\n-\t} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) 
{\n-\t\tqp->ops[session->key].init(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\tiv_ptr,\n-\t\t\t\tsym_op->aead.aad.data,\n-\t\t\t\t(uint64_t)session->aad_length);\n-\n-\t\tqp->ops[session->key].update_dec(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx, dst, src,\n-\t\t\t\t(uint64_t)part_len);\n-\t\ttotal_len = data_length - part_len;\n-\n-\t\twhile (total_len) {\n-\t\t\tm_src = m_src->next;\n-\n-\t\t\tRTE_ASSERT(m_src != NULL);\n-\n-\t\t\tsrc = rte_pktmbuf_mtod(m_src, uint8_t *);\n-\t\t\tif (oop)\n-\t\t\t\tdst += part_len;\n-\t\t\telse\n-\t\t\t\tdst = src;\n-\t\t\tpart_len = (m_src->data_len < total_len) ?\n-\t\t\t\t\tm_src->data_len : total_len;\n-\n-\t\t\tqp->ops[session->key].update_dec(&session->gdata_key,\n-\t\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\t\tdst, src,\n-\t\t\t\t\t(uint64_t)part_len);\n-\t\t\ttotal_len -= part_len;\n-\t\t}\n-\n-\t\ttag = qp->temp_digest;\n-\t\tqp->ops[session->key].finalize_dec(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\ttag,\n-\t\t\t\tsession->gen_digest_length);\n-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM\n-\t} else if (session->op == AESNI_GMAC_OP_GENERATE) {\n-\t\tqp->ops[session->key].gmac_init(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\tiv_ptr,\n-\t\t\t\tsession->iv.length);\n-\n-\t\tqp->ops[session->key].gmac_update(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx, src,\n-\t\t\t\t(uint64_t)part_len);\n-\t\ttotal_len = data_length - part_len;\n-\n-\t\twhile (total_len) {\n-\t\t\tm_src = m_src->next;\n-\n-\t\t\tRTE_ASSERT(m_src != NULL);\n-\n-\t\t\tsrc = rte_pktmbuf_mtod(m_src, uint8_t *);\n-\t\t\tpart_len = (m_src->data_len < total_len) ?\n-\t\t\t\t\tm_src->data_len : total_len;\n-\n-\t\t\tqp->ops[session->key].gmac_update(&session->gdata_key,\n-\t\t\t\t\t&qp->gdata_ctx, src,\n-\t\t\t\t\t(uint64_t)part_len);\n-\t\t\ttotal_len -= part_len;\n-\t\t}\n-\n-\t\tif (session->req_digest_length != session->gen_digest_length)\n-\t\t\ttag = qp->temp_digest;\n-\t\telse\n-\t\t\ttag = 
sym_op->auth.digest.data;\n-\n-\t\tqp->ops[session->key].gmac_finalize(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\ttag,\n-\t\t\t\tsession->gen_digest_length);\n-\t} else { /* AESNI_GMAC_OP_VERIFY */\n-\t\tqp->ops[session->key].gmac_init(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\tiv_ptr,\n-\t\t\t\tsession->iv.length);\n-\n-\t\tqp->ops[session->key].gmac_update(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx, src,\n-\t\t\t\t(uint64_t)part_len);\n-\t\ttotal_len = data_length - part_len;\n-\n-\t\twhile (total_len) {\n-\t\t\tm_src = m_src->next;\n-\n-\t\t\tRTE_ASSERT(m_src != NULL);\n-\n-\t\t\tsrc = rte_pktmbuf_mtod(m_src, uint8_t *);\n-\t\t\tpart_len = (m_src->data_len < total_len) ?\n-\t\t\t\t\tm_src->data_len : total_len;\n-\n-\t\t\tqp->ops[session->key].gmac_update(&session->gdata_key,\n-\t\t\t\t\t&qp->gdata_ctx, src,\n-\t\t\t\t\t(uint64_t)part_len);\n-\t\t\ttotal_len -= part_len;\n-\t\t}\n-\n-\t\ttag = qp->temp_digest;\n-\n-\t\tqp->ops[session->key].gmac_finalize(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\ttag,\n-\t\t\t\tsession->gen_digest_length);\n-\t}\n-#else\n-\t} else if (session->op == AESNI_GMAC_OP_GENERATE) {\n-\t\tqp->ops[session->key].init(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\tiv_ptr,\n-\t\t\t\tsrc,\n-\t\t\t\t(uint64_t)data_length);\n-\t\tif (session->req_digest_length != session->gen_digest_length)\n-\t\t\ttag = qp->temp_digest;\n-\t\telse\n-\t\t\ttag = sym_op->auth.digest.data;\n-\t\tqp->ops[session->key].finalize_enc(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\ttag,\n-\t\t\t\tsession->gen_digest_length);\n-\t} else { /* AESNI_GMAC_OP_VERIFY */\n-\t\tqp->ops[session->key].init(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\tiv_ptr,\n-\t\t\t\tsrc,\n-\t\t\t\t(uint64_t)data_length);\n-\n-\t\t/*\n-\t\t * Generate always 16 bytes and later compare only\n-\t\t * the bytes passed.\n-\t\t */\n-\t\ttag = 
qp->temp_digest;\n-\t\tqp->ops[session->key].finalize_enc(&session->gdata_key,\n-\t\t\t\t&qp->gdata_ctx,\n-\t\t\t\ttag,\n-\t\t\t\tsession->gen_digest_length);\n-\t}\n-#endif\n-\n-\treturn 0;\n-}\n-\n-static inline void\n-aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)\n-{\n-\tuint32_t i;\n-\n-\tfor (i = 0; i < vec->num; i++)\n-\t\tvec->status[i] = errnum;\n-}\n-\n-\n-static inline int32_t\n-aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,\n-\tstruct gcm_context_data *gdata_ctx, uint8_t *digest)\n-{\n-\tif (s->req_digest_length != s->gen_digest_length) {\n-\t\tuint8_t tmpdigest[s->gen_digest_length];\n-\n-\t\ts->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,\n-\t\t\ts->gen_digest_length);\n-\t\tmemcpy(digest, tmpdigest, s->req_digest_length);\n-\t} else {\n-\t\ts->ops.finalize(&s->gdata_key, gdata_ctx, digest,\n-\t\t\ts->gen_digest_length);\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static inline int32_t\n-aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,\n-\tstruct gcm_context_data *gdata_ctx, uint8_t *digest)\n-{\n-\tuint8_t tmpdigest[s->gen_digest_length];\n-\n-\ts->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,\n-\t\ts->gen_digest_length);\n-\n-\treturn memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 
0 :\n-\t\tEBADMSG;\n-}\n-\n-static inline void\n-aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,\n-\tstruct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,\n-\tvoid *iv, void *aad)\n-{\n-\tuint32_t i;\n-\n-\t/* init crypto operation */\n-\ts->ops.init(&s->gdata_key, gdata_ctx, iv, aad,\n-\t\t(uint64_t)s->aad_length);\n-\n-\t/* update with sgl data */\n-\tfor (i = 0; i < sgl->num; i++) {\n-\t\tstruct rte_crypto_vec *vec = &sgl->vec[i];\n-\n-\t\ts->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,\n-\t\t\tvec->len);\n-\t}\n-}\n-\n-static inline void\n-aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,\n-\tstruct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,\n-\tvoid *iv)\n-{\n-\ts->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,\n-\t\tsgl->vec[0].len);\n-}\n-\n-static inline uint32_t\n-aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,\n-\tstruct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)\n-{\n-\tuint32_t i, processed;\n-\n-\tprocessed = 0;\n-\tfor (i = 0; i < vec->num; ++i) {\n-\t\taesni_gcm_process_gcm_sgl_op(s, gdata_ctx,\n-\t\t\t&vec->sgl[i], vec->iv[i].va,\n-\t\t\tvec->aad[i].va);\n-\t\tvec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,\n-\t\t\tgdata_ctx, vec->digest[i].va);\n-\t\tprocessed += (vec->status[i] == 0);\n-\t}\n-\n-\treturn processed;\n-}\n-\n-static inline uint32_t\n-aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,\n-\tstruct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)\n-{\n-\tuint32_t i, processed;\n-\n-\tprocessed = 0;\n-\tfor (i = 0; i < vec->num; ++i) {\n-\t\taesni_gcm_process_gcm_sgl_op(s, gdata_ctx,\n-\t\t\t&vec->sgl[i], vec->iv[i].va,\n-\t\t\tvec->aad[i].va);\n-\t\t vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,\n-\t\t\tgdata_ctx, vec->digest[i].va);\n-\t\tprocessed += (vec->status[i] == 0);\n-\t}\n-\n-\treturn processed;\n-}\n-\n-static inline uint32_t\n-aesni_gmac_sgl_generate(struct aesni_gcm_session *s,\n-\tstruct 
gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)\n-{\n-\tuint32_t i, processed;\n-\n-\tprocessed = 0;\n-\tfor (i = 0; i < vec->num; ++i) {\n-\t\tif (vec->sgl[i].num != 1) {\n-\t\t\tvec->status[i] = ENOTSUP;\n-\t\t\tcontinue;\n-\t\t}\n-\n-\t\taesni_gcm_process_gmac_sgl_op(s, gdata_ctx,\n-\t\t\t&vec->sgl[i], vec->iv[i].va);\n-\t\tvec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,\n-\t\t\tgdata_ctx, vec->digest[i].va);\n-\t\tprocessed += (vec->status[i] == 0);\n-\t}\n-\n-\treturn processed;\n-}\n-\n-static inline uint32_t\n-aesni_gmac_sgl_verify(struct aesni_gcm_session *s,\n-\tstruct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)\n-{\n-\tuint32_t i, processed;\n-\n-\tprocessed = 0;\n-\tfor (i = 0; i < vec->num; ++i) {\n-\t\tif (vec->sgl[i].num != 1) {\n-\t\t\tvec->status[i] = ENOTSUP;\n-\t\t\tcontinue;\n-\t\t}\n-\n-\t\taesni_gcm_process_gmac_sgl_op(s, gdata_ctx,\n-\t\t\t&vec->sgl[i], vec->iv[i].va);\n-\t\tvec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,\n-\t\t\tgdata_ctx, vec->digest[i].va);\n-\t\tprocessed += (vec->status[i] == 0);\n-\t}\n-\n-\treturn processed;\n-}\n-\n-/** Process CPU crypto bulk operations */\n-uint32_t\n-aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,\n-\tstruct rte_cryptodev_sym_session *sess,\n-\t__rte_unused union rte_crypto_sym_ofs ofs,\n-\tstruct rte_crypto_sym_vec *vec)\n-{\n-\tvoid *sess_priv;\n-\tstruct aesni_gcm_session *s;\n-\tstruct gcm_context_data gdata_ctx;\n-\n-\tsess_priv = get_sym_session_private_data(sess, dev->driver_id);\n-\tif (unlikely(sess_priv == NULL)) {\n-\t\taesni_gcm_fill_error_code(vec, EINVAL);\n-\t\treturn 0;\n-\t}\n-\n-\ts = sess_priv;\n-\tswitch (s->op) {\n-\tcase AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:\n-\t\treturn aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);\n-\tcase AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:\n-\t\treturn aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);\n-\tcase AESNI_GMAC_OP_GENERATE:\n-\t\treturn aesni_gmac_sgl_generate(s, &gdata_ctx, vec);\n-\tcase 
AESNI_GMAC_OP_VERIFY:\n-\t\treturn aesni_gmac_sgl_verify(s, &gdata_ctx, vec);\n-\tdefault:\n-\t\taesni_gcm_fill_error_code(vec, EINVAL);\n-\t\treturn 0;\n-\t}\n-}\n-\n-/**\n- * Process a completed job and return rte_mbuf which job processed\n- *\n- * @param job\tJOB_AES_HMAC job to process\n- *\n- * @return\n- * - Returns processed mbuf which is trimmed of output digest used in\n- * verification of supplied digest in the case of a HASH_CIPHER operation\n- * - Returns NULL on invalid job\n- */\n-static void\n-post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,\n-\t\tstruct rte_crypto_op *op,\n-\t\tstruct aesni_gcm_session *session)\n-{\n-\top->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n-\n-\t/* Verify digest if required */\n-\tif (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||\n-\t\t\tsession->op == AESNI_GMAC_OP_VERIFY) {\n-\t\tuint8_t *digest;\n-\n-\t\tuint8_t *tag = qp->temp_digest;\n-\n-\t\tif (session->op == AESNI_GMAC_OP_VERIFY)\n-\t\t\tdigest = op->sym->auth.digest.data;\n-\t\telse\n-\t\t\tdigest = op->sym->aead.digest.data;\n-\n-#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG\n-\t\trte_hexdump(stdout, \"auth tag (orig):\",\n-\t\t\t\tdigest, session->req_digest_length);\n-\t\trte_hexdump(stdout, \"auth tag (calc):\",\n-\t\t\t\ttag, session->req_digest_length);\n-#endif\n-\n-\t\tif (memcmp(tag, digest,\tsession->req_digest_length) != 0)\n-\t\t\top->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;\n-\t} else {\n-\t\tif (session->req_digest_length != session->gen_digest_length) {\n-\t\t\tif (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION)\n-\t\t\t\tmemcpy(op->sym->aead.digest.data, qp->temp_digest,\n-\t\t\t\t\t\tsession->req_digest_length);\n-\t\t\telse\n-\t\t\t\tmemcpy(op->sym->auth.digest.data, qp->temp_digest,\n-\t\t\t\t\t\tsession->req_digest_length);\n-\t\t}\n-\t}\n-}\n-\n-/**\n- * Process a completed GCM request\n- *\n- * @param qp\t\tQueue Pair to process\n- * @param op\t\tCrypto operation\n- * @param job\t\tJOB_AES_HMAC job\n- *\n- * @return\n- * - Number of 
processed jobs\n- */\n-static void\n-handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,\n-\t\tstruct rte_crypto_op *op,\n-\t\tstruct aesni_gcm_session *sess)\n-{\n-\tpost_process_gcm_crypto_op(qp, op, sess);\n-\n-\t/* Free session if a session-less crypto op */\n-\tif (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {\n-\t\tmemset(sess, 0, sizeof(struct aesni_gcm_session));\n-\t\tmemset(op->sym->session, 0,\n-\t\t\trte_cryptodev_sym_get_existing_header_session_size(\n-\t\t\t\top->sym->session));\n-\t\trte_mempool_put(qp->sess_mp_priv, sess);\n-\t\trte_mempool_put(qp->sess_mp, op->sym->session);\n-\t\top->sym->session = NULL;\n-\t}\n-}\n-\n-static uint16_t\n-aesni_gcm_pmd_dequeue_burst(void *queue_pair,\n-\t\tstruct rte_crypto_op **ops, uint16_t nb_ops)\n-{\n-\tstruct aesni_gcm_session *sess;\n-\tstruct aesni_gcm_qp *qp = queue_pair;\n-\n-\tint retval = 0;\n-\tunsigned int i, nb_dequeued;\n-\n-\tnb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,\n-\t\t\t(void **)ops, nb_ops, NULL);\n-\n-\tfor (i = 0; i < nb_dequeued; i++) {\n-\n-\t\tsess = aesni_gcm_get_session(qp, ops[i]);\n-\t\tif (unlikely(sess == NULL)) {\n-\t\t\tops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n-\t\t\tqp->qp_stats.dequeue_err_count++;\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\tretval = process_gcm_crypto_op(qp, ops[i], sess);\n-\t\tif (retval < 0) {\n-\t\t\tops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n-\t\t\tqp->qp_stats.dequeue_err_count++;\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\thandle_completed_gcm_crypto_op(qp, ops[i], sess);\n-\t}\n-\n-\tqp->qp_stats.dequeued_count += i;\n-\n-\treturn i;\n-}\n-\n-static uint16_t\n-aesni_gcm_pmd_enqueue_burst(void *queue_pair,\n-\t\tstruct rte_crypto_op **ops, uint16_t nb_ops)\n-{\n-\tstruct aesni_gcm_qp *qp = queue_pair;\n-\n-\tunsigned int nb_enqueued;\n-\n-\tnb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,\n-\t\t\t(void **)ops, nb_ops, NULL);\n-\tqp->qp_stats.enqueued_count += nb_enqueued;\n-\n-\treturn nb_enqueued;\n-}\n-\n-static int 
aesni_gcm_remove(struct rte_vdev_device *vdev);\n-\n-static int\n-aesni_gcm_create(const char *name,\n-\t\tstruct rte_vdev_device *vdev,\n-\t\tstruct rte_cryptodev_pmd_init_params *init_params)\n-{\n-\tstruct rte_cryptodev *dev;\n-\tstruct aesni_gcm_private *internals;\n-\tenum aesni_gcm_vector_mode vector_mode;\n-\tMB_MGR *mb_mgr;\n-\n-\tdev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);\n-\tif (dev == NULL) {\n-\t\tAESNI_GCM_LOG(ERR, \"driver %s: create failed\",\n-\t\t\tinit_params->name);\n-\t\treturn -ENODEV;\n-\t}\n-\n-\t/* Check CPU for supported vector instruction set */\n-\tif (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))\n-\t\tvector_mode = RTE_AESNI_GCM_AVX512;\n-\telse if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))\n-\t\tvector_mode = RTE_AESNI_GCM_AVX2;\n-\telse if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))\n-\t\tvector_mode = RTE_AESNI_GCM_AVX;\n-\telse\n-\t\tvector_mode = RTE_AESNI_GCM_SSE;\n-\n-\tdev->driver_id = cryptodev_driver_id;\n-\tdev->dev_ops = rte_aesni_gcm_pmd_ops;\n-\n-\t/* register rx/tx burst functions for data path */\n-\tdev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;\n-\tdev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;\n-\n-\tdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |\n-\t\t\tRTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |\n-\t\t\tRTE_CRYPTODEV_FF_IN_PLACE_SGL |\n-\t\t\tRTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |\n-\t\t\tRTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |\n-\t\t\tRTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |\n-\t\t\tRTE_CRYPTODEV_FF_SYM_SESSIONLESS;\n-\n-\t/* Check CPU for support for AES instruction set */\n-\tif (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))\n-\t\tdev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;\n-\telse\n-\t\tAESNI_GCM_LOG(WARNING, \"AES instructions not supported by CPU\");\n-\n-\tmb_mgr = alloc_mb_mgr(0);\n-\tif (mb_mgr == NULL)\n-\t\treturn -ENOMEM;\n-\n-\tswitch (vector_mode) {\n-\tcase RTE_AESNI_GCM_SSE:\n-\t\tdev->feature_flags |= 
RTE_CRYPTODEV_FF_CPU_SSE;\n-\t\tinit_mb_mgr_sse(mb_mgr);\n-\t\tbreak;\n-\tcase RTE_AESNI_GCM_AVX:\n-\t\tdev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;\n-\t\tinit_mb_mgr_avx(mb_mgr);\n-\t\tbreak;\n-\tcase RTE_AESNI_GCM_AVX2:\n-\t\tdev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;\n-\t\tinit_mb_mgr_avx2(mb_mgr);\n-\t\tbreak;\n-\tcase RTE_AESNI_GCM_AVX512:\n-\t\tif (rte_cpu_get_flag_enabled(RTE_CPUFLAG_VAES)) {\n-\t\t\tdev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;\n-\t\t\tinit_mb_mgr_avx512(mb_mgr);\n-\t\t} else {\n-\t\t\tdev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;\n-\t\t\tinit_mb_mgr_avx2(mb_mgr);\n-\t\t\tvector_mode = RTE_AESNI_GCM_AVX2;\n-\t\t}\n-\t\tbreak;\n-\tdefault:\n-\t\tAESNI_GCM_LOG(ERR, \"Unsupported vector mode %u\\n\", vector_mode);\n-\t\tgoto error_exit;\n-\t}\n-\n-\tinternals = dev->data->dev_private;\n-\n-\tinternals->vector_mode = vector_mode;\n-\tinternals->mb_mgr = mb_mgr;\n-\n-\t/* Set arch independent function pointers, based on key size */\n-\tinternals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;\n-\tinternals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;\n-\tinternals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;\n-\tinternals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;\n-\tinternals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;\n-\tinternals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;\n-\tinternals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;\n-\tinternals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;\n-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM\n-\tinternals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;\n-\tinternals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;\n-\tinternals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;\n-#endif\n-\n-\tinternals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;\n-\tinternals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;\n-\tinternals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;\n-\tinternals->ops[GCM_KEY_192].init = 
mb_mgr->gcm192_init;\n-\tinternals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;\n-\tinternals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;\n-\tinternals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;\n-\tinternals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;\n-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM\n-\tinternals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;\n-\tinternals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;\n-\tinternals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;\n-#endif\n-\n-\tinternals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;\n-\tinternals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;\n-\tinternals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;\n-\tinternals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;\n-\tinternals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;\n-\tinternals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;\n-\tinternals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;\n-\tinternals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;\n-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM\n-\tinternals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;\n-\tinternals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;\n-\tinternals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;\n-#endif\n-\n-\tinternals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;\n-\n-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)\n-\tAESNI_GCM_LOG(INFO, \"IPSec Multi-buffer library version used: %s\\n\",\n-\t\t\timb_get_version_str());\n-#else\n-\tAESNI_GCM_LOG(INFO, \"IPSec Multi-buffer library version used: 0.49.0\\n\");\n-#endif\n-\n-\treturn 0;\n-\n-error_exit:\n-\tif (mb_mgr)\n-\t\tfree_mb_mgr(mb_mgr);\n-\n-\trte_cryptodev_pmd_destroy(dev);\n-\n-\treturn -1;\n-}\n-\n-static int\n-aesni_gcm_probe(struct rte_vdev_device *vdev)\n-{\n-\tstruct rte_cryptodev_pmd_init_params init_params = 
{\n-\t\t\"\",\n-\t\tsizeof(struct aesni_gcm_private),\n-\t\trte_socket_id(),\n-\t\tRTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS\n-\t};\n-\tconst char *name;\n-\tconst char *input_args;\n-\n-\tname = rte_vdev_device_name(vdev);\n-\tif (name == NULL)\n-\t\treturn -EINVAL;\n-\tinput_args = rte_vdev_device_args(vdev);\n-\trte_cryptodev_pmd_parse_input_args(&init_params, input_args);\n-\n-\treturn aesni_gcm_create(name, vdev, &init_params);\n-}\n-\n-static int\n-aesni_gcm_remove(struct rte_vdev_device *vdev)\n-{\n-\tstruct rte_cryptodev *cryptodev;\n-\tstruct aesni_gcm_private *internals;\n-\tconst char *name;\n-\n-\tname = rte_vdev_device_name(vdev);\n-\tif (name == NULL)\n-\t\treturn -EINVAL;\n-\n-\tcryptodev = rte_cryptodev_pmd_get_named_dev(name);\n-\tif (cryptodev == NULL)\n-\t\treturn -ENODEV;\n-\n-\tinternals = cryptodev->data->dev_private;\n-\n-\tfree_mb_mgr(internals->mb_mgr);\n-\n-\treturn rte_cryptodev_pmd_destroy(cryptodev);\n-}\n-\n-static struct rte_vdev_driver aesni_gcm_pmd_drv = {\n-\t.probe = aesni_gcm_probe,\n-\t.remove = aesni_gcm_remove\n-};\n-\n-static struct cryptodev_driver aesni_gcm_crypto_drv;\n-\n-RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);\n-RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);\n-RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,\n-\t\"max_nb_queue_pairs=<int> \"\n-\t\"socket_id=<int>\");\n-RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,\n-\t\tcryptodev_driver_id);\n-RTE_LOG_REGISTER_DEFAULT(aesni_gcm_logtype_driver, NOTICE);\ndiff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c\ndeleted file mode 100644\nindex edb7275e76..0000000000\n--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c\n+++ /dev/null\n@@ -1,333 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2016-2020 Intel Corporation\n- */\n-\n-#include <string.h>\n-\n-#include <rte_common.h>\n-#include 
<rte_malloc.h>\n-#include <cryptodev_pmd.h>\n-\n-#include \"aesni_gcm_pmd_private.h\"\n-\n-static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {\n-\t{\t/* AES GMAC (AUTH) */\n-\t\t.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,\n-\t\t{.sym = {\n-\t\t\t.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,\n-\t\t\t{.auth = {\n-\t\t\t\t.algo = RTE_CRYPTO_AUTH_AES_GMAC,\n-\t\t\t\t.block_size = 16,\n-\t\t\t\t.key_size = {\n-\t\t\t\t\t.min = 16,\n-\t\t\t\t\t.max = 32,\n-\t\t\t\t\t.increment = 8\n-\t\t\t\t},\n-\t\t\t\t.digest_size = {\n-\t\t\t\t\t.min = 1,\n-\t\t\t\t\t.max = 16,\n-\t\t\t\t\t.increment = 1\n-\t\t\t\t},\n-\t\t\t\t.iv_size = {\n-\t\t\t\t\t.min = 12,\n-\t\t\t\t\t.max = 12,\n-\t\t\t\t\t.increment = 0\n-\t\t\t\t}\n-\t\t\t}, }\n-\t\t}, }\n-\t},\n-\t{\t/* AES GCM */\n-\t\t.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,\n-\t\t{.sym = {\n-\t\t\t.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,\n-\t\t\t{.aead = {\n-\t\t\t\t.algo = RTE_CRYPTO_AEAD_AES_GCM,\n-\t\t\t\t.block_size = 16,\n-\t\t\t\t.key_size = {\n-\t\t\t\t\t.min = 16,\n-\t\t\t\t\t.max = 32,\n-\t\t\t\t\t.increment = 8\n-\t\t\t\t},\n-\t\t\t\t.digest_size = {\n-\t\t\t\t\t.min = 1,\n-\t\t\t\t\t.max = 16,\n-\t\t\t\t\t.increment = 1\n-\t\t\t\t},\n-\t\t\t\t.aad_size = {\n-\t\t\t\t\t.min = 0,\n-\t\t\t\t\t.max = 65535,\n-\t\t\t\t\t.increment = 1\n-\t\t\t\t},\n-\t\t\t\t.iv_size = {\n-\t\t\t\t\t.min = 12,\n-\t\t\t\t\t.max = 12,\n-\t\t\t\t\t.increment = 0\n-\t\t\t\t}\n-\t\t\t}, }\n-\t\t}, }\n-\t},\n-\tRTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()\n-};\n-\n-/** Configure device */\n-static int\n-aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,\n-\t\t__rte_unused struct rte_cryptodev_config *config)\n-{\n-\treturn 0;\n-}\n-\n-/** Start device */\n-static int\n-aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)\n-{\n-\treturn 0;\n-}\n-\n-/** Stop device */\n-static void\n-aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)\n-{\n-}\n-\n-/** Close device */\n-static int\n-aesni_gcm_pmd_close(__rte_unused struct 
rte_cryptodev *dev)\n-{\n-\treturn 0;\n-}\n-\n-\n-/** Get device statistics */\n-static void\n-aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,\n-\t\tstruct rte_cryptodev_stats *stats)\n-{\n-\tint qp_id;\n-\n-\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {\n-\t\tstruct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];\n-\n-\t\tstats->enqueued_count += qp->qp_stats.enqueued_count;\n-\t\tstats->dequeued_count += qp->qp_stats.dequeued_count;\n-\n-\t\tstats->enqueue_err_count += qp->qp_stats.enqueue_err_count;\n-\t\tstats->dequeue_err_count += qp->qp_stats.dequeue_err_count;\n-\t}\n-}\n-\n-/** Reset device statistics */\n-static void\n-aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)\n-{\n-\tint qp_id;\n-\n-\tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {\n-\t\tstruct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];\n-\n-\t\tmemset(&qp->qp_stats, 0, sizeof(qp->qp_stats));\n-\t}\n-}\n-\n-\n-/** Get device info */\n-static void\n-aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,\n-\t\tstruct rte_cryptodev_info *dev_info)\n-{\n-\tstruct aesni_gcm_private *internals = dev->data->dev_private;\n-\n-\tif (dev_info != NULL) {\n-\t\tdev_info->driver_id = dev->driver_id;\n-\t\tdev_info->feature_flags = dev->feature_flags;\n-\t\tdev_info->capabilities = aesni_gcm_pmd_capabilities;\n-\n-\t\tdev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;\n-\t\t/* No limit of number of sessions */\n-\t\tdev_info->sym.max_nb_sessions = 0;\n-\t}\n-}\n-\n-/** Release queue pair */\n-static int\n-aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)\n-{\n-\tif (dev->data->queue_pairs[qp_id] != NULL) {\n-\t\tstruct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];\n-\n-\t\tif (qp->processed_pkts)\n-\t\t\trte_ring_free(qp->processed_pkts);\n-\n-\t\trte_free(dev->data->queue_pairs[qp_id]);\n-\t\tdev->data->queue_pairs[qp_id] = NULL;\n-\t}\n-\treturn 0;\n-}\n-\n-/** set a unique name for the queue pair based on it's name, dev_id and qp_id 
*/\n-static int\n-aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,\n-\t\tstruct aesni_gcm_qp *qp)\n-{\n-\tunsigned n = snprintf(qp->name, sizeof(qp->name),\n-\t\t\t\"aesni_gcm_pmd_%u_qp_%u\",\n-\t\t\tdev->data->dev_id, qp->id);\n-\n-\tif (n >= sizeof(qp->name))\n-\t\treturn -1;\n-\n-\treturn 0;\n-}\n-\n-/** Create a ring to place process packets on */\n-static struct rte_ring *\n-aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,\n-\t\tunsigned ring_size, int socket_id)\n-{\n-\tstruct rte_ring *r;\n-\n-\tr = rte_ring_lookup(qp->name);\n-\tif (r) {\n-\t\tif (rte_ring_get_size(r) >= ring_size) {\n-\t\t\tAESNI_GCM_LOG(INFO, \"Reusing existing ring %s for processed\"\n-\t\t\t\t\" packets\", qp->name);\n-\t\t\treturn r;\n-\t\t}\n-\t\tAESNI_GCM_LOG(ERR, \"Unable to reuse existing ring %s for processed\"\n-\t\t\t\t\" packets\", qp->name);\n-\t\treturn NULL;\n-\t}\n-\n-\treturn rte_ring_create(qp->name, ring_size, socket_id,\n-\t\t\tRING_F_SP_ENQ | RING_F_SC_DEQ);\n-}\n-\n-/** Setup a queue pair */\n-static int\n-aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n-\t\tconst struct rte_cryptodev_qp_conf *qp_conf,\n-\t\tint socket_id)\n-{\n-\tstruct aesni_gcm_qp *qp = NULL;\n-\tstruct aesni_gcm_private *internals = dev->data->dev_private;\n-\n-\t/* Free memory prior to re-allocation if needed. */\n-\tif (dev->data->queue_pairs[qp_id] != NULL)\n-\t\taesni_gcm_pmd_qp_release(dev, qp_id);\n-\n-\t/* Allocate the queue pair data structure. 
*/\n-\tqp = rte_zmalloc_socket(\"AES-NI PMD Queue Pair\", sizeof(*qp),\n-\t\t\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n-\tif (qp == NULL)\n-\t\treturn (-ENOMEM);\n-\n-\tqp->id = qp_id;\n-\tdev->data->queue_pairs[qp_id] = qp;\n-\n-\tif (aesni_gcm_pmd_qp_set_unique_name(dev, qp))\n-\t\tgoto qp_setup_cleanup;\n-\n-\tqp->ops = (const struct aesni_gcm_ops *)internals->ops;\n-\n-\tqp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,\n-\t\t\tqp_conf->nb_descriptors, socket_id);\n-\tif (qp->processed_pkts == NULL)\n-\t\tgoto qp_setup_cleanup;\n-\n-\tqp->sess_mp = qp_conf->mp_session;\n-\tqp->sess_mp_priv = qp_conf->mp_session_private;\n-\n-\tmemset(&qp->qp_stats, 0, sizeof(qp->qp_stats));\n-\n-\treturn 0;\n-\n-qp_setup_cleanup:\n-\tif (qp)\n-\t\trte_free(qp);\n-\n-\treturn -1;\n-}\n-\n-/** Returns the size of the aesni gcm session structure */\n-static unsigned\n-aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)\n-{\n-\treturn sizeof(struct aesni_gcm_session);\n-}\n-\n-/** Configure a aesni gcm session from a crypto xform chain */\n-static int\n-aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,\n-\t\tstruct rte_crypto_sym_xform *xform,\n-\t\tstruct rte_cryptodev_sym_session *sess,\n-\t\tstruct rte_mempool *mempool)\n-{\n-\tvoid *sess_private_data;\n-\tint ret;\n-\tstruct aesni_gcm_private *internals = dev->data->dev_private;\n-\n-\tif (unlikely(sess == NULL)) {\n-\t\tAESNI_GCM_LOG(ERR, \"invalid session struct\");\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tif (rte_mempool_get(mempool, &sess_private_data)) {\n-\t\tAESNI_GCM_LOG(ERR,\n-\t\t\t\t\"Couldn't get object from session mempool\");\n-\t\treturn -ENOMEM;\n-\t}\n-\tret = aesni_gcm_set_session_parameters(internals->ops,\n-\t\t\t\tsess_private_data, xform);\n-\tif (ret != 0) {\n-\t\tAESNI_GCM_LOG(ERR, \"failed configure session parameters\");\n-\n-\t\t/* Return session to mempool */\n-\t\trte_mempool_put(mempool, sess_private_data);\n-\t\treturn 
ret;\n-\t}\n-\n-\tset_sym_session_private_data(sess, dev->driver_id,\n-\t\t\tsess_private_data);\n-\n-\treturn 0;\n-}\n-\n-/** Clear the memory of session so it doesn't leave key material behind */\n-static void\n-aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,\n-\t\tstruct rte_cryptodev_sym_session *sess)\n-{\n-\tuint8_t index = dev->driver_id;\n-\tvoid *sess_priv = get_sym_session_private_data(sess, index);\n-\n-\t/* Zero out the whole structure */\n-\tif (sess_priv) {\n-\t\tmemset(sess_priv, 0, sizeof(struct aesni_gcm_session));\n-\t\tstruct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);\n-\t\tset_sym_session_private_data(sess, index, NULL);\n-\t\trte_mempool_put(sess_mp, sess_priv);\n-\t}\n-}\n-\n-struct rte_cryptodev_ops aesni_gcm_pmd_ops = {\n-\t\t.dev_configure\t\t= aesni_gcm_pmd_config,\n-\t\t.dev_start\t\t= aesni_gcm_pmd_start,\n-\t\t.dev_stop\t\t= aesni_gcm_pmd_stop,\n-\t\t.dev_close\t\t= aesni_gcm_pmd_close,\n-\n-\t\t.stats_get\t\t= aesni_gcm_pmd_stats_get,\n-\t\t.stats_reset\t\t= aesni_gcm_pmd_stats_reset,\n-\n-\t\t.dev_infos_get\t\t= aesni_gcm_pmd_info_get,\n-\n-\t\t.queue_pair_setup\t= aesni_gcm_pmd_qp_setup,\n-\t\t.queue_pair_release\t= aesni_gcm_pmd_qp_release,\n-\n-\t\t.sym_cpu_process        = aesni_gcm_pmd_cpu_crypto_process,\n-\n-\t\t.sym_session_get_size\t= aesni_gcm_pmd_sym_session_get_size,\n-\t\t.sym_session_configure\t= aesni_gcm_pmd_sym_session_configure,\n-\t\t.sym_session_clear\t= aesni_gcm_pmd_sym_session_clear\n-};\n-\n-struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;\ndiff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h\ndeleted file mode 100644\nindex 2763d1c492..0000000000\n--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h\n+++ /dev/null\n@@ -1,123 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2016-2020 Intel Corporation\n- */\n-\n-#ifndef _AESNI_GCM_PMD_PRIVATE_H_\n-#define 
_AESNI_GCM_PMD_PRIVATE_H_\n-\n-#include \"aesni_gcm_ops.h\"\n-\n-/*\n- * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,\n- * so if macro is not defined, it means that the version is 0.49.\n- */\n-#if !defined(IMB_VERSION_NUM)\n-#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))\n-#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)\n-#endif\n-\n-#define CRYPTODEV_NAME_AESNI_GCM_PMD\tcrypto_aesni_gcm\n-/**< AES-NI GCM PMD device name */\n-\n-/** AES-NI GCM PMD  LOGTYPE DRIVER */\n-extern int aesni_gcm_logtype_driver;\n-#define AESNI_GCM_LOG(level, fmt, ...) \\\n-\trte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver,\t\\\n-\t\t\t\"%s() line %u: \"fmt \"\\n\", __func__, __LINE__,\t\\\n-\t\t\t\t\t## __VA_ARGS__)\n-\n-/* Maximum length for digest */\n-#define DIGEST_LENGTH_MAX 16\n-\n-/** private data structure for each virtual AESNI GCM device */\n-struct aesni_gcm_private {\n-\tenum aesni_gcm_vector_mode vector_mode;\n-\t/**< Vector mode */\n-\tunsigned max_nb_queue_pairs;\n-\t/**< Max number of queue pairs supported by device */\n-\tMB_MGR *mb_mgr;\n-\t/**< Multi-buffer instance */\n-\tstruct aesni_gcm_ops ops[GCM_KEY_NUM];\n-\t/**< Function pointer table of the gcm APIs */\n-};\n-\n-struct aesni_gcm_qp {\n-\tconst struct aesni_gcm_ops *ops;\n-\t/**< Function pointer table of the gcm APIs */\n-\tstruct rte_ring *processed_pkts;\n-\t/**< Ring for placing process packets */\n-\tstruct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */\n-\t/**< GCM parameters */\n-\tstruct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */\n-\t/**< Queue pair statistics */\n-\tstruct rte_mempool *sess_mp;\n-\t/**< Session Mempool */\n-\tstruct rte_mempool *sess_mp_priv;\n-\t/**< Session Private Data Mempool */\n-\tuint16_t id;\n-\t/**< Queue Pair Identifier */\n-\tchar name[RTE_CRYPTODEV_NAME_MAX_LEN];\n-\t/**< Unique Queue Pair Name */\n-\tuint8_t temp_digest[DIGEST_LENGTH_MAX];\n-\t/**< Buffer used to store the digest generated\n-\t * by the driver when 
verifying a digest provided\n-\t * by the user (using authentication verify operation)\n-\t */\n-} __rte_cache_aligned;\n-\n-\n-enum aesni_gcm_operation {\n-\tAESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,\n-\tAESNI_GCM_OP_AUTHENTICATED_DECRYPTION,\n-\tAESNI_GMAC_OP_GENERATE,\n-\tAESNI_GMAC_OP_VERIFY\n-};\n-\n-/** AESNI GCM private session structure */\n-struct aesni_gcm_session {\n-\tstruct {\n-\t\tuint16_t length;\n-\t\tuint16_t offset;\n-\t} iv;\n-\t/**< IV parameters */\n-\tuint16_t aad_length;\n-\t/**< AAD length */\n-\tuint16_t req_digest_length;\n-\t/**< Requested digest length */\n-\tuint16_t gen_digest_length;\n-\t/**< Generated digest length */\n-\tenum aesni_gcm_operation op;\n-\t/**< GCM operation type */\n-\tenum aesni_gcm_key key;\n-\t/**< GCM key type */\n-\tstruct gcm_key_data gdata_key;\n-\t/**< GCM parameters */\n-\tstruct aesni_gcm_session_ops ops;\n-\t/**< Session handlers */\n-};\n-\n-\n-/**\n- * Setup GCM session parameters\n- * @param\tsess\taesni gcm session structure\n- * @param\txform\tcrypto transform chain\n- *\n- * @return\n- * - On success returns 0\n- * - On failure returns error code < 0\n- */\n-extern int\n-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,\n-\t\tstruct aesni_gcm_session *sess,\n-\t\tconst struct rte_crypto_sym_xform *xform);\n-\n-/* Device specific operations function pointer structure */\n-extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;\n-\n-/** CPU crypto bulk process handler */\n-uint32_t\n-aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,\n-\tstruct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,\n-\tstruct rte_crypto_sym_vec *vec);\n-\n-#endif /* _AESNI_GCM_PMD_PRIVATE_H_ */\ndiff --git a/drivers/crypto/aesni_gcm/meson.build b/drivers/crypto/aesni_gcm/meson.build\ndeleted file mode 100644\nindex 0fcac2a8eb..0000000000\n--- a/drivers/crypto/aesni_gcm/meson.build\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-# SPDX-License-Identifier: BSD-3-Clause\n-# Copyright(c) 2018 Intel 
Corporation\n-\n-IMB_required_ver = '0.52.0'\n-lib = cc.find_library('IPSec_MB', required: false)\n-if not lib.found()\n-    build = false\n-    reason = 'missing dependency, \"libIPSec_MB\"'\n-else\n-    ext_deps += lib\n-\n-    # version comes with quotes, so we split based on \" and take the middle\n-    imb_ver = cc.get_define('IMB_VERSION_STR',\n-        prefix : '#include<intel-ipsec-mb.h>').split('\"')[1]\n-\n-    if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))\n-        reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(\n-                IMB_required_ver, imb_ver)\n-        build = false\n-    endif\n-endif\n-\n-sources = files('aesni_gcm_pmd.c', 'aesni_gcm_pmd_ops.c')\n-deps += ['bus_vdev']\ndiff --git a/drivers/crypto/aesni_gcm/version.map b/drivers/crypto/aesni_gcm/version.map\ndeleted file mode 100644\nindex c2e0723b4c..0000000000\n--- a/drivers/crypto/aesni_gcm/version.map\n+++ /dev/null\n@@ -1,3 +0,0 @@\n-DPDK_22 {\n-\tlocal: *;\n-};\ndiff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build\nindex bac5d85e26..8550eaee9a 100644\n--- a/drivers/crypto/ipsec_mb/meson.build\n+++ b/drivers/crypto/ipsec_mb/meson.build\n@@ -23,6 +23,7 @@ endif\n \n sources = files('rte_ipsec_mb_pmd.c',\n \t\t'rte_ipsec_mb_pmd_ops.c',\n-\t\t'pmd_aesni_mb.c'\n+\t\t'pmd_aesni_mb.c',\n+\t\t'pmd_aesni_gcm.c'\n \t\t)\n deps += ['bus_vdev', 'net', 'security']\ndiff --git a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c\nnew file mode 100644\nindex 0000000000..2fcfa97a63\n--- /dev/null\n+++ b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c\n@@ -0,0 +1,1003 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2015-2021 Intel Corporation\n+ */\n+\n+#include <intel-ipsec-mb.h>\n+\n+#if defined(RTE_LIB_SECURITY)\n+#define AESNI_MB_DOCSIS_SEC_ENABLED 1\n+#include <rte_ether.h>\n+#include <rte_security.h>\n+#include <rte_security_driver.h>\n+#endif\n+\n+#include 
\"rte_ipsec_mb_pmd_private.h\"\n+\n+#define AESNI_GCM_IV_LENGTH 12\n+\n+static const struct rte_cryptodev_capabilities aesni_gcm_capabilities[] = {\n+\t{\t/* AES GMAC (AUTH) */\n+\t\t.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,\n+\t\t{.sym = {\n+\t\t\t.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,\n+\t\t\t{.auth = {\n+\t\t\t\t.algo = RTE_CRYPTO_AUTH_AES_GMAC,\n+\t\t\t\t.block_size = 16,\n+\t\t\t\t.key_size = {\n+\t\t\t\t\t.min = 16,\n+\t\t\t\t\t.max = 32,\n+\t\t\t\t\t.increment = 8\n+\t\t\t\t},\n+\t\t\t\t.digest_size = {\n+\t\t\t\t\t.min = 1,\n+\t\t\t\t\t.max = 16,\n+\t\t\t\t\t.increment = 1\n+\t\t\t\t},\n+\t\t\t\t.iv_size = {\n+\t\t\t\t\t.min = AESNI_GCM_IV_LENGTH,\n+\t\t\t\t\t.max = AESNI_GCM_IV_LENGTH,\n+\t\t\t\t\t.increment = 0\n+\t\t\t\t}\n+\t\t\t}, }\n+\t\t}, }\n+\t},\n+\t{\t/* AES GCM */\n+\t\t.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,\n+\t\t{.sym = {\n+\t\t\t.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,\n+\t\t\t{.aead = {\n+\t\t\t\t.algo = RTE_CRYPTO_AEAD_AES_GCM,\n+\t\t\t\t.block_size = 16,\n+\t\t\t\t.key_size = {\n+\t\t\t\t\t.min = 16,\n+\t\t\t\t\t.max = 32,\n+\t\t\t\t\t.increment = 8\n+\t\t\t\t},\n+\t\t\t\t.digest_size = {\n+\t\t\t\t\t.min = 1,\n+\t\t\t\t\t.max = 16,\n+\t\t\t\t\t.increment = 1\n+\t\t\t\t},\n+\t\t\t\t.aad_size = {\n+\t\t\t\t\t.min = 0,\n+\t\t\t\t\t.max = 65535,\n+\t\t\t\t\t.increment = 1\n+\t\t\t\t},\n+\t\t\t\t.iv_size = {\n+\t\t\t\t\t.min = AESNI_GCM_IV_LENGTH,\n+\t\t\t\t\t.max = AESNI_GCM_IV_LENGTH,\n+\t\t\t\t\t.increment = 0\n+\t\t\t\t}\n+\t\t\t}, }\n+\t\t}, }\n+\t},\n+\tRTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()\n+};\n+\n+uint8_t pmd_driver_id_aesni_gcm;\n+\n+enum aesni_gcm_key_length {\n+\tGCM_KEY_128 = 0,\n+\tGCM_KEY_192,\n+\tGCM_KEY_256,\n+\tGCM_NUM_KEY_TYPES\n+};\n+\n+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,\n+\t\t\t    struct gcm_context_data *gcm_ctx_data,\n+\t\t\t    uint8_t *out, const uint8_t *in,\n+\t\t\t    uint64_t plaintext_len, const uint8_t *iv,\n+\t\t\t    const uint8_t *aad, uint64_t aad_len,\n+\t\t\t    uint8_t 
*auth_tag, uint64_t auth_tag_len);\n+\n+typedef void (*aesni_gcm_pre_t)(const void *key,\n+\t\t\t\tstruct gcm_key_data *gcm_data);\n+\n+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,\n+\t\t\t\t struct gcm_context_data *gcm_ctx_data,\n+\t\t\t\t const uint8_t *iv, uint8_t const *aad,\n+\t\t\t\t uint64_t aad_len);\n+\n+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,\n+\t\t\t\t   struct gcm_context_data *gcm_ctx_data,\n+\t\t\t\t   uint8_t *out, const uint8_t *in,\n+\t\t\t\t   uint64_t plaintext_len);\n+\n+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,\n+\t\t\t\t     struct gcm_context_data *gcm_ctx_data,\n+\t\t\t\t     uint8_t *auth_tag, uint64_t auth_tag_len);\n+\n+typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,\n+\t\t\t\t  struct gcm_context_data *gcm_ctx_data,\n+\t\t\t\t  const uint8_t *iv, const uint64_t iv_len);\n+\n+typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,\n+\t\t\t\t    struct gcm_context_data *gcm_ctx_data,\n+\t\t\t\t    const uint8_t *in,\n+\t\t\t\t    const uint64_t plaintext_len);\n+\n+typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,\n+\t\t\t\t      struct gcm_context_data *gcm_ctx_data,\n+\t\t\t\t      uint8_t *auth_tag,\n+\t\t\t\t      const uint64_t auth_tag_len);\n+\n+/** GCM operation handlers */\n+struct aesni_gcm_ops {\n+\taesni_gcm_t enc;\n+\taesni_gcm_t dec;\n+\taesni_gcm_pre_t pre;\n+\taesni_gcm_init_t init;\n+\taesni_gcm_update_t update_enc;\n+\taesni_gcm_update_t update_dec;\n+\taesni_gcm_finalize_t finalize_enc;\n+\taesni_gcm_finalize_t finalize_dec;\n+\taesni_gmac_init_t gmac_init;\n+\taesni_gmac_update_t gmac_update;\n+\taesni_gmac_finalize_t gmac_finalize;\n+};\n+\n+RTE_DEFINE_PER_LCORE(struct aesni_gcm_ops[GCM_NUM_KEY_TYPES], gcm_ops);\n+\n+struct aesni_gcm_qp_data {\n+\tstruct gcm_context_data gcm_ctx_data;\n+\tuint8_t temp_digest[DIGEST_LENGTH_MAX];\n+\t/* *< 
Buffers used to store the digest generated\n+\t * by the driver when verifying a digest provided\n+\t * by the user (using authentication verify operation)\n+\t */\n+\tstruct aesni_gcm_ops ops[GCM_NUM_KEY_TYPES];\n+\t/**< Operation Handlers */\n+};\n+\n+/** AESNI GCM private session structure */\n+struct aesni_gcm_session {\n+\tstruct {\n+\t\tuint16_t length;\n+\t\tuint16_t offset;\n+\t} iv;\n+\t/**< IV parameters */\n+\tuint16_t aad_length;\n+\t/**< AAD length */\n+\tuint16_t req_digest_length;\n+\t/**< Requested digest length */\n+\tuint16_t gen_digest_length;\n+\t/**< Generated digest length */\n+\tenum ipsec_mb_operation op;\n+\t/**< GCM operation type */\n+\tstruct gcm_key_data gdata_key;\n+\t/**< GCM parameters */\n+\tenum aesni_gcm_key_length key_length;\n+\t/** Key Length */\n+};\n+\n+static void\n+aesni_gcm_set_ops(struct aesni_gcm_ops *ops, IMB_MGR *mb_mgr)\n+{\n+\t/* Set 128 bit function pointers. */\n+\tops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;\n+\tops[GCM_KEY_128].init = mb_mgr->gcm128_init;\n+\n+\tops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;\n+\tops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;\n+\tops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;\n+\n+\tops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;\n+\tops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;\n+\tops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;\n+\n+\tops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;\n+\tops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;\n+\tops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;\n+\n+\t/* Set 192 bit function pointers. 
*/\n+\tops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;\n+\tops[GCM_KEY_192].init = mb_mgr->gcm192_init;\n+\n+\tops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;\n+\tops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;\n+\tops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;\n+\n+\tops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;\n+\tops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;\n+\tops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;\n+\n+\tops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;\n+\tops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;\n+\tops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;\n+\n+\t/* Set 256 bit function pointers. */\n+\tops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;\n+\tops[GCM_KEY_256].init = mb_mgr->gcm256_init;\n+\n+\tops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;\n+\tops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;\n+\tops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;\n+\n+\tops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;\n+\tops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;\n+\tops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;\n+\n+\tops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;\n+\tops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;\n+\tops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;\n+}\n+\n+static int\n+aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,\n+\t\t\t    const struct rte_crypto_sym_xform *xform)\n+{\n+\tstruct aesni_gcm_session *sess = session;\n+\tconst struct rte_crypto_sym_xform *auth_xform;\n+\tconst struct rte_crypto_sym_xform *cipher_xform;\n+\tconst struct rte_crypto_sym_xform *aead_xform;\n+\n+\tuint8_t key_length;\n+\tconst uint8_t *key;\n+\tenum ipsec_mb_operation mode;\n+\tint ret = 0;\n+\n+\tret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,\n+\t\t\t\t&cipher_xform, &aead_xform);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/**< GCM key type */\n+\n+\tsess->op = mode;\n+\n+\tswitch (sess->op) {\n+\tcase 
IPSEC_MB_OP_HASH_GEN_ONLY:\n+\tcase IPSEC_MB_OP_HASH_VERIFY_ONLY:\n+\t\t/* AES-GMAC\n+\t\t * auth_xform = xform;\n+\t\t */\n+\t\tif (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {\n+\t\t\tIPSEC_MB_LOG(ERR,\n+\t\"Only AES GMAC is supported as an authentication only algorithm\");\n+\t\t\tret = -ENOTSUP;\n+\t\t\tgoto error_exit;\n+\t\t}\n+\t\t/* Set IV parameters */\n+\t\tsess->iv.offset = auth_xform->auth.iv.offset;\n+\t\tsess->iv.length = auth_xform->auth.iv.length;\n+\t\tkey_length = auth_xform->auth.key.length;\n+\t\tkey = auth_xform->auth.key.data;\n+\t\tsess->req_digest_length = auth_xform->auth.digest_length;\n+\t\tbreak;\n+\tcase IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:\n+\tcase IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:\n+\t\t/* AES-GCM\n+\t\t * aead_xform = xform;\n+\t\t */\n+\n+\t\tif (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {\n+\t\t\tIPSEC_MB_LOG(ERR,\n+\t\t\t\"The only combined operation supported is AES GCM\");\n+\t\t\tret = -ENOTSUP;\n+\t\t\tgoto error_exit;\n+\t\t}\n+\t\t/* Set IV parameters */\n+\t\tsess->iv.offset = aead_xform->aead.iv.offset;\n+\t\tsess->iv.length = aead_xform->aead.iv.length;\n+\t\tkey_length = aead_xform->aead.key.length;\n+\t\tkey = aead_xform->aead.key.data;\n+\t\tsess->aad_length = aead_xform->aead.aad_length;\n+\t\tsess->req_digest_length = aead_xform->aead.digest_length;\n+\t\tbreak;\n+\tdefault:\n+\t\tIPSEC_MB_LOG(\n+\t\t    ERR, \"Wrong xform type, has to be AEAD or authentication\");\n+\t\tret = -ENOTSUP;\n+\t\tgoto error_exit;\n+\t}\n+\n+\t/* Check key length, and calculate GCM pre-compute. 
*/\n+\tswitch (key_length) {\n+\tcase 16:\n+\t\tsess->key_length = GCM_KEY_128;\n+\t\tmb_mgr->gcm128_pre(key, &sess->gdata_key);\n+\t\tbreak;\n+\tcase 24:\n+\t\tsess->key_length = GCM_KEY_192;\n+\t\tmb_mgr->gcm192_pre(key, &sess->gdata_key);\n+\t\tbreak;\n+\tcase 32:\n+\t\tsess->key_length = GCM_KEY_256;\n+\t\tmb_mgr->gcm256_pre(key, &sess->gdata_key);\n+\t\tbreak;\n+\tdefault:\n+\t\tIPSEC_MB_LOG(ERR, \"Invalid key length\");\n+\t\tret = -EINVAL;\n+\t\tgoto error_exit;\n+\t}\n+\n+\t/* Digest check */\n+\tif (sess->req_digest_length > 16) {\n+\t\tIPSEC_MB_LOG(ERR, \"Invalid digest length\");\n+\t\tret = -EINVAL;\n+\t\tgoto error_exit;\n+\t}\n+\t/*\n+\t * If size requested is different, generate the full digest\n+\t * (16 bytes) in a temporary location and then memcpy\n+\t * the requested number of bytes.\n+\t */\n+\tif (sess->req_digest_length < 4)\n+\t\tsess->gen_digest_length = 16;\n+\telse\n+\t\tsess->gen_digest_length = sess->req_digest_length;\n+\n+error_exit:\n+\treturn ret;\n+}\n+\n+/**\n+ * Process a completed job and return rte_mbuf which job processed\n+ *\n+ * @param job\tIMB_JOB job to process\n+ *\n+ * @return\n+ * - Returns processed mbuf which is trimmed of output digest used in\n+ * verification of supplied digest in the case of a HASH_CIPHER operation\n+ * - Returns NULL on invalid job\n+ */\n+static void\n+post_process_gcm_crypto_op(struct ipsec_mb_qp *qp,\n+\t\tstruct rte_crypto_op *op,\n+\t\tstruct aesni_gcm_session *session)\n+{\n+\tstruct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);\n+\n+\top->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n+\t/* Verify digest if required */\n+\tif (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||\n+\t\t\tsession->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {\n+\t\tuint8_t *digest;\n+\n+\t\tuint8_t *tag = qp_data->temp_digest;\n+\n+\t\tif (session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY)\n+\t\t\tdigest = op->sym->auth.digest.data;\n+\t\telse\n+\t\t\tdigest = op->sym->aead.digest.data;\n+\n+#ifdef 
RTE_LIBRTE_PMD_AESNI_GCM_DEBUG\n+\t\trte_hexdump(stdout, \"auth tag (orig):\",\n+\t\t\t\tdigest, session->req_digest_length);\n+\t\trte_hexdump(stdout, \"auth tag (calc):\",\n+\t\t\t\ttag, session->req_digest_length);\n+#endif\n+\n+\t\tif (memcmp(tag, digest,\tsession->req_digest_length) != 0)\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;\n+\t} else {\n+\t\tif (session->req_digest_length != session->gen_digest_length) {\n+\t\t\tif (session->op ==\n+\t\t\t\tIPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT)\n+\t\t\t\tmemcpy(op->sym->aead.digest.data,\n+\t\t\t\t\tqp_data->temp_digest,\n+\t\t\t\t\tsession->req_digest_length);\n+\t\t\telse\n+\t\t\t\tmemcpy(op->sym->auth.digest.data,\n+\t\t\t\t\tqp_data->temp_digest,\n+\t\t\t\t\tsession->req_digest_length);\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Process a completed GCM request\n+ *\n+ * @param qp\t\tQueue Pair to process\n+ * @param op\t\tCrypto operation\n+ * @param sess\t\tAESNI-GCM session\n+ *\n+ */\n+static void\n+handle_completed_gcm_crypto_op(struct ipsec_mb_qp *qp,\n+\t\tstruct rte_crypto_op *op,\n+\t\tstruct aesni_gcm_session *sess)\n+{\n+\tpost_process_gcm_crypto_op(qp, op, sess);\n+\n+\t/* Free session if a session-less crypto op */\n+\tif (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {\n+\t\tmemset(sess, 0, sizeof(struct aesni_gcm_session));\n+\t\tmemset(op->sym->session, 0,\n+\t\t\trte_cryptodev_sym_get_existing_header_session_size(\n+\t\t\t\top->sym->session));\n+\t\trte_mempool_put(qp->sess_mp_priv, sess);\n+\t\trte_mempool_put(qp->sess_mp, op->sym->session);\n+\t\top->sym->session = NULL;\n+\t}\n+}\n+\n+/**\n+ * Process a crypto operation, calling\n+ * the GCM API from the multi buffer library.\n+ *\n+ * @param\tqp\t\tqueue pair\n+ * @param\top\t\tsymmetric crypto operation\n+ * @param\tsession\t\tGCM session\n+ *\n+ * @return\n+ *  0 on success\n+ */\n+static int\n+process_gcm_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,\n+\t\tstruct aesni_gcm_session *session)\n+{\n+\tstruct aesni_gcm_qp_data 
*qp_data = ipsec_mb_get_qp_private_data(qp);\n+\tuint8_t *src, *dst;\n+\tuint8_t *iv_ptr;\n+\tstruct rte_crypto_sym_op *sym_op = op->sym;\n+\tstruct rte_mbuf *m_src = sym_op->m_src;\n+\tuint32_t offset, data_offset, data_length;\n+\tuint32_t part_len, total_len, data_len;\n+\tuint8_t *tag;\n+\tunsigned int oop = 0;\n+\tstruct aesni_gcm_ops *ops = &qp_data->ops[session->key_length];\n+\n+\tif (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT ||\n+\t\t\tsession->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {\n+\t\toffset = sym_op->aead.data.offset;\n+\t\tdata_offset = offset;\n+\t\tdata_length = sym_op->aead.data.length;\n+\t} else {\n+\t\toffset = sym_op->auth.data.offset;\n+\t\tdata_offset = offset;\n+\t\tdata_length = sym_op->auth.data.length;\n+\t}\n+\n+\tRTE_ASSERT(m_src != NULL);\n+\n+\twhile (offset >= m_src->data_len && data_length != 0) {\n+\t\toffset -= m_src->data_len;\n+\t\tm_src = m_src->next;\n+\n+\t\tRTE_ASSERT(m_src != NULL);\n+\t}\n+\n+\tsrc = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);\n+\n+\tdata_len = m_src->data_len - offset;\n+\tpart_len = (data_len < data_length) ? 
data_len :\n+\t\t\tdata_length;\n+\n+\tRTE_ASSERT((sym_op->m_dst == NULL) ||\n+\t\t\t((sym_op->m_dst != NULL) &&\n+\t\t\t\trte_pktmbuf_is_contiguous(sym_op->m_dst)));\n+\n+\t/* In-place */\n+\tif (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))\n+\t\tdst = src;\n+\t/* Out-of-place */\n+\telse {\n+\t\toop = 1;\n+\t\t/* Segmented destination buffer is not supported\n+\t\t * if operation is Out-of-place\n+\t\t */\n+\t\tRTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));\n+\t\tdst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,\n+\t\t\t\t\tdata_offset);\n+\t}\n+\n+\tiv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,\n+\t\t\t\tsession->iv.offset);\n+\n+\tif (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {\n+\t\tops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,\n+\t\t\t\tsym_op->aead.aad.data,\n+\t\t\t\t(uint64_t)session->aad_length);\n+\n+\t\tops->update_enc(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\tdst, src, (uint64_t)part_len);\n+\t\ttotal_len = data_length - part_len;\n+\n+\t\twhile (total_len) {\n+\t\t\tm_src = m_src->next;\n+\n+\t\t\tRTE_ASSERT(m_src != NULL);\n+\n+\t\t\tsrc = rte_pktmbuf_mtod(m_src, uint8_t *);\n+\t\t\tif (oop)\n+\t\t\t\tdst += part_len;\n+\t\t\telse\n+\t\t\t\tdst = src;\n+\t\t\tpart_len = (m_src->data_len < total_len) ?\n+\t\t\t\t\tm_src->data_len : total_len;\n+\n+\t\t\tops->update_enc(&session->gdata_key,\n+\t\t\t\t\t&qp_data->gcm_ctx_data,\n+\t\t\t\t\tdst, src, (uint64_t)part_len);\n+\t\t\ttotal_len -= part_len;\n+\t\t}\n+\n+\t\tif (session->req_digest_length != session->gen_digest_length)\n+\t\t\ttag = qp_data->temp_digest;\n+\t\telse\n+\t\t\ttag = sym_op->aead.digest.data;\n+\n+\t\tops->finalize_enc(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\ttag, session->gen_digest_length);\n+\t} else if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {\n+\t\tops->init(&session->gdata_key, &qp_data->gcm_ctx_data, 
iv_ptr,\n+\t\t\t\tsym_op->aead.aad.data,\n+\t\t\t\t(uint64_t)session->aad_length);\n+\n+\t\tops->update_dec(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\tdst, src, (uint64_t)part_len);\n+\t\ttotal_len = data_length - part_len;\n+\n+\t\twhile (total_len) {\n+\t\t\tm_src = m_src->next;\n+\n+\t\t\tRTE_ASSERT(m_src != NULL);\n+\n+\t\t\tsrc = rte_pktmbuf_mtod(m_src, uint8_t *);\n+\t\t\tif (oop)\n+\t\t\t\tdst += part_len;\n+\t\t\telse\n+\t\t\t\tdst = src;\n+\t\t\tpart_len = (m_src->data_len < total_len) ?\n+\t\t\t\t\tm_src->data_len : total_len;\n+\n+\t\t\tops->update_dec(&session->gdata_key,\n+\t\t\t\t\t&qp_data->gcm_ctx_data,\n+\t\t\t\t\tdst, src, (uint64_t)part_len);\n+\t\t\ttotal_len -= part_len;\n+\t\t}\n+\n+\t\ttag = qp_data->temp_digest;\n+\t\tops->finalize_dec(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\ttag, session->gen_digest_length);\n+\t} else if (session->op == IPSEC_MB_OP_HASH_GEN_ONLY) {\n+\t\tops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\tiv_ptr, session->iv.length);\n+\n+\t\tops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\tsrc, (uint64_t)part_len);\n+\t\ttotal_len = data_length - part_len;\n+\n+\t\twhile (total_len) {\n+\t\t\tm_src = m_src->next;\n+\n+\t\t\tRTE_ASSERT(m_src != NULL);\n+\n+\t\t\tsrc = rte_pktmbuf_mtod(m_src, uint8_t *);\n+\t\t\tpart_len = (m_src->data_len < total_len) ?\n+\t\t\t\t\tm_src->data_len : total_len;\n+\n+\t\t\tops->gmac_update(&session->gdata_key,\n+\t\t\t\t\t&qp_data->gcm_ctx_data, src,\n+\t\t\t\t\t(uint64_t)part_len);\n+\t\t\ttotal_len -= part_len;\n+\t\t}\n+\n+\t\tif (session->req_digest_length != session->gen_digest_length)\n+\t\t\ttag = qp_data->temp_digest;\n+\t\telse\n+\t\t\ttag = sym_op->auth.digest.data;\n+\n+\t\tops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\ttag, session->gen_digest_length);\n+\t} else { /* IPSEC_MB_OP_HASH_VERIFY_ONLY */\n+\t\tops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\tiv_ptr, 
session->iv.length);\n+\n+\t\tops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\tsrc, (uint64_t)part_len);\n+\t\ttotal_len = data_length - part_len;\n+\n+\t\twhile (total_len) {\n+\t\t\tm_src = m_src->next;\n+\n+\t\t\tRTE_ASSERT(m_src != NULL);\n+\n+\t\t\tsrc = rte_pktmbuf_mtod(m_src, uint8_t *);\n+\t\t\tpart_len = (m_src->data_len < total_len) ?\n+\t\t\t\t\tm_src->data_len : total_len;\n+\n+\t\t\tops->gmac_update(&session->gdata_key,\n+\t\t\t\t\t&qp_data->gcm_ctx_data, src,\n+\t\t\t\t\t(uint64_t)part_len);\n+\t\t\ttotal_len -= part_len;\n+\t\t}\n+\n+\t\ttag = qp_data->temp_digest;\n+\n+\t\tops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,\n+\t\t\t\ttag, session->gen_digest_length);\n+\t}\n+\treturn 0;\n+}\n+\n+/** Get gcm session */\n+static inline struct aesni_gcm_session *\n+aesni_gcm_get_session(struct ipsec_mb_qp *qp,\n+\t     struct rte_crypto_op *op)\n+{\n+\tstruct aesni_gcm_session *sess = NULL;\n+\tuint32_t driver_id =\n+\t    ipsec_mb_get_driver_id(IPSEC_MB_PMD_TYPE_AESNI_GCM);\n+\tstruct rte_crypto_sym_op *sym_op = op->sym;\n+\n+\tif (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {\n+\t\tif (likely(sym_op->session != NULL))\n+\t\t\tsess = (struct aesni_gcm_session *)\n+\t\t\t    get_sym_session_private_data(sym_op->session,\n+\t\t\t\t\t\t\t driver_id);\n+\t} else {\n+\t\tvoid *_sess;\n+\t\tvoid *_sess_private_data = NULL;\n+\n+\t\tif (rte_mempool_get(qp->sess_mp, (void **)&_sess))\n+\t\t\treturn NULL;\n+\n+\t\tif (rte_mempool_get(qp->sess_mp_priv,\n+\t\t\t\t(void **)&_sess_private_data))\n+\t\t\treturn NULL;\n+\n+\t\tsess = (struct aesni_gcm_session *)_sess_private_data;\n+\n+\t\tif (unlikely(aesni_gcm_session_configure(qp->mb_mgr,\n+\t\t\t\t _sess_private_data, sym_op->xform) != 0)) {\n+\t\t\trte_mempool_put(qp->sess_mp, _sess);\n+\t\t\trte_mempool_put(qp->sess_mp_priv, _sess_private_data);\n+\t\t\tsess = NULL;\n+\t\t}\n+\t\tsym_op->session = (struct rte_cryptodev_sym_session 
*)_sess;\n+\t\tset_sym_session_private_data(sym_op->session, driver_id,\n+\t\t\t\t\t     _sess_private_data);\n+\t}\n+\n+\tif (unlikely(sess == NULL))\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;\n+\n+\treturn sess;\n+}\n+\n+static uint16_t\n+aesni_gcm_pmd_dequeue_burst(void *queue_pair,\n+\t\tstruct rte_crypto_op **ops, uint16_t nb_ops)\n+{\n+\tstruct aesni_gcm_session *sess;\n+\tstruct ipsec_mb_qp *qp = queue_pair;\n+\n+\tint retval = 0;\n+\tunsigned int i, nb_dequeued;\n+\n+\tnb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,\n+\t\t\t(void **)ops, nb_ops, NULL);\n+\n+\tfor (i = 0; i < nb_dequeued; i++) {\n+\n+\t\tsess = aesni_gcm_get_session(qp, ops[i]);\n+\t\tif (unlikely(sess == NULL)) {\n+\t\t\tops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\t\tqp->stats.dequeue_err_count++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tretval = process_gcm_crypto_op(qp, ops[i], sess);\n+\t\tif (retval < 0) {\n+\t\t\tops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\t\tqp->stats.dequeue_err_count++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\thandle_completed_gcm_crypto_op(qp, ops[i], sess);\n+\t}\n+\n+\tqp->stats.dequeued_count += i;\n+\n+\treturn i;\n+}\n+\n+static inline void\n+aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec,\n+\t\t\t  int32_t errnum)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < vec->num; i++)\n+\t\tvec->status[i] = errnum;\n+}\n+\n+static inline int32_t\n+aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,\n+\t\t\t\t     struct gcm_context_data *gdata_ctx,\n+\t\t\t\t     uint8_t *digest, struct aesni_gcm_ops ops)\n+{\n+\tif (s->req_digest_length != s->gen_digest_length) {\n+\t\tuint8_t tmpdigest[s->gen_digest_length];\n+\n+\t\tops.finalize_enc(&s->gdata_key, gdata_ctx, tmpdigest,\n+\t\t\t\ts->gen_digest_length);\n+\t\tmemcpy(digest, tmpdigest, s->req_digest_length);\n+\t} else {\n+\t\tops.finalize_enc(&s->gdata_key, gdata_ctx, digest,\n+\t\t\t\ts->gen_digest_length);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline 
int32_t\n+aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,\n+\t\t\t\t     struct gcm_context_data *gdata_ctx,\n+\t\t\t\t     uint8_t *digest, struct aesni_gcm_ops ops)\n+{\n+\tuint8_t tmpdigest[s->gen_digest_length];\n+\n+\tops.finalize_dec(&s->gdata_key, gdata_ctx, tmpdigest,\n+\t\t\ts->gen_digest_length);\n+\n+\treturn memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0\n+\t\t\t\t\t\t\t\t    : EBADMSG;\n+}\n+\n+static inline void\n+aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,\n+\t\t\t     struct gcm_context_data *gdata_ctx,\n+\t\t\t     struct rte_crypto_sgl *sgl, void *iv, void *aad,\n+\t\t\t     struct aesni_gcm_ops ops)\n+{\n+\tuint32_t i;\n+\n+\t/* init crypto operation */\n+\tops.init(&s->gdata_key, gdata_ctx, iv, aad,\n+\t\t    (uint64_t)s->aad_length);\n+\n+\t/* update with sgl data */\n+\tfor (i = 0; i < sgl->num; i++) {\n+\t\tstruct rte_crypto_vec *vec = &sgl->vec[i];\n+\n+\t\tswitch (s->op) {\n+\t\tcase IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:\n+\t\t\tops.update_enc(&s->gdata_key, gdata_ctx,\n+\t\t\t      vec->base, vec->base, vec->len);\n+\t\t\tbreak;\n+\t\tcase IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:\n+\t\t\tops.update_dec(&s->gdata_key, gdata_ctx,\n+\t\t\t      vec->base, vec->base, vec->len);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tIPSEC_MB_LOG(ERR, \"Invalid session op\");\n+\t\t\tbreak;\n+\t\t}\n+\n+\t}\n+}\n+\n+static inline void\n+aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,\n+\t\t\t      struct gcm_context_data *gdata_ctx,\n+\t\t\t      struct rte_crypto_sgl *sgl, void *iv,\n+\t\t\t      struct aesni_gcm_ops ops)\n+{\n+\tops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,\n+\t\t    sgl->vec[0].len);\n+}\n+\n+static inline uint32_t\n+aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,\n+\t\t      struct gcm_context_data *gdata_ctx,\n+\t\t      struct rte_crypto_sym_vec *vec,\n+\t\t      struct aesni_gcm_ops ops)\n+{\n+\tuint32_t i, processed;\n+\n+\tprocessed = 0;\n+\tfor (i = 
0; i < vec->num; ++i) {\n+\t\taesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->sgl[i],\n+\t\t\t\t\t     vec->iv[i].va, vec->aad[i].va,\n+\t\t\t\t\t     ops);\n+\t\tvec->status[i] = aesni_gcm_sgl_op_finalize_encryption(\n+\t\t    s, gdata_ctx, vec->digest[i].va, ops);\n+\t\tprocessed += (vec->status[i] == 0);\n+\t}\n+\n+\treturn processed;\n+}\n+\n+static inline uint32_t\n+aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,\n+\t\t      struct gcm_context_data *gdata_ctx,\n+\t\t      struct rte_crypto_sym_vec *vec,\n+\t\t      struct aesni_gcm_ops ops)\n+{\n+\tuint32_t i, processed;\n+\n+\tprocessed = 0;\n+\tfor (i = 0; i < vec->num; ++i) {\n+\t\taesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->sgl[i],\n+\t\t\t\t\t     vec->iv[i].va, vec->aad[i].va,\n+\t\t\t\t\t     ops);\n+\t\tvec->status[i] = aesni_gcm_sgl_op_finalize_decryption(\n+\t\t    s, gdata_ctx, vec->digest[i].va, ops);\n+\t\tprocessed += (vec->status[i] == 0);\n+\t}\n+\n+\treturn processed;\n+}\n+\n+static inline uint32_t\n+aesni_gmac_sgl_generate(struct aesni_gcm_session *s,\n+\t\t\tstruct gcm_context_data *gdata_ctx,\n+\t\t\tstruct rte_crypto_sym_vec *vec,\n+\t\t\tstruct aesni_gcm_ops ops)\n+{\n+\tuint32_t i, processed;\n+\n+\tprocessed = 0;\n+\tfor (i = 0; i < vec->num; ++i) {\n+\t\tif (vec->sgl[i].num != 1) {\n+\t\t\tvec->status[i] = ENOTSUP;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\taesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->sgl[i],\n+\t\t\t\t\t      vec->iv[i].va, ops);\n+\t\tvec->status[i] = aesni_gcm_sgl_op_finalize_encryption(\n+\t\t    s, gdata_ctx, vec->digest[i].va, ops);\n+\t\tprocessed += (vec->status[i] == 0);\n+\t}\n+\n+\treturn processed;\n+}\n+\n+static inline uint32_t\n+aesni_gmac_sgl_verify(struct aesni_gcm_session *s,\n+\t\t      struct gcm_context_data *gdata_ctx,\n+\t\t      struct rte_crypto_sym_vec *vec,\n+\t\t      struct aesni_gcm_ops ops)\n+{\n+\tuint32_t i, processed;\n+\n+\tprocessed = 0;\n+\tfor (i = 0; i < vec->num; ++i) {\n+\t\tif (vec->sgl[i].num != 1) 
{\n+\t\t\tvec->status[i] = ENOTSUP;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\taesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->sgl[i],\n+\t\t\t\t\t      vec->iv[i].va, ops);\n+\t\tvec->status[i] = aesni_gcm_sgl_op_finalize_decryption(\n+\t\t    s, gdata_ctx, vec->digest[i].va, ops);\n+\t\tprocessed += (vec->status[i] == 0);\n+\t}\n+\n+\treturn processed;\n+}\n+\n+/** Process CPU crypto bulk operations */\n+static uint32_t\n+aesni_gcm_process_bulk(struct rte_cryptodev *dev,\n+\t\t\tstruct rte_cryptodev_sym_session *sess,\n+\t\t\t__rte_unused union rte_crypto_sym_ofs ofs,\n+\t\t\tstruct rte_crypto_sym_vec *vec)\n+{\n+\tvoid *sess_priv;\n+\tstruct aesni_gcm_session *s;\n+\tstruct gcm_context_data gdata_ctx;\n+\tIMB_MGR *mb_mgr;\n+\n+\tsess_priv = get_sym_session_private_data(sess, dev->driver_id);\n+\tif (unlikely(sess_priv == NULL)) {\n+\t\taesni_gcm_fill_error_code(vec, EINVAL);\n+\t\treturn 0;\n+\t}\n+\n+\ts = sess_priv;\n+\n+\t/* get per-thread MB MGR, create one if needed */\n+\tmb_mgr = get_per_thread_mb_mgr();\n+\tif (unlikely(mb_mgr == NULL))\n+\t\treturn 0;\n+\n+\t/* Check if function pointers have been set for this thread ops. 
*/\n+\tif (unlikely(RTE_PER_LCORE(gcm_ops)[s->key_length].init == NULL))\n+\t\taesni_gcm_set_ops(RTE_PER_LCORE(gcm_ops), mb_mgr);\n+\n+\tswitch (s->op) {\n+\tcase IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:\n+\t\treturn aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec,\n+\t\t\t\tRTE_PER_LCORE(gcm_ops)[s->key_length]);\n+\tcase IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:\n+\t\treturn aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec,\n+\t\t\t\tRTE_PER_LCORE(gcm_ops)[s->key_length]);\n+\tcase IPSEC_MB_OP_HASH_GEN_ONLY:\n+\t\treturn aesni_gmac_sgl_generate(s, &gdata_ctx, vec,\n+\t\t\t\tRTE_PER_LCORE(gcm_ops)[s->key_length]);\n+\tcase IPSEC_MB_OP_HASH_VERIFY_ONLY:\n+\t\treturn aesni_gmac_sgl_verify(s, &gdata_ctx, vec,\n+\t\t\t\tRTE_PER_LCORE(gcm_ops)[s->key_length]);\n+\tdefault:\n+\t\taesni_gcm_fill_error_code(vec, EINVAL);\n+\t\treturn 0;\n+\t}\n+}\n+\n+static int\n+aesni_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n+\t\t\t\tconst struct rte_cryptodev_qp_conf *qp_conf,\n+\t\t\t\tint socket_id)\n+{\n+\tint ret = ipsec_mb_pmd_qp_setup(dev, qp_id, qp_conf, socket_id);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\n+\tstruct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];\n+\tstruct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);\n+\taesni_gcm_set_ops(qp_data->ops, qp->mb_mgr);\n+\treturn 0;\n+}\n+\n+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {\n+\t.dev_configure = ipsec_mb_pmd_config,\n+\t.dev_start = ipsec_mb_pmd_start,\n+\t.dev_stop = ipsec_mb_pmd_stop,\n+\t.dev_close = ipsec_mb_pmd_close,\n+\n+\t.stats_get = ipsec_mb_pmd_stats_get,\n+\t.stats_reset = ipsec_mb_pmd_stats_reset,\n+\n+\t.dev_infos_get = ipsec_mb_pmd_info_get,\n+\n+\t.queue_pair_setup = aesni_gcm_qp_setup,\n+\t.queue_pair_release = ipsec_mb_pmd_qp_release,\n+\n+\t.sym_cpu_process = aesni_gcm_process_bulk,\n+\n+\t.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,\n+\t.sym_session_configure = ipsec_mb_pmd_sym_session_configure,\n+\t.sym_session_clear = ipsec_mb_pmd_sym_session_clear\n+};\n+\n+static 
int\n+cryptodev_aesni_gcm_probe(struct rte_vdev_device *vdev)\n+{\n+\treturn cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_GCM);\n+}\n+\n+static struct rte_vdev_driver cryptodev_aesni_gcm_pmd_drv = {\n+\t.probe = cryptodev_aesni_gcm_probe,\n+\t.remove = cryptodev_ipsec_mb_remove\n+};\n+\n+static struct cryptodev_driver aesni_gcm_crypto_drv;\n+\n+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD,\n+\t\t      cryptodev_aesni_gcm_pmd_drv);\n+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);\n+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,\n+\t\t\t      \"max_nb_queue_pairs=<int> socket_id=<int>\");\n+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv,\n+\t\t\t       cryptodev_aesni_gcm_pmd_drv.driver,\n+\t\t\t       pmd_driver_id_aesni_gcm);\n+\n+/* Constructor function to register aesni-gcm PMD */\n+RTE_INIT(ipsec_mb_register_aesni_gcm)\n+{\n+\tstruct ipsec_mb_pmd_data *aesni_gcm_data =\n+\t\t&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_GCM];\n+\n+\taesni_gcm_data->caps = aesni_gcm_capabilities;\n+\taesni_gcm_data->dequeue_burst = aesni_gcm_pmd_dequeue_burst;\n+\taesni_gcm_data->feature_flags =\n+\t\tRTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |\n+\t\tRTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |\n+\t\tRTE_CRYPTODEV_FF_IN_PLACE_SGL |\n+\t\tRTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |\n+\t\tRTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |\n+\t\tRTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |\n+\t\tRTE_CRYPTODEV_FF_SYM_SESSIONLESS;\n+\taesni_gcm_data->internals_priv_size = 0;\n+\taesni_gcm_data->ops = &aesni_gcm_pmd_ops;\n+\taesni_gcm_data->qp_priv_size = sizeof(struct aesni_gcm_qp_data);\n+\taesni_gcm_data->queue_pair_configure = NULL;\n+\taesni_gcm_data->session_configure = aesni_gcm_session_configure;\n+\taesni_gcm_data->session_priv_size = sizeof(struct aesni_gcm_session);\n+}\ndiff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h\nindex 2b589eee47..3407c3c070 100644\n--- 
a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h\n+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h\n@@ -37,6 +37,9 @@ extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);\n #define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb\n /**< IPSEC Multi buffer aesni_mb PMD device name */\n \n+#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm\n+/**< IPSEC Multi buffer PMD aesni_gcm device name */\n+\n /** PMD LOGTYPE DRIVER, common to all PMDs */\n extern int ipsec_mb_logtype_driver;\n #define IPSEC_MB_LOG(level, fmt, ...)                                         \\\n@@ -46,6 +49,7 @@ extern int ipsec_mb_logtype_driver;\n /** All supported device types */\n enum ipsec_mb_pmd_types {\n \tIPSEC_MB_PMD_TYPE_AESNI_MB = 0,\n+\tIPSEC_MB_PMD_TYPE_AESNI_GCM,\n \tIPSEC_MB_N_PMD_TYPES\n };\n \n@@ -65,6 +69,7 @@ enum ipsec_mb_operation {\n };\n \n extern uint8_t pmd_driver_id_aesni_mb;\n+extern uint8_t pmd_driver_id_aesni_gcm;\n \n /** Helper function. Gets driver ID based on PMD type */\n static __rte_always_inline uint8_t\n@@ -73,6 +78,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)\n \tswitch (pmd_type) {\n \tcase IPSEC_MB_PMD_TYPE_AESNI_MB:\n \t\treturn pmd_driver_id_aesni_mb;\n+\tcase IPSEC_MB_PMD_TYPE_AESNI_GCM:\n+\t\treturn pmd_driver_id_aesni_gcm;\n \tdefault:\n \t\tbreak;\n \t}\ndiff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build\nindex b2ccea6f94..14a13f2263 100644\n--- a/drivers/crypto/meson.build\n+++ b/drivers/crypto/meson.build\n@@ -7,7 +7,6 @@ endif\n \n drivers = [\n         'ipsec_mb',\n-        'aesni_gcm',\n         'armv8',\n         'bcmfs',\n         'caam_jr',\n",
    "prefixes": [
        "v3",
        "04/10"
    ]
}