get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body are changed).

put:
Update a patch.
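
As a quick reference for scripting against this endpoint, here is a minimal sketch of the GET request in Python using the requests library (the choice of client is an assumption; any HTTP client works). The field names it reads, such as name, state and mbox, are taken from the example response reproduced below.

# Minimal sketch: fetch this patch record from the Patchwork REST API.
# Assumes the `requests` package is available; field names come from the
# example JSON response shown further down on this page.
import requests

PATCH_URL = "http://patches.dpdk.org/api/patches/107820/"

resp = requests.get(PATCH_URL, headers={"Accept": "application/json"})
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[v9,2/9] crypto/qat: support symmetric build op request"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # mbox form of the patch, suitable for `git am`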

GET /api/patches/107820/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 107820,
    "url": "http://patches.dpdk.org/api/patches/107820/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220218171527.56719-3-kai.ji@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220218171527.56719-3-kai.ji@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220218171527.56719-3-kai.ji@intel.com",
    "date": "2022-02-18T17:15:20",
    "name": "[v9,2/9] crypto/qat: support symmetric build op request",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "51a84eef5e8242999feeab702c7f26422d847365",
    "submitter": {
        "id": 2202,
        "url": "http://patches.dpdk.org/api/people/2202/?format=api",
        "name": "Ji, Kai",
        "email": "kai.ji@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220218171527.56719-3-kai.ji@intel.com/mbox/",
    "series": [
        {
            "id": 21741,
            "url": "http://patches.dpdk.org/api/series/21741/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=21741",
            "date": "2022-02-18T17:15:18",
            "name": "drivers/qat: QAT symmetric crypto datapatch rework",
            "version": 9,
            "mbox": "http://patches.dpdk.org/series/21741/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/107820/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/107820/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A3742A0032;\n\tFri, 18 Feb 2022 18:16:04 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 53D0E41156;\n\tFri, 18 Feb 2022 18:15:52 +0100 (CET)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id 8F6FA4113F\n for <dev@dpdk.org>; Fri, 18 Feb 2022 18:15:47 +0100 (CET)",
            "from orsmga005.jf.intel.com ([10.7.209.41])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Feb 2022 09:15:33 -0800",
            "from silpixa00400465.ir.intel.com ([10.55.128.22])\n by orsmga005.jf.intel.com with ESMTP; 18 Feb 2022 09:15:32 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1645204547; x=1676740547;\n h=from:to:cc:subject:date:message-id:in-reply-to: references;\n bh=znXwFeMfccnXYMRV+AgNgETp48V4DfG2CasnP6Flm5c=;\n b=O+la1FNaz0902h+1A6nJbXlTFxYstVlTX8W0V05azoNVTm0znRQ7rNWx\n dPGfHWvCGUUmEj9yhcquzYeJ7VEI+ySJMrRCsy9X2nCHlmcyULiuBc6j3\n q1MOH50vLUanseJFrjSCMtyunRvdQul6aajOrIzI+SAEZ5UeJNKNjW0i6\n v+46nrlYb+lBNdx5ZbieiOOqlcDtDqnCOBNcYXZSxpRRuXiY0soL0nhKq\n No0MPCb05WOq5aVCwA4k6+5j5g3cGhgFSCrDCLR7DBfnYzvCt78VvxcLy\n jaTJR5xAe2lSL5Hw/o8cQxXLTR9G0KW8ZQvPlp5dWx4fGDLb3jgIGcBRU Q==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10262\"; a=\"238571892\"",
            "E=Sophos;i=\"5.88,379,1635231600\"; d=\"scan'208\";a=\"238571892\"",
            "E=Sophos;i=\"5.88,379,1635231600\"; d=\"scan'208\";a=\"705446201\""
        ],
        "X-ExtLoop1": "1",
        "From": "Kai Ji <kai.ji@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "gakhil@marvell.com,\n\troy.fan.zhang@intel.com,\n\tKai Ji <kai.ji@intel.com>",
        "Subject": "[dpdk-dev v9 2/9] crypto/qat: support symmetric build op request",
        "Date": "Sat, 19 Feb 2022 01:15:20 +0800",
        "Message-Id": "<20220218171527.56719-3-kai.ji@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20220218171527.56719-1-kai.ji@intel.com>",
        "References": "<20220217162909.22713-1-kai.ji@intel.com>\n <20220218171527.56719-1-kai.ji@intel.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This patch adds common inline functions for QAT symmetric\ncrypto driver to process crypto op, and the implementation of\nbuild op request function for QAT generation 1.\n\nSigned-off-by: Kai Ji <kai.ji@intel.com>\n---\n drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-\n drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-\n drivers/crypto/qat/qat_sym.c                 |  90 +-\n 3 files changed, 1019 insertions(+), 90 deletions(-)",
    "diff": "diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h\nindex 67a4d2cb2c..1130e0e76f 100644\n--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h\n+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h\n@@ -1,5 +1,5 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017-2021 Intel Corporation\n+ * Copyright(c) 2017-2022 Intel Corporation\n  */\n \n #ifndef _QAT_CRYPTO_PMD_GENS_H_\n@@ -8,14 +8,844 @@\n #include <rte_cryptodev.h>\n #include \"qat_crypto.h\"\n #include \"qat_sym_session.h\"\n+#include \"qat_sym.h\"\n+\n+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \\\n+\tRTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)\n+\n+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \\\n+\t(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \\\n+\tICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))\n+\n+static __rte_always_inline int\n+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,\n+\t\tuint8_t *iv, int ivlen, int srclen,\n+\t\tvoid *bpi_ctx)\n+{\n+\tEVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;\n+\tint encrypted_ivlen;\n+\tuint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];\n+\tuint8_t *encr = encrypted_iv;\n+\n+\t/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */\n+\tif (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)\n+\t\t\t\t\t\t\t\t<= 0)\n+\t\tgoto cipher_decrypt_err;\n+\n+\tfor (; srclen != 0; --srclen, ++dst, ++src, ++encr)\n+\t\t*dst = *src ^ *encr;\n+\n+\treturn 0;\n+\n+cipher_decrypt_err:\n+\tQAT_DP_LOG(ERR, \"libcrypto ECB cipher decrypt for BPI IV failed\");\n+\treturn -EINVAL;\n+}\n+\n+static __rte_always_inline uint32_t\n+qat_bpicipher_preprocess(struct qat_sym_session *ctx,\n+\t\t\t\tstruct rte_crypto_op *op)\n+{\n+\tint block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);\n+\tstruct rte_crypto_sym_op *sym_op = op->sym;\n+\tuint8_t last_block_len = block_len > 0 ?\n+\t\t\tsym_op->cipher.data.length % block_len : 0;\n+\n+\tif (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {\n+\t\t/* Decrypt last block */\n+\t\tuint8_t *last_block, *dst, *iv;\n+\t\tuint32_t last_block_offset = sym_op->cipher.data.offset +\n+\t\t\t\tsym_op->cipher.data.length - last_block_len;\n+\t\tlast_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,\n+\t\t\t\tuint8_t *, last_block_offset);\n+\n+\t\tif (unlikely((sym_op->m_dst != NULL)\n+\t\t\t\t&& (sym_op->m_dst != sym_op->m_src)))\n+\t\t\t/* out-of-place operation (OOP) */\n+\t\t\tdst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,\n+\t\t\t\t\t\tuint8_t *, last_block_offset);\n+\t\telse\n+\t\t\tdst = last_block;\n+\n+\t\tif (last_block_len < sym_op->cipher.data.length)\n+\t\t\t/* use previous block ciphertext as IV */\n+\t\t\tiv = last_block - block_len;\n+\t\telse\n+\t\t\t/* runt block, i.e. 
less than one full block */\n+\t\t\tiv = rte_crypto_op_ctod_offset(op, uint8_t *,\n+\t\t\t\t\tctx->cipher_iv.offset);\n+\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\t\tQAT_DP_HEXDUMP_LOG(DEBUG, \"BPI: src before pre-process:\",\n+\t\t\tlast_block, last_block_len);\n+\t\tif (sym_op->m_dst != NULL)\n+\t\t\tQAT_DP_HEXDUMP_LOG(DEBUG, \"BPI: dst before pre-process:\",\n+\t\t\tdst, last_block_len);\n+#endif\n+\t\top_bpi_cipher_decrypt(last_block, dst, iv, block_len,\n+\t\t\t\tlast_block_len, ctx->bpi_ctx);\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\t\tQAT_DP_HEXDUMP_LOG(DEBUG, \"BPI: src after pre-process:\",\n+\t\t\tlast_block, last_block_len);\n+\t\tif (sym_op->m_dst != NULL)\n+\t\t\tQAT_DP_HEXDUMP_LOG(DEBUG, \"BPI: dst after pre-process:\",\n+\t\t\tdst, last_block_len);\n+#endif\n+\t}\n+\n+\treturn sym_op->cipher.data.length - last_block_len;\n+}\n+\n+static __rte_always_inline int\n+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,\n+\t\tstruct rte_crypto_op *op)\n+{\n+\tif (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||\n+\t\tctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||\n+\t\tctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {\n+\t\tif (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||\n+\t\t\t\t(op->sym->auth.data.length % BYTE_LENGTH != 0)))\n+\t\t\treturn -EINVAL;\n+\t\treturn 1;\n+\t}\n+\treturn 0;\n+}\n+\n+static __rte_always_inline int\n+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,\n+\t\tstruct rte_crypto_op *op)\n+{\n+\tif (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||\n+\t\tctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||\n+\t\tctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {\n+\t\tif (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||\n+\t\t\t((op->sym->cipher.data.offset %\n+\t\t\tBYTE_LENGTH) != 0)))\n+\t\t\treturn -EINVAL;\n+\t\treturn 1;\n+\t}\n+\treturn 0;\n+}\n+\n+static __rte_always_inline int32_t\n+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,\n+\t\tvoid *opaque, struct qat_sym_op_cookie *cookie,\n+\t\tstruct rte_crypto_vec *src_vec, uint16_t n_src,\n+\t\tstruct rte_crypto_vec *dst_vec, uint16_t n_dst)\n+{\n+\tstruct qat_sgl *list;\n+\tuint32_t i;\n+\tuint32_t tl_src = 0, total_len_src, total_len_dst;\n+\tuint64_t src_data_start = 0, dst_data_start = 0;\n+\tint is_sgl = n_src > 1 || n_dst > 1;\n+\n+\tif (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||\n+\t\t\tn_dst > QAT_SYM_SGL_MAX_NUMBER))\n+\t\treturn -1;\n+\n+\tif (likely(!is_sgl)) {\n+\t\tsrc_data_start = src_vec[0].iova;\n+\t\ttl_src = total_len_src =\n+\t\t\t\tsrc_vec[0].len;\n+\t\tif (unlikely(n_dst)) { /* oop */\n+\t\t\ttotal_len_dst = dst_vec[0].len;\n+\n+\t\t\tdst_data_start = dst_vec[0].iova;\n+\t\t\tif (unlikely(total_len_src != total_len_dst))\n+\t\t\t\treturn -EINVAL;\n+\t\t} else {\n+\t\t\tdst_data_start = src_data_start;\n+\t\t\ttotal_len_dst = tl_src;\n+\t\t}\n+\t} else { /* sgl */\n+\t\ttotal_len_dst = total_len_src = 0;\n+\n+\t\tICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,\n+\t\t\tQAT_COMN_PTR_TYPE_SGL);\n+\n+\t\tlist = (struct qat_sgl *)&cookie->qat_sgl_src;\n+\t\tfor (i = 0; i < n_src; i++) {\n+\t\t\tlist->buffers[i].len = src_vec[i].len;\n+\t\t\tlist->buffers[i].resrvd = 0;\n+\t\t\tlist->buffers[i].addr = src_vec[i].iova;\n+\t\t\tif (tl_src + src_vec[i].len > UINT32_MAX) {\n+\t\t\t\tQAT_DP_LOG(ERR, \"Message too long\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\ttl_src += src_vec[i].len;\n+\t\t}\n+\n+\t\tlist->num_bufs = i;\n+\t\tsrc_data_start = 
cookie->qat_sgl_src_phys_addr;\n+\n+\t\tif (unlikely(n_dst > 0)) { /* oop sgl */\n+\t\t\tuint32_t tl_dst = 0;\n+\n+\t\t\tlist = (struct qat_sgl *)&cookie->qat_sgl_dst;\n+\n+\t\t\tfor (i = 0; i < n_dst; i++) {\n+\t\t\t\tlist->buffers[i].len = dst_vec[i].len;\n+\t\t\t\tlist->buffers[i].resrvd = 0;\n+\t\t\t\tlist->buffers[i].addr = dst_vec[i].iova;\n+\t\t\t\tif (tl_dst + dst_vec[i].len > UINT32_MAX) {\n+\t\t\t\t\tQAT_DP_LOG(ERR, \"Message too long\");\n+\t\t\t\t\treturn -ENOTSUP;\n+\t\t\t\t}\n+\n+\t\t\t\ttl_dst += dst_vec[i].len;\n+\t\t\t}\n+\n+\t\t\tif (tl_src != tl_dst)\n+\t\t\t\treturn -EINVAL;\n+\t\t\tlist->num_bufs = i;\n+\t\t\tdst_data_start = cookie->qat_sgl_dst_phys_addr;\n+\t\t} else\n+\t\t\tdst_data_start = src_data_start;\n+\t}\n+\n+\treq->comn_mid.src_data_addr = src_data_start;\n+\treq->comn_mid.dest_data_addr = dst_data_start;\n+\treq->comn_mid.src_length = total_len_src;\n+\treq->comn_mid.dst_length = total_len_dst;\n+\treq->comn_mid.opaque_data = (uintptr_t)opaque;\n+\n+\treturn tl_src;\n+}\n+\n+static __rte_always_inline uint64_t\n+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,\n+\t\tstruct qat_sym_session *ctx,\n+\t\tstruct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,\n+\t\tstruct rte_crypto_va_iova_ptr *cipher_iv,\n+\t\tstruct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,\n+\t\tstruct rte_crypto_va_iova_ptr *digest __rte_unused)\n+{\n+\tuint32_t cipher_len = 0, cipher_ofs = 0;\n+\tint n_src = 0;\n+\tint ret;\n+\n+\tret = qat_cipher_is_len_in_bits(ctx, op);\n+\tswitch (ret) {\n+\tcase 1:\n+\t\tcipher_len = op->sym->cipher.data.length >> 3;\n+\t\tcipher_ofs = op->sym->cipher.data.offset >> 3;\n+\t\tbreak;\n+\tcase 0:\n+\t\tif (ctx->bpi_ctx) {\n+\t\t\t/* DOCSIS - only send complete blocks to device.\n+\t\t\t * Process any partial block using CFB mode.\n+\t\t\t * Even if 0 complete blocks, still send this to device\n+\t\t\t * to get into rx queue for post-process and dequeuing\n+\t\t\t */\n+\t\t\tcipher_len = qat_bpicipher_preprocess(ctx, op);\n+\t\t\tcipher_ofs = op->sym->cipher.data.offset;\n+\t\t} else {\n+\t\t\tcipher_len = op->sym->cipher.data.length;\n+\t\t\tcipher_ofs = op->sym->cipher.data.offset;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tQAT_DP_LOG(ERR,\n+\t  \"SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values\");\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn UINT64_MAX;\n+\t}\n+\n+\tcipher_iv->va = rte_crypto_op_ctod_offset(op, void *,\n+\t\t\tctx->cipher_iv.offset);\n+\tcipher_iv->iova = rte_crypto_op_ctophys_offset(op,\n+\t\t\tctx->cipher_iv.offset);\n+\n+\tn_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,\n+\t\t\tcipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);\n+\tif (n_src < 0 || n_src > op->sym->m_src->nb_segs) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\treturn UINT64_MAX;\n+\t}\n+\n+\tin_sgl->num = n_src;\n+\n+\t/* Out-Of-Place operation */\n+\tif (unlikely((op->sym->m_dst != NULL) &&\n+\t\t\t(op->sym->m_dst != op->sym->m_src))) {\n+\t\tint n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,\n+\t\t\t\tcipher_len, out_sgl->vec,\n+\t\t\t\tQAT_SYM_SGL_MAX_NUMBER);\n+\n+\t\tif ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\t\treturn UINT64_MAX;\n+\t\t}\n+\n+\t\tout_sgl->num = n_dst;\n+\t} else\n+\t\tout_sgl->num = 0;\n+\n+\treturn 0;\n+}\n+\n+static __rte_always_inline uint64_t\n+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,\n+\t\tstruct qat_sym_session *ctx,\n+\t\tstruct rte_crypto_sgl *in_sgl, struct 
rte_crypto_sgl *out_sgl,\n+\t\tstruct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,\n+\t\tstruct rte_crypto_va_iova_ptr *auth_iv,\n+\t\tstruct rte_crypto_va_iova_ptr *digest)\n+{\n+\tuint32_t auth_ofs = 0, auth_len = 0;\n+\tint n_src, ret;\n+\n+\tret = qat_auth_is_len_in_bits(ctx, op);\n+\tswitch (ret) {\n+\tcase 1:\n+\t\tauth_ofs = op->sym->auth.data.offset >> 3;\n+\t\tauth_len = op->sym->auth.data.length >> 3;\n+\t\tauth_iv->va = rte_crypto_op_ctod_offset(op, void *,\n+\t\t\t\tctx->auth_iv.offset);\n+\t\tauth_iv->iova = rte_crypto_op_ctophys_offset(op,\n+\t\t\t\tctx->auth_iv.offset);\n+\t\tbreak;\n+\tcase 0:\n+\t\tif (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||\n+\t\t\tctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {\n+\t\t\t/* AES-GMAC */\n+\t\t\tauth_ofs = op->sym->auth.data.offset;\n+\t\t\tauth_len = op->sym->auth.data.length;\n+\t\t\tauth_iv->va = rte_crypto_op_ctod_offset(op, void *,\n+\t\t\t\t\tctx->auth_iv.offset);\n+\t\t\tauth_iv->iova = rte_crypto_op_ctophys_offset(op,\n+\t\t\t\t\tctx->auth_iv.offset);\n+\t\t} else {\n+\t\t\tauth_ofs = op->sym->auth.data.offset;\n+\t\t\tauth_len = op->sym->auth.data.length;\n+\t\t\tauth_iv->va = NULL;\n+\t\t\tauth_iv->iova = 0;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tQAT_DP_LOG(ERR,\n+\t\"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values\");\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn UINT64_MAX;\n+\t}\n+\n+\tn_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,\n+\t\t\tauth_ofs + auth_len, in_sgl->vec,\n+\t\t\tQAT_SYM_SGL_MAX_NUMBER);\n+\tif (n_src < 0 || n_src > op->sym->m_src->nb_segs) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\treturn UINT64_MAX;\n+\t}\n+\n+\tin_sgl->num = n_src;\n+\n+\t/* Out-Of-Place operation */\n+\tif (unlikely((op->sym->m_dst != NULL) &&\n+\t\t\t(op->sym->m_dst != op->sym->m_src))) {\n+\t\tint n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,\n+\t\t\t\tauth_ofs + auth_len, out_sgl->vec,\n+\t\t\t\tQAT_SYM_SGL_MAX_NUMBER);\n+\n+\t\tif ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\t\treturn UINT64_MAX;\n+\t\t}\n+\t\tout_sgl->num = n_dst;\n+\t} else\n+\t\tout_sgl->num = 0;\n+\n+\tdigest->va = (void *)op->sym->auth.digest.data;\n+\tdigest->iova = op->sym->auth.digest.phys_addr;\n+\n+\treturn 0;\n+}\n+\n+static __rte_always_inline uint64_t\n+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,\n+\t\tstruct qat_sym_session *ctx,\n+\t\tstruct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,\n+\t\tstruct rte_crypto_va_iova_ptr *cipher_iv,\n+\t\tstruct rte_crypto_va_iova_ptr *auth_iv_or_aad,\n+\t\tstruct rte_crypto_va_iova_ptr *digest)\n+{\n+\tunion rte_crypto_sym_ofs ofs;\n+\tuint32_t min_ofs = 0, max_len = 0;\n+\tuint32_t cipher_len = 0, cipher_ofs = 0;\n+\tuint32_t auth_len = 0, auth_ofs = 0;\n+\tint is_oop = (op->sym->m_dst != NULL) &&\n+\t\t\t(op->sym->m_dst != op->sym->m_src);\n+\tint is_sgl = op->sym->m_src->nb_segs > 1;\n+\tint n_src;\n+\tint ret;\n+\n+\tif (unlikely(is_oop))\n+\t\tis_sgl |= op->sym->m_dst->nb_segs > 1;\n+\n+\tcipher_iv->va = rte_crypto_op_ctod_offset(op, void *,\n+\t\t\tctx->cipher_iv.offset);\n+\tcipher_iv->iova = rte_crypto_op_ctophys_offset(op,\n+\t\t\tctx->cipher_iv.offset);\n+\tauth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,\n+\t\t\tctx->auth_iv.offset);\n+\tauth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,\n+\t\t\tctx->auth_iv.offset);\n+\tdigest->va = (void *)op->sym->auth.digest.data;\n+\tdigest->iova = 
op->sym->auth.digest.phys_addr;\n+\n+\tret = qat_cipher_is_len_in_bits(ctx, op);\n+\tswitch (ret) {\n+\tcase 1:\n+\t\tcipher_len = op->sym->aead.data.length >> 3;\n+\t\tcipher_ofs = op->sym->aead.data.offset >> 3;\n+\t\tbreak;\n+\tcase 0:\n+\t\tcipher_len = op->sym->aead.data.length;\n+\t\tcipher_ofs = op->sym->aead.data.offset;\n+\t\tbreak;\n+\tdefault:\n+\t\tQAT_DP_LOG(ERR,\n+\t\"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values\");\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tret = qat_auth_is_len_in_bits(ctx, op);\n+\tswitch (ret) {\n+\tcase 1:\n+\t\tauth_len = op->sym->auth.data.length >> 3;\n+\t\tauth_ofs = op->sym->auth.data.offset >> 3;\n+\t\tbreak;\n+\tcase 0:\n+\t\tauth_len = op->sym->auth.data.length;\n+\t\tauth_ofs = op->sym->auth.data.offset;\n+\t\tbreak;\n+\tdefault:\n+\t\tQAT_DP_LOG(ERR,\n+\t\"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values\");\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmin_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;\n+\tmax_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);\n+\n+\t/* digest in buffer check. Needed only for wireless algos */\n+\tif (ret == 1) {\n+\t\t/* Handle digest-encrypted cases, i.e.\n+\t\t * auth-gen-then-cipher-encrypt and\n+\t\t * cipher-decrypt-then-auth-verify\n+\t\t */\n+\t\tuint64_t auth_end_iova;\n+\n+\t\tif (unlikely(is_sgl)) {\n+\t\t\tuint32_t remaining_off = auth_ofs + auth_len;\n+\t\t\tstruct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :\n+\t\t\t\top->sym->m_src);\n+\n+\t\t\twhile (remaining_off >= rte_pktmbuf_data_len(sgl_buf)\n+\t\t\t\t\t&& sgl_buf->next != NULL) {\n+\t\t\t\tremaining_off -= rte_pktmbuf_data_len(sgl_buf);\n+\t\t\t\tsgl_buf = sgl_buf->next;\n+\t\t\t}\n+\n+\t\t\tauth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(\n+\t\t\t\tsgl_buf, remaining_off);\n+\t\t} else\n+\t\t\tauth_end_iova = (is_oop ?\n+\t\t\t\trte_pktmbuf_iova(op->sym->m_dst) :\n+\t\t\t\trte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +\n+\t\t\t\t\tauth_len;\n+\n+\t\t/* Then check if digest-encrypted conditions are met */\n+\t\tif ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&\n+\t\t\t\t(digest->iova == auth_end_iova))\n+\t\t\tmax_len = RTE_MAX(max_len, auth_ofs + auth_len +\n+\t\t\t\t\tctx->digest_length);\n+\t}\n+\n+\tn_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,\n+\t\t\tin_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);\n+\tif (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\treturn -1;\n+\t}\n+\tin_sgl->num = n_src;\n+\n+\tif (unlikely((op->sym->m_dst != NULL) &&\n+\t\t\t(op->sym->m_dst != op->sym->m_src))) {\n+\t\tint n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,\n+\t\t\t\tmax_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);\n+\n+\t\tif (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\t\treturn -1;\n+\t\t}\n+\t\tout_sgl->num = n_dst;\n+\t} else\n+\t\tout_sgl->num = 0;\n+\n+\tofs.ofs.cipher.head = cipher_ofs;\n+\tofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;\n+\tofs.ofs.auth.head = auth_ofs;\n+\tofs.ofs.auth.tail = max_len - auth_ofs - auth_len;\n+\n+\treturn ofs.raw;\n+}\n+\n+static __rte_always_inline uint64_t\n+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,\n+\t\tstruct qat_sym_session *ctx,\n+\t\tstruct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,\n+\t\tstruct rte_crypto_va_iova_ptr *cipher_iv,\n+\t\tstruct rte_crypto_va_iova_ptr 
*auth_iv_or_aad,\n+\t\tstruct rte_crypto_va_iova_ptr *digest)\n+{\n+\tuint32_t cipher_len = 0, cipher_ofs = 0;\n+\tint32_t n_src = 0;\n+\n+\tcipher_iv->va = rte_crypto_op_ctod_offset(op, void *,\n+\t\t\tctx->cipher_iv.offset);\n+\tcipher_iv->iova = rte_crypto_op_ctophys_offset(op,\n+\t\t\tctx->cipher_iv.offset);\n+\tauth_iv_or_aad->va = (void *)op->sym->aead.aad.data;\n+\tauth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;\n+\tdigest->va = (void *)op->sym->aead.digest.data;\n+\tdigest->iova = op->sym->aead.digest.phys_addr;\n+\n+\tcipher_len = op->sym->aead.data.length;\n+\tcipher_ofs = op->sym->aead.data.offset;\n+\n+\tn_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,\n+\t\t\tin_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);\n+\tif (n_src < 0 || n_src > op->sym->m_src->nb_segs) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\treturn UINT64_MAX;\n+\t}\n+\tin_sgl->num = n_src;\n+\n+\t/* Out-Of-Place operation */\n+\tif (unlikely((op->sym->m_dst != NULL) &&\n+\t\t\t(op->sym->m_dst != op->sym->m_src))) {\n+\t\tint n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,\n+\t\t\t\tcipher_len, out_sgl->vec,\n+\t\t\t\tQAT_SYM_SGL_MAX_NUMBER);\n+\t\tif (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\t\treturn UINT64_MAX;\n+\t\t}\n+\n+\t\tout_sgl->num = n_dst;\n+\t} else\n+\t\tout_sgl->num = 0;\n+\n+\treturn 0;\n+}\n+\n+static __rte_always_inline void\n+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,\n+\t\tstruct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,\n+\t\tstruct icp_qat_fw_la_bulk_req *qat_req)\n+{\n+\t/* copy IV into request if it fits */\n+\tif (iv_len <= sizeof(cipher_param->u.cipher_IV_array))\n+\t\trte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,\n+\t\t\t\tiv_len);\n+\telse {\n+\t\tICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(\n+\t\t\t\tqat_req->comn_hdr.serv_specif_flags,\n+\t\t\t\tICP_QAT_FW_CIPH_IV_64BIT_PTR);\n+\t\tcipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;\n+\t}\n+}\n+\n+static __rte_always_inline void\n+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < n; i++)\n+\t\tsta[i] = status;\n+}\n+\n+static __rte_always_inline void\n+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,\n+\tstruct icp_qat_fw_la_bulk_req *req,\n+\tstruct rte_crypto_va_iova_ptr *iv,\n+\tunion rte_crypto_sym_ofs ofs, uint32_t data_len)\n+{\n+\tstruct icp_qat_fw_la_cipher_req_params *cipher_param;\n+\n+\tcipher_param = (void *)&req->serv_specif_rqpars;\n+\n+\t/* cipher IV */\n+\tqat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);\n+\tcipher_param->cipher_offset = ofs.ofs.cipher.head;\n+\tcipher_param->cipher_length = data_len - ofs.ofs.cipher.head -\n+\t\t\tofs.ofs.cipher.tail;\n+}\n+\n+static __rte_always_inline void\n+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,\n+\tstruct icp_qat_fw_la_bulk_req *req,\n+\tstruct rte_crypto_va_iova_ptr *digest,\n+\tstruct rte_crypto_va_iova_ptr *auth_iv,\n+\tunion rte_crypto_sym_ofs ofs, uint32_t data_len)\n+{\n+\tstruct icp_qat_fw_la_cipher_req_params *cipher_param;\n+\tstruct icp_qat_fw_la_auth_req_params *auth_param;\n+\n+\tcipher_param = (void *)&req->serv_specif_rqpars;\n+\tauth_param = (void *)((uint8_t *)cipher_param +\n+\t\t\tICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);\n+\n+\tauth_param->auth_off = ofs.ofs.auth.head;\n+\tauth_param->auth_len = data_len - ofs.ofs.auth.head -\n+\t\t\tofs.ofs.auth.tail;\n+\tauth_param->auth_res_addr = digest->iova;\n+\n+\tswitch (ctx->qat_hash_alg) 
{\n+\tcase ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:\n+\tcase ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:\n+\tcase ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:\n+\t\tauth_param->u1.aad_adr = auth_iv->iova;\n+\t\tbreak;\n+\tcase ICP_QAT_HW_AUTH_ALGO_GALOIS_128:\n+\tcase ICP_QAT_HW_AUTH_ALGO_GALOIS_64:\n+\t\tICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(\n+\t\t\treq->comn_hdr.serv_specif_flags,\n+\t\t\t\tICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);\n+\t\trte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,\n+\t\t\t\tctx->auth_iv.length);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+}\n+\n+static __rte_always_inline int\n+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,\n+\tstruct icp_qat_fw_la_bulk_req *req,\n+\tstruct rte_crypto_vec *src_vec,\n+\tuint16_t n_src_vecs,\n+\tstruct rte_crypto_vec *dst_vec,\n+\tuint16_t n_dst_vecs,\n+\tstruct rte_crypto_va_iova_ptr *cipher_iv,\n+\tstruct rte_crypto_va_iova_ptr *digest,\n+\tstruct rte_crypto_va_iova_ptr *auth_iv,\n+\tunion rte_crypto_sym_ofs ofs, uint32_t data_len)\n+{\n+\tstruct icp_qat_fw_la_cipher_req_params *cipher_param;\n+\tstruct icp_qat_fw_la_auth_req_params *auth_param;\n+\tstruct rte_crypto_vec *cvec = n_dst_vecs > 0 ?\n+\t\t\tdst_vec : src_vec;\n+\trte_iova_t auth_iova_end;\n+\tint cipher_len, auth_len;\n+\tint is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;\n+\n+\tcipher_param = (void *)&req->serv_specif_rqpars;\n+\tauth_param = (void *)((uint8_t *)cipher_param +\n+\t\t\tICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);\n+\n+\tcipher_len = data_len - ofs.ofs.cipher.head -\n+\t\t\tofs.ofs.cipher.tail;\n+\tauth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;\n+\n+\tif (unlikely(cipher_len < 0 || auth_len < 0))\n+\t\treturn -1;\n+\n+\tcipher_param->cipher_offset = ofs.ofs.cipher.head;\n+\tcipher_param->cipher_length = cipher_len;\n+\tqat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);\n+\n+\tauth_param->auth_off = ofs.ofs.auth.head;\n+\tauth_param->auth_len = auth_len;\n+\tauth_param->auth_res_addr = digest->iova;\n+\n+\tswitch (ctx->qat_hash_alg) {\n+\tcase ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:\n+\tcase ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:\n+\tcase ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:\n+\t\tauth_param->u1.aad_adr = auth_iv->iova;\n+\t\tbreak;\n+\tcase ICP_QAT_HW_AUTH_ALGO_GALOIS_128:\n+\tcase ICP_QAT_HW_AUTH_ALGO_GALOIS_64:\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tif (unlikely(is_sgl)) {\n+\t\t/* sgl */\n+\t\tint i = n_dst_vecs ? 
n_dst_vecs : n_src_vecs;\n+\t\tuint32_t remaining_off = data_len - ofs.ofs.auth.tail;\n+\n+\t\twhile (remaining_off >= cvec->len && i >= 1) {\n+\t\t\ti--;\n+\t\t\tremaining_off -= cvec->len;\n+\t\t\tcvec++;\n+\t\t}\n+\n+\t\tauth_iova_end = cvec->iova + remaining_off;\n+\t} else\n+\t\tauth_iova_end = cvec[0].iova + auth_param->auth_off +\n+\t\t\tauth_param->auth_len;\n+\n+\t/* Then check if digest-encrypted conditions are met */\n+\tif ((auth_param->auth_off + auth_param->auth_len <\n+\t\tcipher_param->cipher_offset + cipher_param->cipher_length) &&\n+\t\t\t(digest->iova == auth_iova_end)) {\n+\t\t/* Handle partial digest encryption */\n+\t\tif (cipher_param->cipher_offset + cipher_param->cipher_length <\n+\t\t\tauth_param->auth_off + auth_param->auth_len +\n+\t\t\t\tctx->digest_length && !is_sgl)\n+\t\t\treq->comn_mid.dst_length = req->comn_mid.src_length =\n+\t\t\t\tauth_param->auth_off + auth_param->auth_len +\n+\t\t\t\t\tctx->digest_length;\n+\t\tstruct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;\n+\t\tICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,\n+\t\t\tICP_QAT_FW_LA_DIGEST_IN_BUFFER);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static __rte_always_inline void\n+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,\n+\tstruct icp_qat_fw_la_bulk_req *req,\n+\tstruct rte_crypto_va_iova_ptr *iv,\n+\tstruct rte_crypto_va_iova_ptr *digest,\n+\tstruct rte_crypto_va_iova_ptr *aad,\n+\tunion rte_crypto_sym_ofs ofs, uint32_t data_len)\n+{\n+\tstruct icp_qat_fw_la_cipher_req_params *cipher_param =\n+\t\t(void *)&req->serv_specif_rqpars;\n+\tstruct icp_qat_fw_la_auth_req_params *auth_param =\n+\t\t(void *)((uint8_t *)&req->serv_specif_rqpars +\n+\t\tICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);\n+\tuint8_t *aad_data;\n+\tuint8_t aad_ccm_real_len;\n+\tuint8_t aad_len_field_sz;\n+\tuint32_t msg_len_be;\n+\trte_iova_t aad_iova = 0;\n+\tuint8_t q;\n+\n+\tswitch (ctx->qat_hash_alg) {\n+\tcase ICP_QAT_HW_AUTH_ALGO_GALOIS_128:\n+\tcase ICP_QAT_HW_AUTH_ALGO_GALOIS_64:\n+\t\tICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(\n+\t\t\treq->comn_hdr.serv_specif_flags,\n+\t\t\t\tICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);\n+\t\trte_memcpy(cipher_param->u.cipher_IV_array, iv->va,\n+\t\t\t\tctx->cipher_iv.length);\n+\t\taad_iova = aad->iova;\n+\t\tbreak;\n+\tcase ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:\n+\t\taad_data = aad->va;\n+\t\taad_iova = aad->iova;\n+\t\taad_ccm_real_len = 0;\n+\t\taad_len_field_sz = 0;\n+\t\tmsg_len_be = rte_bswap32((uint32_t)data_len -\n+\t\t\t\tofs.ofs.cipher.head);\n+\n+\t\tif (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {\n+\t\t\taad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;\n+\t\t\taad_ccm_real_len = ctx->aad_len -\n+\t\t\t\tICP_QAT_HW_CCM_AAD_B0_LEN -\n+\t\t\t\tICP_QAT_HW_CCM_AAD_LEN_INFO;\n+\t\t} else {\n+\t\t\taad_data = iv->va;\n+\t\t\taad_iova = iv->iova;\n+\t\t}\n+\n+\t\tq = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;\n+\t\taad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(\n+\t\t\taad_len_field_sz, ctx->digest_length, q);\n+\t\tif (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {\n+\t\t\tmemcpy(aad_data\t+ ctx->cipher_iv.length +\n+\t\t\t\tICP_QAT_HW_CCM_NONCE_OFFSET + (q -\n+\t\t\t\tICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),\n+\t\t\t\t(uint8_t *)&msg_len_be,\n+\t\t\t\tICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);\n+\t\t} else {\n+\t\t\tmemcpy(aad_data\t+ ctx->cipher_iv.length +\n+\t\t\t\tICP_QAT_HW_CCM_NONCE_OFFSET,\n+\t\t\t\t(uint8_t *)&msg_len_be +\n+\t\t\t\t(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE\n+\t\t\t\t- q), q);\n+\t\t}\n+\n+\t\tif (aad_len_field_sz > 0) {\n+\t\t\t*(uint16_t 
*)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =\n+\t\t\t\trte_bswap16(aad_ccm_real_len);\n+\n+\t\t\tif ((aad_ccm_real_len + aad_len_field_sz)\n+\t\t\t\t% ICP_QAT_HW_CCM_AAD_B0_LEN) {\n+\t\t\t\tuint8_t pad_len = 0;\n+\t\t\t\tuint8_t pad_idx = 0;\n+\n+\t\t\t\tpad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -\n+\t\t\t\t\t((aad_ccm_real_len +\n+\t\t\t\t\taad_len_field_sz) %\n+\t\t\t\t\tICP_QAT_HW_CCM_AAD_B0_LEN);\n+\t\t\t\tpad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +\n+\t\t\t\t\taad_ccm_real_len +\n+\t\t\t\t\taad_len_field_sz;\n+\t\t\t\tmemset(&aad_data[pad_idx], 0, pad_len);\n+\t\t\t}\n+\t\t}\n+\n+\t\trte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)\n+\t\t\t+ ICP_QAT_HW_CCM_NONCE_OFFSET,\n+\t\t\t(uint8_t *)iv->va +\n+\t\t\tICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);\n+\t\t*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =\n+\t\t\tq - ICP_QAT_HW_CCM_NONCE_OFFSET;\n+\n+\t\trte_memcpy((uint8_t *)aad->va +\n+\t\t\t\tICP_QAT_HW_CCM_NONCE_OFFSET,\n+\t\t\t(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,\n+\t\t\tctx->cipher_iv.length);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tcipher_param->cipher_offset = ofs.ofs.cipher.head;\n+\tcipher_param->cipher_length = data_len - ofs.ofs.cipher.head -\n+\t\t\tofs.ofs.cipher.tail;\n+\tauth_param->auth_off = ofs.ofs.cipher.head;\n+\tauth_param->auth_len = cipher_param->cipher_length;\n+\tauth_param->auth_res_addr = digest->iova;\n+\tauth_param->u1.aad_adr = aad_iova;\n+}\n \n extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;\n extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;\n \n+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */\n+int\n+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,\n+\tuint8_t *out_msg, void *op_cookie);\n+\n+int\n+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,\n+\t\tuint8_t *out_msg, void *op_cookie);\n+\n+int\n+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,\n+\t\tuint8_t *out_msg, void *op_cookie);\n+\n+int\n+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,\n+\t\tuint8_t *out_msg, void *op_cookie);\n+\n /* -----------------GENx control path APIs ---------------- */\n uint64_t\n qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);\n \n+int\n+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);\n+\n void\n qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,\n \t\tuint8_t hash_flag);\ndiff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c\nindex 90b3ec803c..c429825a67 100644\n--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c\n+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c\n@@ -1,5 +1,5 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017-2021 Intel Corporation\n+ * Copyright(c) 2017-2022 Intel Corporation\n  */\n \n #include <rte_cryptodev.h>\n@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(\n \treturn feature_flags;\n }\n \n+int\n+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,\n+\t\tuint8_t *out_msg, void *op_cookie)\n+{\n+\tregister struct icp_qat_fw_la_bulk_req *req;\n+\tstruct rte_crypto_op *op = in_op;\n+\tstruct qat_sym_op_cookie *cookie = op_cookie;\n+\tstruct rte_crypto_sgl in_sgl, out_sgl;\n+\tstruct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],\n+\t\t\tout_vec[QAT_SYM_SGL_MAX_NUMBER];\n+\tstruct rte_crypto_va_iova_ptr cipher_iv;\n+\tunion rte_crypto_sym_ofs ofs;\n+\tint32_t total_len;\n+\n+\tin_sgl.vec = in_vec;\n+\tout_sgl.vec = out_vec;\n+\n+\treq = (struct 
icp_qat_fw_la_bulk_req *)out_msg;\n+\trte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));\n+\n+\tofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,\n+\t\t\t&cipher_iv, NULL, NULL);\n+\tif (unlikely(ofs.raw == UINT64_MAX)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttotal_len = qat_sym_build_req_set_data(req, in_op, cookie,\n+\t\t\tin_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);\n+\tif (unlikely(total_len < 0)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tenqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);\n+\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\tqat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,\n+\t\t\tNULL, NULL, NULL);\n+#endif\n+\n+\treturn 0;\n+}\n+\n+int\n+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,\n+\t\tuint8_t *out_msg, void *op_cookie)\n+{\n+\tregister struct icp_qat_fw_la_bulk_req *req;\n+\tstruct rte_crypto_op *op = in_op;\n+\tstruct qat_sym_op_cookie *cookie = op_cookie;\n+\tstruct rte_crypto_sgl in_sgl, out_sgl;\n+\tstruct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],\n+\t\t\tout_vec[QAT_SYM_SGL_MAX_NUMBER];\n+\tstruct rte_crypto_va_iova_ptr auth_iv;\n+\tstruct rte_crypto_va_iova_ptr digest;\n+\tunion rte_crypto_sym_ofs ofs;\n+\tint32_t total_len;\n+\n+\tin_sgl.vec = in_vec;\n+\tout_sgl.vec = out_vec;\n+\n+\treq = (struct icp_qat_fw_la_bulk_req *)out_msg;\n+\trte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));\n+\n+\tofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,\n+\t\t\tNULL, &auth_iv, &digest);\n+\tif (unlikely(ofs.raw == UINT64_MAX)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttotal_len = qat_sym_build_req_set_data(req, in_op, cookie,\n+\t\t\tin_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);\n+\tif (unlikely(total_len < 0)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tenqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,\n+\t\t\ttotal_len);\n+\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\tqat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,\n+\t\t\t&auth_iv, NULL, &digest);\n+#endif\n+\n+\treturn 0;\n+}\n+\n+int\n+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,\n+\t\tuint8_t *out_msg, void *op_cookie)\n+{\n+\tregister struct icp_qat_fw_la_bulk_req *req;\n+\tstruct rte_crypto_op *op = in_op;\n+\tstruct qat_sym_op_cookie *cookie = op_cookie;\n+\tstruct rte_crypto_sgl in_sgl, out_sgl;\n+\tstruct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],\n+\t\t\tout_vec[QAT_SYM_SGL_MAX_NUMBER];\n+\tstruct rte_crypto_va_iova_ptr cipher_iv;\n+\tstruct rte_crypto_va_iova_ptr aad;\n+\tstruct rte_crypto_va_iova_ptr digest;\n+\tunion rte_crypto_sym_ofs ofs;\n+\tint32_t total_len;\n+\n+\tin_sgl.vec = in_vec;\n+\tout_sgl.vec = out_vec;\n+\n+\treq = (struct icp_qat_fw_la_bulk_req *)out_msg;\n+\trte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));\n+\n+\tofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,\n+\t\t\t&cipher_iv, &aad, &digest);\n+\tif (unlikely(ofs.raw == UINT64_MAX)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttotal_len = qat_sym_build_req_set_data(req, in_op, cookie,\n+\t\t\tin_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);\n+\tif (unlikely(total_len < 0)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn 
-EINVAL;\n+\t}\n+\n+\tenqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,\n+\t\ttotal_len);\n+\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\tqat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,\n+\t\t\tNULL, &aad, &digest);\n+#endif\n+\n+\treturn 0;\n+}\n+\n+int\n+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,\n+\t\tuint8_t *out_msg, void *op_cookie)\n+{\n+\tregister struct icp_qat_fw_la_bulk_req *req;\n+\tstruct rte_crypto_op *op = in_op;\n+\tstruct qat_sym_op_cookie *cookie = op_cookie;\n+\tstruct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};\n+\tstruct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],\n+\t\t\tout_vec[QAT_SYM_SGL_MAX_NUMBER];\n+\tstruct rte_crypto_va_iova_ptr cipher_iv;\n+\tstruct rte_crypto_va_iova_ptr auth_iv;\n+\tstruct rte_crypto_va_iova_ptr digest;\n+\tunion rte_crypto_sym_ofs ofs;\n+\tint32_t total_len;\n+\n+\tin_sgl.vec = in_vec;\n+\tout_sgl.vec = out_vec;\n+\n+\treq = (struct icp_qat_fw_la_bulk_req *)out_msg;\n+\trte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));\n+\n+\tofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,\n+\t\t\t&cipher_iv, &auth_iv, &digest);\n+\tif (unlikely(ofs.raw == UINT64_MAX)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttotal_len = qat_sym_build_req_set_data(req, in_op, cookie,\n+\t\t\tin_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);\n+\tif (unlikely(total_len < 0)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tenqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,\n+\t\t\tout_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,\n+\t\t\tofs, total_len);\n+\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\tqat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,\n+\t\t\t&auth_iv, &digest);\n+#endif\n+\n+\treturn 0;\n+}\n+\n #ifdef RTE_LIB_SECURITY\n \n #define QAT_SECURITY_SYM_CAPABILITIES\t\t\t\t\t\\\ndiff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c\nindex 00ec703754..f814bf8f75 100644\n--- a/drivers/crypto/qat/qat_sym.c\n+++ b/drivers/crypto/qat/qat_sym.c\n@@ -1,5 +1,5 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2015-2019 Intel Corporation\n+ * Copyright(c) 2015-2022 Intel Corporation\n  */\n \n #include <openssl/evp.h>\n@@ -11,93 +11,7 @@\n #include <rte_byteorder.h>\n \n #include \"qat_sym.h\"\n-\n-\n-/** Decrypt a single partial block\n- *  Depends on openssl libcrypto\n- *  Uses ECB+XOR to do CFB encryption, same result, more performant\n- */\n-static inline int\n-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,\n-\t\tuint8_t *iv, int ivlen, int srclen,\n-\t\tvoid *bpi_ctx)\n-{\n-\tEVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;\n-\tint encrypted_ivlen;\n-\tuint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];\n-\tuint8_t *encr = encrypted_iv;\n-\n-\t/* ECB method: encrypt (not decrypt!) 
the IV, then XOR with plaintext */\n-\tif (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)\n-\t\t\t\t\t\t\t\t<= 0)\n-\t\tgoto cipher_decrypt_err;\n-\n-\tfor (; srclen != 0; --srclen, ++dst, ++src, ++encr)\n-\t\t*dst = *src ^ *encr;\n-\n-\treturn 0;\n-\n-cipher_decrypt_err:\n-\tQAT_DP_LOG(ERR, \"libcrypto ECB cipher decrypt for BPI IV failed\");\n-\treturn -EINVAL;\n-}\n-\n-\n-static inline uint32_t\n-qat_bpicipher_preprocess(struct qat_sym_session *ctx,\n-\t\t\t\tstruct rte_crypto_op *op)\n-{\n-\tint block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);\n-\tstruct rte_crypto_sym_op *sym_op = op->sym;\n-\tuint8_t last_block_len = block_len > 0 ?\n-\t\t\tsym_op->cipher.data.length % block_len : 0;\n-\n-\tif (last_block_len &&\n-\t\t\tctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {\n-\n-\t\t/* Decrypt last block */\n-\t\tuint8_t *last_block, *dst, *iv;\n-\t\tuint32_t last_block_offset = sym_op->cipher.data.offset +\n-\t\t\t\tsym_op->cipher.data.length - last_block_len;\n-\t\tlast_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,\n-\t\t\t\tuint8_t *, last_block_offset);\n-\n-\t\tif (unlikely((sym_op->m_dst != NULL)\n-\t\t\t\t&& (sym_op->m_dst != sym_op->m_src)))\n-\t\t\t/* out-of-place operation (OOP) */\n-\t\t\tdst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,\n-\t\t\t\t\t\tuint8_t *, last_block_offset);\n-\t\telse\n-\t\t\tdst = last_block;\n-\n-\t\tif (last_block_len < sym_op->cipher.data.length)\n-\t\t\t/* use previous block ciphertext as IV */\n-\t\t\tiv = last_block - block_len;\n-\t\telse\n-\t\t\t/* runt block, i.e. less than one full block */\n-\t\t\tiv = rte_crypto_op_ctod_offset(op, uint8_t *,\n-\t\t\t\t\tctx->cipher_iv.offset);\n-\n-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n-\t\tQAT_DP_HEXDUMP_LOG(DEBUG, \"BPI: src before pre-process:\",\n-\t\t\tlast_block, last_block_len);\n-\t\tif (sym_op->m_dst != NULL)\n-\t\t\tQAT_DP_HEXDUMP_LOG(DEBUG, \"BPI:dst before pre-process:\",\n-\t\t\tdst, last_block_len);\n-#endif\n-\t\tbpi_cipher_decrypt(last_block, dst, iv, block_len,\n-\t\t\t\tlast_block_len, ctx->bpi_ctx);\n-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n-\t\tQAT_DP_HEXDUMP_LOG(DEBUG, \"BPI: src after pre-process:\",\n-\t\t\tlast_block, last_block_len);\n-\t\tif (sym_op->m_dst != NULL)\n-\t\t\tQAT_DP_HEXDUMP_LOG(DEBUG, \"BPI: dst after pre-process:\",\n-\t\t\tdst, last_block_len);\n-#endif\n-\t}\n-\n-\treturn sym_op->cipher.data.length - last_block_len;\n-}\n+#include \"dev/qat_crypto_pmd_gens.h\"\n \n static inline void\n set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,\n",
    "prefixes": [
        "v9",
        "2/9"
    ]
}
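
The put and patch operations listed at the top modify this record and require write access to the project. Below is a hedged sketch that downloads the patch in mbox form (the mbox field above) and issues a partial update. The API token and the "Authorization: Token ..." header are assumptions about a typical Patchwork deployment, not something shown in this response, and the server decides which fields a given user may change.

# Minimal sketch, continuing from the GET example above: save the patch as an
# mbox file (which `git am` can apply) and, with suitable credentials, flip one
# of the writable fields via HTTP PATCH.
# The token value and the "Authorization: Token ..." header are assumptions
# about a typical Patchwork setup, not part of the response shown here.
import requests

PATCH_URL = "http://patches.dpdk.org/api/patches/107820/"
API_TOKEN = "replace-with-a-real-token"  # hypothetical placeholder

patch = requests.get(PATCH_URL, headers={"Accept": "application/json"}).json()

# Save the mbox so it can be applied locally, e.g. `git am 107820.mbox`.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("107820.mbox", "wb") as f:
    f.write(mbox.content)

# Partial update of a writable field (requires maintainer permissions),
# e.g. un-archiving the patch.
update = requests.patch(
    PATCH_URL,
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"archived": False},
)
print(update.status_code, update.reason)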