From patchwork Fri Apr 21 13:12:13 2023
X-Patchwork-Submitter: "Power, Ciara"
X-Patchwork-Id: 126389
X-Patchwork-Delegate: gakhil@marvell.com
From: Ciara Power
To: dev@dpdk.org
Cc: kai.ji@intel.com, Pablo de Lara, Ciara Power
Subject: [PATCH 1/8] crypto/ipsec_mb: use GMAC dedicated algorithms
Date: Fri, 21 Apr 2023 13:12:13 +0000
Message-Id: <20230421131221.1732314-2-ciara.power@intel.com>
In-Reply-To: <20230421131221.1732314-1-ciara.power@intel.com>
References: <20230421131221.1732314-1-ciara.power@intel.com>

From: Pablo de Lara

AES-GMAC can be done with auth-only enums IMB_AES_GMAC_128/192/256, which allows another cipher algorithm to be used, instead of being part of AES-GCM.
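A minimal sketch of the mapping this change introduces, using the IMB_* names from the diff below; the helper name and its fallback return value are assumptions made only for this illustration:

#include <stdint.h>
#include <intel-ipsec-mb.h>

/* With the dedicated GMAC enums, the hash algorithm is selected from the
 * authentication key length instead of forcing IMB_AUTH_AES_GMAC together
 * with IMB_CIPHER_GCM. */
static IMB_HASH_ALG
gmac_hash_alg_from_key_len(uint16_t key_len)
{
	switch (key_len) {
	case IMB_KEY_128_BYTES:
		return IMB_AUTH_AES_GMAC_128;
	case IMB_KEY_192_BYTES:
		return IMB_AUTH_AES_GMAC_192;
	case IMB_KEY_256_BYTES:
		return IMB_AUTH_AES_GMAC_256;
	default:
		return IMB_AUTH_NULL; /* unsupported key size (assumption) */
	}
}

Keeping job->cipher_mode free for a separate cipher algorithm is also what lets is_aead_algo() drop its GMAC special case in the first hunk below.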
Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 104 +++++++++++-------------- 1 file changed, 47 insertions(+), 57 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index ac20d01937..c53548aa3b 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -57,8 +57,7 @@ is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode) { return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 || hash_alg == IMB_AUTH_AES_CCM || - (hash_alg == IMB_AUTH_AES_GMAC && - cipher_mode == IMB_CIPHER_GCM)); + cipher_mode == IMB_CIPHER_GCM); } /** Set session authentication parameters */ @@ -155,7 +154,6 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } else sess->cipher.direction = IMB_DIR_DECRYPT; - sess->auth.algo = IMB_AUTH_AES_GMAC; if (sess->auth.req_digest_len > get_digest_byte_length(IMB_AUTH_AES_GMAC)) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); @@ -167,16 +165,19 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, switch (xform->auth.key.length) { case IMB_KEY_128_BYTES: + sess->auth.algo = IMB_AUTH_AES_GMAC_128; IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; break; case IMB_KEY_192_BYTES: + sess->auth.algo = IMB_AUTH_AES_GMAC_192; IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES; break; case IMB_KEY_256_BYTES: + sess->auth.algo = IMB_AUTH_AES_GMAC_256; IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; @@ -1039,19 +1040,20 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, break; case IMB_AUTH_AES_GMAC: - if (session->cipher.mode == IMB_CIPHER_GCM) { - job->u.GCM.aad = aad->va; - job->u.GCM.aad_len_in_bytes = session->aead.aad_len; - } else { - /* For GMAC */ - job->u.GCM.aad = buf; - job->u.GCM.aad_len_in_bytes = len; - job->cipher_mode = IMB_CIPHER_GCM; - } + job->u.GCM.aad = aad->va; + job->u.GCM.aad_len_in_bytes = session->aead.aad_len; job->enc_keys = &session->cipher.gcm_key; job->dec_keys = &session->cipher.gcm_key; break; + case IMB_AUTH_AES_GMAC_128: + case IMB_AUTH_AES_GMAC_192: + case IMB_AUTH_AES_GMAC_256: + job->u.GMAC._key = &session->cipher.gcm_key; + job->u.GMAC._iv = iv->va; + job->u.GMAC.iv_len_in_bytes = session->iv.length; + break; + case IMB_AUTH_CHACHA20_POLY1305: job->u.CHACHA20_POLY1305.aad = aad->va; job->u.CHACHA20_POLY1305.aad_len_in_bytes = @@ -1091,16 +1093,10 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, job->dst = (uint8_t *)buf + sofs.ofs.cipher.head; job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head; job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head; - if (job->hash_alg == IMB_AUTH_AES_GMAC && - session->cipher.mode != IMB_CIPHER_GCM) { - job->msg_len_to_hash_in_bytes = 0; - job->msg_len_to_cipher_in_bytes = 0; - } else { - job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head - - sofs.ofs.auth.tail; - job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head - - sofs.ofs.cipher.tail; - } + job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head - + sofs.ofs.auth.tail; + job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head - + sofs.ofs.cipher.tail; job->user_data = udata; } @@ -1184,8 +1180,6 @@ sgl_linear_cipher_auth_len(IMB_JOB *job, uint64_t *auth_len) job->hash_alg == 
IMB_AUTH_ZUC_EIA3_BITLEN) *auth_len = (job->msg_len_to_hash_in_bits >> 3) + job->hash_start_src_offset_in_bytes; - else if (job->hash_alg == IMB_AUTH_AES_GMAC) - *auth_len = job->u.GCM.aad_len_in_bytes; else *auth_len = job->msg_len_to_hash_in_bytes + job->hash_start_src_offset_in_bytes; @@ -1352,24 +1346,24 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, break; case IMB_AUTH_AES_GMAC: - if (session->cipher.mode == IMB_CIPHER_GCM) { - job->u.GCM.aad = op->sym->aead.aad.data; - job->u.GCM.aad_len_in_bytes = session->aead.aad_len; - if (sgl) { - job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; - job->cipher_mode = IMB_CIPHER_GCM_SGL; - job->hash_alg = IMB_AUTH_GCM_SGL; - } - } else { - /* For GMAC */ - job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src, - uint8_t *, op->sym->auth.data.offset); - job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length; - job->cipher_mode = IMB_CIPHER_GCM; + job->u.GCM.aad = op->sym->aead.aad.data; + job->u.GCM.aad_len_in_bytes = session->aead.aad_len; + if (sgl) { + job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; + job->cipher_mode = IMB_CIPHER_GCM_SGL; + job->hash_alg = IMB_AUTH_GCM_SGL; } job->enc_keys = &session->cipher.gcm_key; job->dec_keys = &session->cipher.gcm_key; break; + case IMB_AUTH_AES_GMAC_128: + case IMB_AUTH_AES_GMAC_192: + case IMB_AUTH_AES_GMAC_256: + job->u.GMAC._key = &session->cipher.gcm_key; + job->u.GMAC._iv = rte_crypto_op_ctod_offset(op, uint8_t *, + session->auth_iv.offset); + job->u.GMAC.iv_len_in_bytes = session->auth_iv.length; + break; case IMB_AUTH_ZUC_EIA3_BITLEN: case IMB_AUTH_ZUC256_EIA3_BITLEN: job->u.ZUC_EIA3._key = session->auth.zuc_auth_key; @@ -1472,19 +1466,21 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, break; case IMB_AUTH_AES_GMAC: - if (session->cipher.mode == IMB_CIPHER_GCM) { - job->hash_start_src_offset_in_bytes = - op->sym->aead.data.offset; - job->msg_len_to_hash_in_bytes = - op->sym->aead.data.length; - } else { /* AES-GMAC only, only AAD used */ - job->msg_len_to_hash_in_bytes = 0; - job->hash_start_src_offset_in_bytes = 0; - } - + job->hash_start_src_offset_in_bytes = + op->sym->aead.data.offset; + job->msg_len_to_hash_in_bytes = + op->sym->aead.data.length; job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset); break; + case IMB_AUTH_AES_GMAC_128: + case IMB_AUTH_AES_GMAC_192: + case IMB_AUTH_AES_GMAC_256: + job->hash_start_src_offset_in_bytes = + op->sym->auth.data.offset; + job->msg_len_to_hash_in_bytes = + op->sym->auth.data.length; + break; case IMB_AUTH_GCM_SGL: case IMB_AUTH_CHACHA20_POLY1305_SGL: @@ -1567,15 +1563,9 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, op->sym->cipher.data.length; break; case IMB_CIPHER_GCM: - if (session->cipher.mode == IMB_CIPHER_NULL) { - /* AES-GMAC only (only AAD used) */ - job->msg_len_to_cipher_in_bytes = 0; - job->cipher_start_src_offset_in_bytes = 0; - } else { - job->cipher_start_src_offset_in_bytes = - op->sym->aead.data.offset; - job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; - } + job->cipher_start_src_offset_in_bytes = + op->sym->aead.data.offset; + job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; break; case IMB_CIPHER_CCM: case IMB_CIPHER_CHACHA20_POLY1305: From patchwork Fri Apr 21 13:12:14 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126390 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from 
mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id BEFCD429AD; Fri, 21 Apr 2023 15:12:43 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0EFF942D0B; Fri, 21 Apr 2023 15:12:35 +0200 (CEST) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id BDD394114B for ; Fri, 21 Apr 2023 15:12:31 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1682082752; x=1713618752; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=bpfEtkW9RAKKGKZGWuUmt3vz7R8QH+LLew6CWJvmbak=; b=SWaEHekLdT+2RMDabQ4QMcb20mWibC9DWz44q7GgtZbvo8cx1TGSBqpX n9J/hoS7nbp7ZkII4ohMZT4Fdm5HpU5gwCUJLOWFtVYZeqxbki999rpFe pVuzRGjnhM4zvv7PlkyjaJDNxIWhB1c7a9bc1iAfLgvmKO4c9xIFGXnE4 dqsWxguFhQeFU7gxF2dxG36EhPzQJ8N4BVYp2ZSOKFRVM330shzPALFkP 5UzfRQhHpk5XnEo0rzIOrtwhKGlrhmslJyCVyKQsqhMnad3MIgUUoLXJy jsD83giih+F+mx9vNPdWbnN8OP1uTuJqsec8sk1AxJPb7x2/35z+Yx45U w==; X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="408927704" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="408927704" Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 21 Apr 2023 06:12:31 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="724817346" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="724817346" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga001.jf.intel.com with ESMTP; 21 Apr 2023 06:12:29 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, Marcel Cornu , Pablo de Lara , Ciara Power Subject: [PATCH 2/8] crypto/ipsec_mb: use burst API in aesni_mb Date: Fri, 21 Apr 2023 13:12:14 +0000 Message-Id: <20230421131221.1732314-3-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230421131221.1732314-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Marcel Cornu Use new ipsec_mb burst API in dequeue burst function, when ipsec_mb version is v1.3 or newer. 
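A hedged sketch of the v1.3+ burst cycle the new dequeue path is built around; the IMB_* burst calls are those used in the diff, while fill_job() and complete_job() are placeholders standing in for the driver's set_mb_job_params() and post_process_mb_job():

#include <stdint.h>
#include <intel-ipsec-mb.h>

/* Placeholders for the driver's job setup and completion handling. */
static void fill_job(IMB_JOB *job) { (void)job; }
static void complete_job(IMB_JOB *job) { (void)job; }

static void
burst_cycle_sketch(IMB_MGR *mb_mgr, uint16_t n)
{
	IMB_JOB *jobs[IMB_MAX_BURST_SIZE];
	uint16_t i, nb_jobs;

	/* Acquire up to n free jobs; if fewer are free, flush completed
	 * jobs out of the manager and try again. */
	while (IMB_GET_NEXT_BURST(mb_mgr, n, jobs) < n) {
		nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs);
		for (i = 0; i < nb_jobs; i++)
			complete_job(jobs[i]);
	}

	/* Fill every job, then submit the whole burst in one call;
	 * any jobs returned by the submit are already completed. */
	for (i = 0; i < n; i++)
		fill_job(jobs[i]);

	nb_jobs = IMB_SUBMIT_BURST(mb_mgr, n, jobs);
	for (i = 0; i < nb_jobs; i++)
		complete_job(jobs[i]);
}

The single-job get/submit loop remains for older library versions, which is why the previous dequeue function is kept behind the IMB_VERSION check in the diff.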
Signed-off-by: Marcel Cornu Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 133 ++++++++++++++++++++++++- 1 file changed, 132 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index c53548aa3b..5789b82d8e 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -9,6 +9,10 @@ struct aesni_mb_op_buf_data { uint32_t offset; }; +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM +static IMB_JOB *jobs[IMB_MAX_BURST_SIZE] = {NULL}; +#endif + /** * Calculate the authentication pre-computes * @@ -1974,6 +1978,133 @@ set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op) return job; } +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM +static uint16_t +aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + struct ipsec_mb_qp *qp = queue_pair; + IMB_MGR *mb_mgr = qp->mb_mgr; + struct rte_crypto_op *op; + struct rte_crypto_op *deqd_ops[IMB_MAX_BURST_SIZE]; + IMB_JOB *job; + int retval, processed_jobs = 0; + uint16_t i, nb_jobs; + + if (unlikely(nb_ops == 0 || mb_mgr == NULL)) + return 0; + + uint8_t digest_idx = qp->digest_idx; + uint16_t burst_sz = (nb_ops > IMB_MAX_BURST_SIZE) ? + IMB_MAX_BURST_SIZE : nb_ops; + + /* + * If nb_ops is greater than the max supported + * ipsec_mb burst size, then process in bursts of + * IMB_MAX_BURST_SIZE until all operations are submitted + */ + while (nb_ops) { + uint16_t nb_submit_ops; + uint16_t n = (nb_ops / burst_sz) ? + burst_sz : nb_ops; + + while (unlikely((IMB_GET_NEXT_BURST(mb_mgr, n, jobs)) < n)) { + /* + * Not enough free jobs in the queue + * Flush n jobs until enough jobs available + */ + nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); + for (i = 0; i < nb_jobs; i++) { + job = jobs[i]; + + op = post_process_mb_job(qp, job); + if (op) { + ops[processed_jobs++] = op; + qp->stats.dequeued_count++; + } else { + qp->stats.dequeue_err_count++; + break; + } + } + } + + /* + * Get the next operations to process from ingress queue. + * There is no need to return the job to the IMB_MGR + * if there are no more operations to process, since + * the IMB_MGR can use that pointer again in next + * get_next calls. 
+ */ + nb_submit_ops = rte_ring_dequeue_burst(qp->ingress_queue, + (void **)deqd_ops, n, NULL); + for (i = 0; i < nb_submit_ops; i++) { + job = jobs[i]; + op = deqd_ops[i]; + +#ifdef AESNI_MB_DOCSIS_SEC_ENABLED + if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) + retval = set_sec_mb_job_params(job, qp, op, + &digest_idx); + else +#endif + retval = set_mb_job_params(job, qp, op, + &digest_idx, mb_mgr); + + if (unlikely(retval != 0)) { + qp->stats.dequeue_err_count++; + set_job_null_op(job, op); + } + } + + /* Submit jobs to multi-buffer for processing */ +#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG + int err = 0; + + nb_jobs = IMB_SUBMIT_BURST(mb_mgr, nb_submit_ops, jobs); + err = imb_get_errno(mb_mgr); + if (err) + IPSEC_MB_LOG(ERR, "%s", imb_get_strerror(err)); +#else + nb_jobs = IMB_SUBMIT_BURST_NOCHECK(mb_mgr, + nb_submit_ops, jobs); +#endif + for (i = 0; i < nb_jobs; i++) { + job = jobs[i]; + + op = post_process_mb_job(qp, job); + if (op) { + ops[processed_jobs++] = op; + qp->stats.dequeued_count++; + } else { + qp->stats.dequeue_err_count++; + break; + } + } + + qp->digest_idx = digest_idx; + + if (processed_jobs < 1) { + nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); + + for (i = 0; i < nb_jobs; i++) { + job = jobs[i]; + + op = post_process_mb_job(qp, job); + if (op) { + ops[processed_jobs++] = op; + qp->stats.dequeued_count++; + } else { + qp->stats.dequeue_err_count++; + break; + } + } + } + nb_ops -= n; + } + + return processed_jobs; +} +#else static uint16_t aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, uint16_t nb_ops) @@ -2054,7 +2185,7 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, return processed_jobs; } - +#endif static inline int check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl) { From patchwork Fri Apr 21 13:12:15 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126391 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 23863429AD; Fri, 21 Apr 2023 15:12:50 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 29E0542D12; Fri, 21 Apr 2023 15:12:36 +0200 (CEST) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id 665624114B for ; Fri, 21 Apr 2023 15:12:33 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1682082753; x=1713618753; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=RThlGkv0ELtY8+ujtaZFcNbjwhVEsiQI1hottTimYGY=; b=fsAufMQ5+1Xahu/ZpuUcgDjQ7C4V0VnVRF9JpNegX/kI7qyDkHD9wPYI N24tSofYnYMMxkBwDLmXFMP5grrYy7CHOnH25QGEo4fzvUK4eSb8DZw9B Q25XSmkgVTCdjQwzC11IFjM0zEgw6qtXhhvFtxE1wdBDJs6MiZFuoURxB GyA0KtQgjh3YF7E8RjFPQc/xGKZeSthAM8dyIqEb8AEdGz4HsxGG2sFHZ T2WB9VpDZMrsI7FKsmaDO2CGOfWxCGAfoZStm+f7iW4xzrxAZyinOvxo0 TtWiM3AIg5AZ7IHGc9xrBVxSvyRB++85Www4Eab/TsyoEDPBrpS14L8/1 g==; X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="408927707" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="408927707" Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 21 Apr 2023 06:12:33 -0700 X-ExtLoop1: 1 X-IronPort-AV: 
E=McAfee;i="6600,9927,10686"; a="724817353" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="724817353" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga001.jf.intel.com with ESMTP; 21 Apr 2023 06:12:31 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, Pablo de Lara , Ciara Power Subject: [PATCH 3/8] crypto/ipsec_mb: use new SGL API Date: Fri, 21 Apr 2023 13:12:15 +0000 Message-Id: <20230421131221.1732314-4-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230421131221.1732314-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Pablo de Lara Use new SGL API available from IPSec Multi-buffer v1.3, where only one function call is required to submit all segments to be processed in an SGL scenario. Instead of having one call per segment, there is only one call per buffer. Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 187 +++++++++++++++----- drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 7 + 2 files changed, 153 insertions(+), 41 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index 5789b82d8e..3ebbade8ca 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -1241,6 +1241,141 @@ imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg) return 0; } +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM +static inline int +single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, + int oop, uint32_t offset, struct rte_mbuf *m_src, + struct rte_mbuf *m_dst, struct IMB_SGL_IOV *sgl_segs) +{ + uint32_t num_segs = 0; + struct aesni_mb_op_buf_data src_sgl = {0}; + struct aesni_mb_op_buf_data dst_sgl = {0}; + uint32_t total_len; + + job->sgl_state = IMB_SGL_ALL; + + src_sgl.m = m_src; + src_sgl.offset = offset; + + while (src_sgl.offset >= src_sgl.m->data_len) { + src_sgl.offset -= src_sgl.m->data_len; + src_sgl.m = src_sgl.m->next; + + RTE_ASSERT(src_sgl.m != NULL); + } + + if (oop) { + dst_sgl.m = m_dst; + dst_sgl.offset = offset; + + while (dst_sgl.offset >= dst_sgl.m->data_len) { + dst_sgl.offset -= dst_sgl.m->data_len; + dst_sgl.m = dst_sgl.m->next; + + RTE_ASSERT(dst_sgl.m != NULL); + } + } + total_len = op->sym->aead.data.length; + + while (total_len != 0) { + uint32_t data_len, part_len; + + if (src_sgl.m == NULL) { + IPSEC_MB_LOG(ERR, "Invalid source buffer"); + return -EINVAL; + } + + data_len = src_sgl.m->data_len - src_sgl.offset; + + sgl_segs[num_segs].in = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, + src_sgl.offset); + + if (dst_sgl.m != NULL) { + if (dst_sgl.m->data_len - dst_sgl.offset == 0) { + dst_sgl.m = dst_sgl.m->next; + if (dst_sgl.m == NULL) { + IPSEC_MB_LOG(ERR, "Invalid destination buffer"); + return -EINVAL; + } + dst_sgl.offset = 0; + } + part_len = RTE_MIN(data_len, (dst_sgl.m->data_len - + dst_sgl.offset)); + sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(dst_sgl.m, + uint8_t *, dst_sgl.offset); + dst_sgl.offset += part_len; + } else { + part_len = RTE_MIN(data_len, total_len); + sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, + src_sgl.offset); + } + + sgl_segs[num_segs].len = part_len; + + total_len -= part_len; + + if (part_len != 
data_len) { + src_sgl.offset += part_len; + } else { + src_sgl.m = src_sgl.m->next; + src_sgl.offset = 0; + } + num_segs++; + } + job->num_sgl_io_segs = num_segs; + job->sgl_io_segs = sgl_segs; + return 0; +} +#endif + +static inline int +multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, + int oop, uint32_t offset, struct rte_mbuf *m_src, + struct rte_mbuf *m_dst, IMB_MGR *mb_mgr) +{ + int ret; + IMB_JOB base_job; + struct aesni_mb_op_buf_data src_sgl = {0}; + struct aesni_mb_op_buf_data dst_sgl = {0}; + uint32_t total_len; + + base_job = *job; + job->sgl_state = IMB_SGL_INIT; + job = IMB_SUBMIT_JOB(mb_mgr); + total_len = op->sym->aead.data.length; + + src_sgl.m = m_src; + src_sgl.offset = offset; + + while (src_sgl.offset >= src_sgl.m->data_len) { + src_sgl.offset -= src_sgl.m->data_len; + src_sgl.m = src_sgl.m->next; + + RTE_ASSERT(src_sgl.m != NULL); + } + + if (oop) { + dst_sgl.m = m_dst; + dst_sgl.offset = offset; + + while (dst_sgl.offset >= dst_sgl.m->data_len) { + dst_sgl.offset -= dst_sgl.m->data_len; + dst_sgl.m = dst_sgl.m->next; + + RTE_ASSERT(dst_sgl.m != NULL); + } + } + + while (job->sgl_state != IMB_SGL_COMPLETE) { + job = IMB_GET_NEXT_JOB(mb_mgr); + *job = base_job; + ret = handle_aead_sgl_job(job, mb_mgr, &total_len, + &src_sgl, &dst_sgl); + if (ret < 0) + return ret; + } + return 0; +} /** * Process a crypto operation and complete a IMB_JOB job structure for * submission to the multi buffer library for processing. @@ -1262,19 +1397,15 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, { struct rte_mbuf *m_src = op->sym->m_src, *m_dst; struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp); - struct aesni_mb_op_buf_data src_sgl = {0}; - struct aesni_mb_op_buf_data dst_sgl = {0}; struct aesni_mb_session *session; - uint32_t m_offset, oop; + uint32_t m_offset; + int oop; uint32_t auth_off_in_bytes; uint32_t ciph_off_in_bytes; uint32_t auth_len_in_bytes; uint32_t ciph_len_in_bytes; - uint32_t total_len; - IMB_JOB base_job; uint8_t sgl = 0; uint8_t lb_sgl = 0; - int ret; session = ipsec_mb_get_session_private(qp, op); if (session == NULL) { @@ -1602,41 +1733,15 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, if (lb_sgl) return handle_sgl_linear(job, op, m_offset, session); - base_job = *job; - job->sgl_state = IMB_SGL_INIT; - job = IMB_SUBMIT_JOB(mb_mgr); - total_len = op->sym->aead.data.length; - - src_sgl.m = m_src; - src_sgl.offset = m_offset; - - while (src_sgl.offset >= src_sgl.m->data_len) { - src_sgl.offset -= src_sgl.m->data_len; - src_sgl.m = src_sgl.m->next; - - RTE_ASSERT(src_sgl.m != NULL); - } - - if (oop) { - dst_sgl.m = m_dst; - dst_sgl.offset = m_offset; - - while (dst_sgl.offset >= dst_sgl.m->data_len) { - dst_sgl.offset -= dst_sgl.m->data_len; - dst_sgl.m = dst_sgl.m->next; - - RTE_ASSERT(dst_sgl.m != NULL); - } - } - - while (job->sgl_state != IMB_SGL_COMPLETE) { - job = IMB_GET_NEXT_JOB(mb_mgr); - *job = base_job; - ret = handle_aead_sgl_job(job, mb_mgr, &total_len, - &src_sgl, &dst_sgl); - if (ret < 0) - return ret; - } +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM + if (m_src->nb_segs <= MAX_NUM_SEGS) + return single_sgl_job(job, op, oop, + m_offset, m_src, m_dst, + qp_data->sgl_segs); + else +#endif + return multi_sgl_job(job, op, oop, + m_offset, m_src, m_dst, mb_mgr); } return 0; diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index 8a7c74f621..e17b53e4fe 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ 
-20,6 +20,10 @@ #define HMAC_IPAD_VALUE (0x36) #define HMAC_OPAD_VALUE (0x5C) +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM +#define MAX_NUM_SEGS 16 +#endif + static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = { { /* MD5 HMAC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, @@ -729,6 +733,9 @@ struct aesni_mb_qp_data { * by the driver when verifying a digest provided * by the user (using authentication verify operation) */ +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM + struct IMB_SGL_IOV sgl_segs[MAX_NUM_SEGS]; +#endif union { struct gcm_context_data gcm_sgl_ctx; struct chacha20_poly1305_context_data chacha_sgl_ctx; From patchwork Fri Apr 21 13:12:16 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126392 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2B379429AD; Fri, 21 Apr 2023 15:12:59 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0792D42D33; Fri, 21 Apr 2023 15:12:38 +0200 (CEST) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id 668A44114B for ; Fri, 21 Apr 2023 15:12:34 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1682082754; x=1713618754; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=5ITM7j6HFfzyx4TNh8l2VJIUh7qbIXRVU5oto1UULM0=; b=TTcLPVnbkP/UsjjK2il1RYKFkqlBDXNXiGKpky6crNbU6OeKrk6OSD9T pITfLoN+U6g1G2H2RdiUfur3zAjKe2eimPsJ8z9hcB2x/YlMkh7gDZT91 4i5tSgWqB+m+UqTrMizq/Syjz7s4N+cKwB8Tjnx3fG6XxI/EJlD3A6/IY XMiLDSha1n/9KgW40MrDqa8t3uG/UIFKvI/xO05tg/j43lOfZTotJDnNg WgoTqBYfOQPPAmC6c3UsKYx50uc9rOVScCCXvnun++Fv/kVCwjEOxYWHb VUxiwF1jPbHchd+1jQ8MLRmEEKluhGrUf0VqKQI8qMh5j4hjvoUVOoytL g==; X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="408927710" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="408927710" Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 21 Apr 2023 06:12:34 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="724817362" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="724817362" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga001.jf.intel.com with ESMTP; 21 Apr 2023 06:12:33 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, Pablo de Lara Subject: [PATCH 4/8] crypto/ipsec_mb: remove unneeded fields in crypto session Date: Fri, 21 Apr 2023 13:12:16 +0000 Message-Id: <20230421131221.1732314-5-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230421131221.1732314-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Pablo de Lara Cipher direction, cipher mode and hash algorithm are duplicated in crypto session. 
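A hedged reading of the duplication being removed, based on the session fields used elsewhere in this driver; the excerpt below is illustrative only, not the real structure definition:

#include <intel-ipsec-mb.h>

/* The top-level copies go away; job setup keeps reading the nested members
 * (sess->cipher.mode, sess->cipher.direction, sess->auth.algo). */
struct aesni_mb_session_excerpt {
	IMB_CIPHER_MODE cipher_mode;		/* removed: duplicate of cipher.mode */
	IMB_CIPHER_DIRECTION cipher_direction;	/* removed: duplicate of cipher.direction */
	IMB_HASH_ALG hash_alg;			/* removed: duplicate of auth.algo */
	IMB_CHAIN_ORDER chain_order;
	struct {
		IMB_CIPHER_MODE mode;
		IMB_CIPHER_DIRECTION direction;
	} cipher;
	struct {
		IMB_HASH_ALG algo;
	} auth;
};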
Signed-off-by: Pablo de Lara --- drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index e17b53e4fe..3cf44f8bc4 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ -852,9 +852,6 @@ get_digest_byte_length(IMB_HASH_ALG algo) /** AES-NI multi-buffer private session structure */ struct aesni_mb_session { - IMB_CIPHER_MODE cipher_mode; - IMB_CIPHER_DIRECTION cipher_direction; - IMB_HASH_ALG hash_alg; IMB_CHAIN_ORDER chain_order; /* common job fields */ struct { From patchwork Fri Apr 21 13:12:17 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126393 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2D3FD429AD; Fri, 21 Apr 2023 15:13:06 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1A2D642D38; Fri, 21 Apr 2023 15:12:39 +0200 (CEST) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id E336E42D17 for ; Fri, 21 Apr 2023 15:12:36 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1682082757; x=1713618757; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=dnlJ/8S4kkwz/pPb4P9ZiCU5kpo8YWkKOLdTGVf1Hd4=; b=UJ7yYpKXa3C1DqVAB9BHofsTfclfjkmy2hYehgzVQO/faVIUcCstkEfE g+ylKvR5ke6DUm9/xC/QoKxJU5e2jmbdj1ziODv6tpQWP31H+2kFQGyM4 UWNaK8k+PxGywvNRzc9GSPc/tqkEvs1W7tIAs0BTi/WfMjX0FPMyeGxqQ niX0v8kyYoBr0a+i8LtbEAXFqtOc7s45egNtGEbgHhaa0IpNsF8mz9xA8 iTZZtWF6DvNCeVGJ8ZePY47LNsT4t9306rx0TqsSyWCy/47FhnBdHQh9C 7rMbyU6e5TOyOnHMHD/DsF00k0DoPsA2hZ8X5yAU8cExAbIv/uG5oXBsJ A==; X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="408927717" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="408927717" Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 21 Apr 2023 06:12:36 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="724817378" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="724817378" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga001.jf.intel.com with ESMTP; 21 Apr 2023 06:12:34 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, Pablo de Lara , Ciara Power Subject: [PATCH 5/8] crypto/ipsec_mb: store template job Date: Fri, 21 Apr 2023 13:12:17 +0000 Message-Id: <20230421131221.1732314-6-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230421131221.1732314-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Pablo de Lara Store template IMB_JOB in session that will have filled all session-related fields. 
These fields include cipher direction, chain order, cipher mode, hash algorithm, key length, IV lengths, AAD length, digest length, and key pointers. Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 403 ++++++++------------ drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 20 +- 2 files changed, 159 insertions(+), 264 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index 3ebbade8ca..8ccdd2ad2e 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -76,7 +76,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, uint32_t auth_precompute = 1; if (xform == NULL) { - sess->auth.algo = IMB_AUTH_NULL; + sess->template_job.hash_alg = IMB_AUTH_NULL; return 0; } @@ -87,7 +87,6 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, /* Set IV parameters */ sess->auth_iv.offset = xform->auth.iv.offset; - sess->auth_iv.length = xform->auth.iv.length; /* Set the request digest size */ sess->auth.req_digest_len = xform->auth.digest_length; @@ -97,13 +96,13 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, /* Set Authentication Parameters */ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) { - sess->auth.algo = IMB_AUTH_NULL; - sess->auth.gen_digest_len = 0; + sess->template_job.hash_alg = IMB_AUTH_NULL; + sess->template_job.auth_tag_output_len_in_bytes = 0; return 0; } if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) { - sess->auth.algo = IMB_AUTH_AES_XCBC; + sess->template_job.hash_alg = IMB_AUTH_AES_XCBC; uint16_t xcbc_mac_digest_len = get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC); @@ -111,18 +110,21 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data, sess->auth.xcbc.k1_expanded, sess->auth.xcbc.k2, sess->auth.xcbc.k3); + sess->template_job.u.XCBC._k1_expanded = sess->auth.xcbc.k1_expanded; + sess->template_job.u.XCBC._k2 = sess->auth.xcbc.k2; + sess->template_job.u.XCBC._k3 = sess->auth.xcbc.k3; return 0; } if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) { uint32_t dust[4*15]; - sess->auth.algo = IMB_AUTH_AES_CMAC; + sess->template_job.hash_alg = IMB_AUTH_AES_CMAC; uint16_t cmac_digest_len = get_digest_byte_length(IMB_AUTH_AES_CMAC); @@ -140,70 +142,74 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, * the requested number of bytes. 
*/ if (sess->auth.req_digest_len < 4) - sess->auth.gen_digest_len = cmac_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = cmac_digest_len; else - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data, sess->auth.cmac.expkey, dust); IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey, sess->auth.cmac.skey1, sess->auth.cmac.skey2); + sess->template_job.u.CMAC._key_expanded = sess->auth.cmac.expkey; + sess->template_job.u.CMAC._skey1 = sess->auth.cmac.skey1; + sess->template_job.u.CMAC._skey2 = sess->auth.cmac.skey2; return 0; } if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) { - sess->cipher.direction = IMB_DIR_ENCRYPT; - sess->chain_order = IMB_ORDER_CIPHER_HASH; + sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; + sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; } else - sess->cipher.direction = IMB_DIR_DECRYPT; + sess->template_job.cipher_direction = IMB_DIR_DECRYPT; if (sess->auth.req_digest_len > get_digest_byte_length(IMB_AUTH_AES_GMAC)) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; - sess->iv.length = xform->auth.iv.length; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; + sess->template_job.u.GMAC.iv_len_in_bytes = xform->auth.iv.length; sess->iv.offset = xform->auth.iv.offset; switch (xform->auth.key.length) { case IMB_KEY_128_BYTES: - sess->auth.algo = IMB_AUTH_AES_GMAC_128; + sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_128; IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; break; case IMB_KEY_192_BYTES: - sess->auth.algo = IMB_AUTH_AES_GMAC_192; + sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_192; IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); - sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; break; case IMB_KEY_256_BYTES: - sess->auth.algo = IMB_AUTH_AES_GMAC_256; + sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_256; IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; break; default: IPSEC_MB_LOG(ERR, "Invalid authentication key length\n"); return -EINVAL; } + sess->template_job.u.GMAC._key = &sess->cipher.gcm_key; return 0; } if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) { if (xform->auth.key.length == 16) { - sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN; + sess->template_job.hash_alg = IMB_AUTH_ZUC_EIA3_BITLEN; if (sess->auth.req_digest_len != 4) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } } else if (xform->auth.key.length == 32) { - sess->auth.algo = IMB_AUTH_ZUC256_EIA3_BITLEN; + sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN; #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM if (sess->auth.req_digest_len != 4 && sess->auth.req_digest_len != 8 && @@ -219,13 +225,14 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, xform->auth.key.length); + 
sess->template_job.u.ZUC_EIA3._key = sess->auth.zuc_auth_key; return 0; } else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { - sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN; + sess->template_job.hash_alg = IMB_AUTH_SNOW3G_UIA2_BITLEN; uint16_t snow3g_uia2_digest_len = get_truncated_digest_byte_length( IMB_AUTH_SNOW3G_UIA2_BITLEN); @@ -233,33 +240,37 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data, &sess->auth.pKeySched_snow3g_auth); + sess->template_job.u.SNOW3G_UIA2._key = (void *) + &sess->auth.pKeySched_snow3g_auth; return 0; } else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) { - sess->auth.algo = IMB_AUTH_KASUMI_UIA1; + sess->template_job.hash_alg = IMB_AUTH_KASUMI_UIA1; uint16_t kasumi_f9_digest_len = get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1); if (sess->auth.req_digest_len != kasumi_f9_digest_len) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data, &sess->auth.pKeySched_kasumi_auth); + sess->template_job.u.KASUMI_UIA1._key = (void *) + &sess->auth.pKeySched_kasumi_auth; return 0; } switch (xform->auth.algo) { case RTE_CRYPTO_AUTH_MD5_HMAC: - sess->auth.algo = IMB_AUTH_MD5; + sess->template_job.hash_alg = IMB_AUTH_MD5; hash_oneblock_fn = mb_mgr->md5_one_block; break; case RTE_CRYPTO_AUTH_SHA1_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_1; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_1; hash_oneblock_fn = mb_mgr->sha1_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_1)) { @@ -271,11 +282,11 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA1: - sess->auth.algo = IMB_AUTH_SHA_1; + sess->template_job.hash_alg = IMB_AUTH_SHA_1; auth_precompute = 0; break; case RTE_CRYPTO_AUTH_SHA224_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_224; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_224; hash_oneblock_fn = mb_mgr->sha224_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_224)) { @@ -287,11 +298,11 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA224: - sess->auth.algo = IMB_AUTH_SHA_224; + sess->template_job.hash_alg = IMB_AUTH_SHA_224; auth_precompute = 0; break; case RTE_CRYPTO_AUTH_SHA256_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_256; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_256; hash_oneblock_fn = mb_mgr->sha256_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_256)) { @@ -303,11 +314,11 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA256: - sess->auth.algo = IMB_AUTH_SHA_256; + sess->template_job.hash_alg = IMB_AUTH_SHA_256; auth_precompute = 0; break; case RTE_CRYPTO_AUTH_SHA384_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_384; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_384; hash_oneblock_fn = mb_mgr->sha384_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_384)) { @@ -319,11 +330,11 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA384: - sess->auth.algo = 
IMB_AUTH_SHA_384; + sess->template_job.hash_alg = IMB_AUTH_SHA_384; auth_precompute = 0; break; case RTE_CRYPTO_AUTH_SHA512_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_512; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_512; hash_oneblock_fn = mb_mgr->sha512_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_512)) { @@ -335,7 +346,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA512: - sess->auth.algo = IMB_AUTH_SHA_512; + sess->template_job.hash_alg = IMB_AUTH_SHA_512; auth_precompute = 0; break; default: @@ -344,9 +355,9 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, return -ENOTSUP; } uint16_t trunc_digest_size = - get_truncated_digest_byte_length(sess->auth.algo); + get_truncated_digest_byte_length(sess->template_job.hash_alg); uint16_t full_digest_size = - get_digest_byte_length(sess->auth.algo); + get_digest_byte_length(sess->template_job.hash_alg); if (sess->auth.req_digest_len > full_digest_size || sess->auth.req_digest_len == 0) { @@ -356,9 +367,9 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, if (sess->auth.req_digest_len != trunc_digest_size && sess->auth.req_digest_len != full_digest_size) - sess->auth.gen_digest_len = full_digest_size; + sess->template_job.auth_tag_output_len_in_bytes = full_digest_size; else - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; /* Plain SHA does not require precompute key */ if (auth_precompute == 0) @@ -370,14 +381,18 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, sess->auth.pads.inner, sess->auth.pads.outer, hashed_key, xform->auth.key.length, - get_auth_algo_blocksize(sess->auth.algo)); + get_auth_algo_blocksize(sess->template_job.hash_alg)); } else { calculate_auth_precomputes(hash_oneblock_fn, sess->auth.pads.inner, sess->auth.pads.outer, xform->auth.key.data, xform->auth.key.length, - get_auth_algo_blocksize(sess->auth.algo)); + get_auth_algo_blocksize(sess->template_job.hash_alg)); } + sess->template_job.u.HMAC._hashed_auth_key_xor_ipad = + sess->auth.pads.inner; + sess->template_job.u.HMAC._hashed_auth_key_xor_opad = + sess->auth.pads.outer; return 0; } @@ -396,7 +411,7 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, uint8_t is_kasumi = 0; if (xform == NULL) { - sess->cipher.mode = IMB_CIPHER_NULL; + sess->template_job.cipher_mode = IMB_CIPHER_NULL; return 0; } @@ -408,10 +423,10 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, /* Select cipher direction */ switch (xform->cipher.op) { case RTE_CRYPTO_CIPHER_OP_ENCRYPT: - sess->cipher.direction = IMB_DIR_ENCRYPT; + sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; break; case RTE_CRYPTO_CIPHER_OP_DECRYPT: - sess->cipher.direction = IMB_DIR_DECRYPT; + sess->template_job.cipher_direction = IMB_DIR_DECRYPT; break; default: IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter"); @@ -421,48 +436,48 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, /* Select cipher mode */ switch (xform->cipher.algo) { case RTE_CRYPTO_CIPHER_AES_CBC: - sess->cipher.mode = IMB_CIPHER_CBC; + sess->template_job.cipher_mode = IMB_CIPHER_CBC; is_aes = 1; break; case RTE_CRYPTO_CIPHER_AES_CTR: - sess->cipher.mode = IMB_CIPHER_CNTR; + sess->template_job.cipher_mode = IMB_CIPHER_CNTR; is_aes = 1; break; case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: - sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI; + sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_SEC_BPI; 
is_docsis = 1; break; case RTE_CRYPTO_CIPHER_DES_CBC: - sess->cipher.mode = IMB_CIPHER_DES; + sess->template_job.cipher_mode = IMB_CIPHER_DES; break; case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: - sess->cipher.mode = IMB_CIPHER_DOCSIS_DES; + sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_DES; break; case RTE_CRYPTO_CIPHER_3DES_CBC: - sess->cipher.mode = IMB_CIPHER_DES3; + sess->template_job.cipher_mode = IMB_CIPHER_DES3; is_3DES = 1; break; case RTE_CRYPTO_CIPHER_AES_ECB: - sess->cipher.mode = IMB_CIPHER_ECB; + sess->template_job.cipher_mode = IMB_CIPHER_ECB; is_aes = 1; break; case RTE_CRYPTO_CIPHER_ZUC_EEA3: - sess->cipher.mode = IMB_CIPHER_ZUC_EEA3; + sess->template_job.cipher_mode = IMB_CIPHER_ZUC_EEA3; is_zuc = 1; break; case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: - sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN; + sess->template_job.cipher_mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN; is_snow3g = 1; break; case RTE_CRYPTO_CIPHER_KASUMI_F8: - sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN; + sess->template_job.cipher_mode = IMB_CIPHER_KASUMI_UEA1_BITLEN; is_kasumi = 1; break; case RTE_CRYPTO_CIPHER_NULL: - sess->cipher.mode = IMB_CIPHER_NULL; - sess->cipher.key_length_in_bytes = 0; + sess->template_job.cipher_mode = IMB_CIPHER_NULL; + sess->template_job.key_len_in_bytes = 0; sess->iv.offset = xform->cipher.iv.offset; - sess->iv.length = xform->cipher.iv.length; + sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; return 0; default: IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter"); @@ -471,25 +486,25 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, /* Set IV parameters */ sess->iv.offset = xform->cipher.iv.offset; - sess->iv.length = xform->cipher.iv.length; + sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; /* Check key length and choose key expansion function for AES */ if (is_aes) { switch (xform->cipher.key.length) { case IMB_KEY_128_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); break; case IMB_KEY_192_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); break; case IMB_KEY_256_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); @@ -498,16 +513,19 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } + + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; } else if (is_docsis) { switch (xform->cipher.key.length) { case IMB_KEY_128_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); break; case IMB_KEY_256_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, 
sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); @@ -516,6 +534,8 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; } else if (is_3DES) { uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0], sess->cipher.exp_3des_keys.key[1], @@ -559,38 +579,46 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, return -EINVAL; } - sess->cipher.key_length_in_bytes = 24; + sess->template_job.enc_keys = sess->cipher.exp_3des_keys.ks_ptr; + sess->template_job.dec_keys = sess->cipher.exp_3des_keys.ks_ptr; + sess->template_job.key_len_in_bytes = 24; } else if (is_zuc) { if (xform->cipher.key.length != 16 && xform->cipher.key.length != 32) { IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = xform->cipher.key.length; + sess->template_job.key_len_in_bytes = xform->cipher.key.length; memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data, xform->cipher.key.length); + sess->template_job.enc_keys = sess->cipher.zuc_cipher_key; + sess->template_job.dec_keys = sess->cipher.zuc_cipher_key; } else if (is_snow3g) { if (xform->cipher.key.length != 16) { IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = 16; + sess->template_job.key_len_in_bytes = 16; IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data, &sess->cipher.pKeySched_snow3g_cipher); + sess->template_job.enc_keys = &sess->cipher.pKeySched_snow3g_cipher; + sess->template_job.dec_keys = &sess->cipher.pKeySched_snow3g_cipher; } else if (is_kasumi) { if (xform->cipher.key.length != 16) { IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = 16; + sess->template_job.key_len_in_bytes = 16; IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data, &sess->cipher.pKeySched_kasumi_cipher); + sess->template_job.enc_keys = &sess->cipher.pKeySched_kasumi_cipher; + sess->template_job.dec_keys = &sess->cipher.pKeySched_kasumi_cipher; } else { if (xform->cipher.key.length != 8) { IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = 8; + sess->template_job.key_len_in_bytes = 8; IMB_DES_KEYSCHED(mb_mgr, (uint64_t *)sess->cipher.expanded_aes_keys.encode, @@ -598,6 +626,8 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, IMB_DES_KEYSCHED(mb_mgr, (uint64_t *)sess->cipher.expanded_aes_keys.decode, xform->cipher.key.data); + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; } return 0; @@ -610,11 +640,11 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, { switch (xform->aead.op) { case RTE_CRYPTO_AEAD_OP_ENCRYPT: - sess->cipher.direction = IMB_DIR_ENCRYPT; + sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; break; case RTE_CRYPTO_AEAD_OP_DECRYPT: - sess->cipher.direction = IMB_DIR_DECRYPT; + sess->template_job.cipher_direction = IMB_DIR_DECRYPT; sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; break; default: @@ -624,27 +654,28 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, /* Set IV parameters */ sess->iv.offset = xform->aead.iv.offset; - sess->iv.length = xform->aead.iv.length; + sess->template_job.iv_len_in_bytes = 
xform->aead.iv.length; /* Set digest sizes */ sess->auth.req_digest_len = xform->aead.digest_length; - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; switch (xform->aead.algo) { case RTE_CRYPTO_AEAD_AES_CCM: - sess->cipher.mode = IMB_CIPHER_CCM; - sess->auth.algo = IMB_AUTH_AES_CCM; + sess->template_job.cipher_mode = IMB_CIPHER_CCM; + sess->template_job.hash_alg = IMB_AUTH_AES_CCM; + sess->template_job.u.CCM.aad_len_in_bytes = xform->aead.aad_length; /* Check key length and choose key expansion function for AES */ switch (xform->aead.key.length) { case IMB_KEY_128_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); break; case IMB_KEY_256_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); @@ -654,6 +685,8 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, return -EINVAL; } + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; /* CCM digests must be between 4 and 16 and an even number */ if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN || sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN || @@ -664,22 +697,23 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, break; case RTE_CRYPTO_AEAD_AES_GCM: - sess->cipher.mode = IMB_CIPHER_GCM; - sess->auth.algo = IMB_AUTH_AES_GMAC; + sess->template_job.cipher_mode = IMB_CIPHER_GCM; + sess->template_job.hash_alg = IMB_AUTH_AES_GMAC; + sess->template_job.u.GCM.aad_len_in_bytes = xform->aead.aad_length; switch (xform->aead.key.length) { case IMB_KEY_128_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data, &sess->cipher.gcm_key); break; case IMB_KEY_192_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data, &sess->cipher.gcm_key); break; case IMB_KEY_256_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data, &sess->cipher.gcm_key); break; @@ -688,6 +722,8 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, return -EINVAL; } + sess->template_job.enc_keys = &sess->cipher.gcm_key; + sess->template_job.dec_keys = &sess->cipher.gcm_key; /* GCM digest size must be between 1 and 16 */ if (sess->auth.req_digest_len == 0 || sess->auth.req_digest_len > 16) { @@ -697,16 +733,20 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, break; case RTE_CRYPTO_AEAD_CHACHA20_POLY1305: - sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305; - sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305; + sess->template_job.cipher_mode = IMB_CIPHER_CHACHA20_POLY1305; + sess->template_job.hash_alg = IMB_AUTH_CHACHA20_POLY1305; + sess->template_job.u.CHACHA20_POLY1305.aad_len_in_bytes = + xform->aead.aad_length; if (xform->aead.key.length != 32) { IPSEC_MB_LOG(ERR, "Invalid key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = 
32; + sess->template_job.key_len_in_bytes = 32; memcpy(sess->cipher.expanded_aes_keys.encode, xform->aead.key.data, 32); + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; if (sess->auth.req_digest_len != 16) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; @@ -741,16 +781,16 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr, /* Select Crypto operation - hash then cipher / cipher then hash */ switch (mode) { case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT: - sess->chain_order = IMB_ORDER_HASH_CIPHER; + sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; break; case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN: case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY: - sess->chain_order = IMB_ORDER_CIPHER_HASH; + sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; break; case IPSEC_MB_OP_HASH_GEN_ONLY: case IPSEC_MB_OP_HASH_VERIFY_ONLY: case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT: - sess->chain_order = IMB_ORDER_HASH_CIPHER; + sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; break; /* * Multi buffer library operates only at two modes, @@ -760,18 +800,16 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr, * the first operation and decryption the last one. */ case IPSEC_MB_OP_ENCRYPT_ONLY: - sess->chain_order = IMB_ORDER_CIPHER_HASH; + sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; break; case IPSEC_MB_OP_DECRYPT_ONLY: - sess->chain_order = IMB_ORDER_HASH_CIPHER; + sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; break; case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT: - sess->chain_order = IMB_ORDER_CIPHER_HASH; - sess->aead.aad_len = xform->aead.aad_length; + sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; break; case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT: - sess->chain_order = IMB_ORDER_HASH_CIPHER; - sess->aead.aad_len = xform->aead.aad_length; + sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; break; case IPSEC_MB_OP_NOT_SUPPORTED: default: @@ -781,8 +819,7 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr, } /* Default IV length = 0 */ - sess->iv.length = 0; - sess->auth_iv.length = 0; + sess->template_job.iv_len_in_bytes = 0; ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform); if (ret != 0) { @@ -864,10 +901,10 @@ aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess, /* Select CRC generate/verify */ if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) { - sess->auth.algo = IMB_AUTH_DOCSIS_CRC32; + sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) { - sess->auth.algo = IMB_AUTH_DOCSIS_CRC32; + sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; } else { IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction"); @@ -875,7 +912,7 @@ aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess, } sess->auth.req_digest_len = RTE_ETHER_CRC_LEN; - sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN; + sess->template_job.auth_tag_output_len_in_bytes = RTE_ETHER_CRC_LEN; return 0; } @@ -907,12 +944,12 @@ aesni_mb_set_docsis_sec_session_parameters( switch (conf->docsis.direction) { case RTE_SECURITY_DOCSIS_UPLINK: - ipsec_sess->chain_order = IMB_ORDER_CIPHER_HASH; + ipsec_sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; docsis_xform = &conf->docsis; cipher_xform = conf->crypto_xform; break; case RTE_SECURITY_DOCSIS_DOWNLINK: - ipsec_sess->chain_order = IMB_ORDER_HASH_CIPHER; + 
ipsec_sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; cipher_xform = conf->crypto_xform; docsis_xform = &conf->docsis; break; @@ -923,7 +960,7 @@ aesni_mb_set_docsis_sec_session_parameters( } /* Default IV length = 0 */ - ipsec_sess->iv.length = 0; + ipsec_sess->template_job.iv_len_in_bytes = 0; ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess, docsis_xform); @@ -958,7 +995,7 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session, uint32_t cipher_end, auth_end; /* Only cipher then hash needs special calculation. */ - if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl) + if (!oop || session->template_job.chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl) return auth_offset; m_src = op->sym->m_src; @@ -1004,80 +1041,35 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata) { - /* Set crypto operation */ - job->chain_order = session->chain_order; - - /* Set cipher parameters */ - job->cipher_direction = session->cipher.direction; - job->cipher_mode = session->cipher.mode; - - job->key_len_in_bytes = session->cipher.key_length_in_bytes; + memcpy(job, &session->template_job, sizeof(IMB_JOB)); /* Set authentication parameters */ - job->hash_alg = session->auth.algo; job->iv = iv->va; switch (job->hash_alg) { - case IMB_AUTH_AES_XCBC: - job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; - job->u.XCBC._k2 = session->auth.xcbc.k2; - job->u.XCBC._k3 = session->auth.xcbc.k3; - - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - break; - case IMB_AUTH_AES_CCM: job->u.CCM.aad = (uint8_t *)aad->va + 18; - job->u.CCM.aad_len_in_bytes = session->aead.aad_len; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; job->iv++; break; - case IMB_AUTH_AES_CMAC: - job->u.CMAC._key_expanded = session->auth.cmac.expkey; - job->u.CMAC._skey1 = session->auth.cmac.skey1; - job->u.CMAC._skey2 = session->auth.cmac.skey2; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - break; - case IMB_AUTH_AES_GMAC: job->u.GCM.aad = aad->va; - job->u.GCM.aad_len_in_bytes = session->aead.aad_len; - job->enc_keys = &session->cipher.gcm_key; - job->dec_keys = &session->cipher.gcm_key; break; case IMB_AUTH_AES_GMAC_128: case IMB_AUTH_AES_GMAC_192: case IMB_AUTH_AES_GMAC_256: - job->u.GMAC._key = &session->cipher.gcm_key; job->u.GMAC._iv = iv->va; - job->u.GMAC.iv_len_in_bytes = session->iv.length; break; case IMB_AUTH_CHACHA20_POLY1305: job->u.CHACHA20_POLY1305.aad = aad->va; - job->u.CHACHA20_POLY1305.aad_len_in_bytes = - session->aead.aad_len; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.encode; break; default: job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner; job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer; - - if (job->cipher_mode == IMB_CIPHER_DES3) { - job->enc_keys = session->cipher.exp_3des_keys.ks_ptr; - job->dec_keys = session->cipher.exp_3des_keys.ks_ptr; - } else { - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - } } /* @@ -1087,10 +1079,6 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, /* Set digest location and length */ job->auth_tag_output 
= digest; - job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len; - - /* Set IV parameters */ - job->iv_len_in_bytes = session->iv.length; /* Data Parameters */ job->src = buf; @@ -1235,8 +1223,10 @@ handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset, static inline int imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg) { - if (alg == IMB_CIPHER_CHACHA20_POLY1305 - || alg == IMB_CIPHER_GCM) + if (alg == IMB_CIPHER_CHACHA20_POLY1305 || + alg == IMB_CIPHER_CHACHA20_POLY1305_SGL || + alg == IMB_CIPHER_GCM_SGL || + alg == IMB_CIPHER_GCM) return 1; return 0; } @@ -1413,28 +1403,11 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, return -1; } - /* Set crypto operation */ - job->chain_order = session->chain_order; - - /* Set cipher parameters */ - job->cipher_direction = session->cipher.direction; - job->cipher_mode = session->cipher.mode; - - job->key_len_in_bytes = session->cipher.key_length_in_bytes; + memcpy(job, &session->template_job, sizeof(IMB_JOB)); /* Set authentication parameters */ - job->hash_alg = session->auth.algo; - const int aead = is_aead_algo(job->hash_alg, job->cipher_mode); - if (job->cipher_mode == IMB_CIPHER_DES3) { - job->enc_keys = session->cipher.exp_3des_keys.ks_ptr; - job->dec_keys = session->cipher.exp_3des_keys.ks_ptr; - } else { - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - } - if (!op->sym->m_dst) { /* in-place operation */ m_dst = m_src; @@ -1451,89 +1424,49 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) { sgl = 1; - if (!imb_lib_support_sgl_algo(session->cipher.mode)) + if (!imb_lib_support_sgl_algo(job->cipher_mode)) lb_sgl = 1; } switch (job->hash_alg) { - case IMB_AUTH_AES_XCBC: - job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; - job->u.XCBC._k2 = session->auth.xcbc.k2; - job->u.XCBC._k3 = session->auth.xcbc.k3; - - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - break; - case IMB_AUTH_AES_CCM: job->u.CCM.aad = op->sym->aead.aad.data + 18; - job->u.CCM.aad_len_in_bytes = session->aead.aad_len; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - break; - - case IMB_AUTH_AES_CMAC: - job->u.CMAC._key_expanded = session->auth.cmac.expkey; - job->u.CMAC._skey1 = session->auth.cmac.skey1; - job->u.CMAC._skey2 = session->auth.cmac.skey2; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; break; case IMB_AUTH_AES_GMAC: job->u.GCM.aad = op->sym->aead.aad.data; - job->u.GCM.aad_len_in_bytes = session->aead.aad_len; if (sgl) { job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; job->cipher_mode = IMB_CIPHER_GCM_SGL; job->hash_alg = IMB_AUTH_GCM_SGL; } - job->enc_keys = &session->cipher.gcm_key; - job->dec_keys = &session->cipher.gcm_key; break; case IMB_AUTH_AES_GMAC_128: case IMB_AUTH_AES_GMAC_192: case IMB_AUTH_AES_GMAC_256: - job->u.GMAC._key = &session->cipher.gcm_key; job->u.GMAC._iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->auth_iv.offset); - job->u.GMAC.iv_len_in_bytes = session->auth_iv.length; break; case IMB_AUTH_ZUC_EIA3_BITLEN: case IMB_AUTH_ZUC256_EIA3_BITLEN: - job->u.ZUC_EIA3._key = session->auth.zuc_auth_key; job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->auth_iv.offset); break; case IMB_AUTH_SNOW3G_UIA2_BITLEN: - job->u.SNOW3G_UIA2._key = 
(void *) - &session->auth.pKeySched_snow3g_auth; job->u.SNOW3G_UIA2._iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->auth_iv.offset); break; - case IMB_AUTH_KASUMI_UIA1: - job->u.KASUMI_UIA1._key = (void *) - &session->auth.pKeySched_kasumi_auth; - break; case IMB_AUTH_CHACHA20_POLY1305: job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data; - job->u.CHACHA20_POLY1305.aad_len_in_bytes = - session->aead.aad_len; if (sgl) { job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx; job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; } - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.encode; break; default: - job->u.HMAC._hashed_auth_key_xor_ipad = - session->auth.pads.inner; - job->u.HMAC._hashed_auth_key_xor_opad = - session->auth.pads.outer; - + break; } if (aead) @@ -1542,14 +1475,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, m_offset = op->sym->cipher.data.offset; if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) { - job->enc_keys = session->cipher.zuc_cipher_key; - job->dec_keys = session->cipher.zuc_cipher_key; m_offset >>= 3; } else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) { - job->enc_keys = &session->cipher.pKeySched_snow3g_cipher; m_offset = 0; } else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) { - job->enc_keys = &session->cipher.pKeySched_kasumi_cipher; m_offset = 0; } @@ -1565,7 +1494,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, job->auth_tag_output = op->sym->auth.digest.data; if (session->auth.req_digest_len != - session->auth.gen_digest_len) { + job->auth_tag_output_len_in_bytes) { job->auth_tag_output = qp_data->temp_digests[*digest_idx]; *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; @@ -1576,12 +1505,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, * digest length as specified in the relevant IPsec RFCs */ - /* Set digest length */ - job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len; - - /* Set IV parameters */ - job->iv_len_in_bytes = session->iv.length; - /* Data Parameters */ if (sgl) { job->src = NULL; @@ -1773,8 +1696,8 @@ set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, return -1; } /* Only DOCSIS protocol operations supported now */ - if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI || - session->auth.algo != IMB_AUTH_DOCSIS_CRC32) { + if (session->template_job.cipher_mode != IMB_CIPHER_DOCSIS_SEC_BPI || + session->template_job.hash_alg != IMB_AUTH_DOCSIS_CRC32) { op->status = RTE_CRYPTO_OP_STATUS_ERROR; return -1; } @@ -1791,31 +1714,19 @@ set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, return -ENOTSUP; } - /* Set crypto operation */ - job->chain_order = session->chain_order; + memcpy(job, &session->template_job, sizeof(IMB_JOB)); /* Set cipher parameters */ - job->cipher_direction = session->cipher.direction; - job->cipher_mode = session->cipher.mode; - - job->key_len_in_bytes = session->cipher.key_length_in_bytes; job->enc_keys = session->cipher.expanded_aes_keys.encode; job->dec_keys = session->cipher.expanded_aes_keys.decode; /* Set IV parameters */ - job->iv_len_in_bytes = session->iv.length; job->iv = (uint8_t *)op + session->iv.offset; - /* Set authentication parameters */ - job->hash_alg = session->auth.algo; - /* Set digest output location */ job->auth_tag_output = qp_data->temp_digests[*digest_idx]; *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; - /* Set digest length */ - job->auth_tag_output_len_in_bytes = 
session->auth.gen_digest_len; - /* Set data parameters */ job->src = rte_pktmbuf_mtod(m_src, uint8_t *); job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, @@ -1865,7 +1776,7 @@ generate_digest(IMB_JOB *job, struct rte_crypto_op *op, struct aesni_mb_session *sess) { /* No extra copy needed */ - if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len)) + if (likely(sess->auth.req_digest_len == job->auth_tag_output_len_in_bytes)) return; /* @@ -1940,7 +1851,7 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) if ((op->sym->m_src->nb_segs > 1 || (op->sym->m_dst != NULL && op->sym->m_dst->nb_segs > 1)) && - !imb_lib_support_sgl_algo(sess->cipher.mode)) { + !imb_lib_support_sgl_algo(job->cipher_mode)) { linear_buf = (uint8_t *) job->user_data2; post_process_sgl_linear(op, job, sess, linear_buf); } @@ -1950,7 +1861,7 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { if (is_aead_algo(job->hash_alg, - sess->cipher.mode)) + job->cipher_mode)) verify_digest(job, op->sym->aead.digest.data, sess->auth.req_digest_len, diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index 3cf44f8bc4..ce9a6e4886 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ -852,14 +852,12 @@ get_digest_byte_length(IMB_HASH_ALG algo) /** AES-NI multi-buffer private session structure */ struct aesni_mb_session { - IMB_CHAIN_ORDER chain_order; - /* common job fields */ + IMB_JOB template_job; + /*< Template job structure */ struct { - uint16_t length; uint16_t offset; } iv; struct { - uint16_t length; uint16_t offset; } auth_iv; /* *< IV parameters @@ -868,13 +866,6 @@ struct aesni_mb_session { /* * Cipher Parameters */ struct { - /* * Cipher direction - encrypt / decrypt */ - IMB_CIPHER_DIRECTION direction; - /* * Cipher mode - CBC / Counter */ - IMB_CIPHER_MODE mode; - - uint64_t key_length_in_bytes; - union { struct { uint32_t encode[60] __rte_aligned(16); @@ -907,7 +898,6 @@ struct aesni_mb_session { /* *< Authentication Parameters */ struct { - IMB_HASH_ALG algo; /* *< Authentication Algorithm */ enum rte_crypto_auth_operation operation; /* *< auth operation generate or verify */ union { @@ -948,16 +938,10 @@ struct aesni_mb_session { kasumi_key_sched_t pKeySched_kasumi_auth; /* *< KASUMI scheduled authentication key */ }; - /* * Generated digest size by the Multi-buffer library */ - uint16_t gen_digest_len; /* * Requested digest size from Cryptodev */ uint16_t req_digest_len; } auth; - struct { - /* * AAD data length */ - uint16_t aad_len; - } aead; } __rte_cache_aligned; typedef void (*hash_one_block_t)(const void *data, void *digest); From patchwork Fri Apr 21 13:12:18 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126394 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id A6238429AD; Fri, 21 Apr 2023 15:13:15 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 8CD4342D42; Fri, 21 Apr 2023 15:12:40 +0200 (CEST) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id 93F4A42D36 for ; Fri, 21 Apr 2023 15:12:38 +0200 (CEST) 
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1682082758; x=1713618758; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=KYrRIqt1SIa7N0cviilpjuZyms1QwoeyAhekqd/ckmk=; b=RxTwgF3VIZ+x2DarXj4RuUAJxnB66qr6JNt3m1kKV6rUn/pGZpsVl5Mz 0vkea5sTB9fa9FCe+eeQ+wdMVq06qefNZVpIAtClQCVRqYo9tbR7HzfSN ogDpTWjk3zkgalMYPMdBlYTf3RHEbWO0vp2TJadzw1nJ1ItRamgkaXZy0 littB0VyP0qQ+araSYK6M4oheHDBNPMcxZWL9XdI2yJjLqYasfRc3PC6I oT4fGmcjfmUrUa+lF2UxGqoMWRVFmHSRawivq94XcUellJA9gR1HzA5w7 u1e5iG02yaHeM0UbUtDdP5qaoYJEsDt3sdUYSgs7VJI0UpVVkseW4H3X2 w==; X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="408927729" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="408927729" Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 21 Apr 2023 06:12:38 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="724817390" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="724817390" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga001.jf.intel.com with ESMTP; 21 Apr 2023 06:12:36 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, Pablo de Lara , Ciara Power Subject: [PATCH 6/8] crypto/ipsec_mb: optimize for GCM case Date: Fri, 21 Apr 2023 13:12:18 +0000 Message-Id: <20230421131221.1732314-7-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230421131221.1732314-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Pablo de Lara Use a separate code path when dealing with AES-GCM. 
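The dedicated path pays off because, once the per-session IMB_JOB template has been copied into the job, only the per-operation AES-GCM fields still need to be filled, and the generic switch on job->hash_alg can be bypassed. The sketch below is illustrative only, not the driver code: the helper name and parameter list are invented, the SGL and digest-verify handling of the real set_gcm_job() is omitted, offsets are assumed to be zero, and only IMB_JOB fields that appear in this series are touched.

#include <string.h>
#include <stdint.h>
#include <intel-ipsec-mb.h>	/* IMB_JOB, IMB_CIPHER_GCM */

/* Hypothetical condensed helper: copy the session template once, then
 * branch on the cipher mode before the generic hash_alg switch. */
static int
sketch_fill_job(IMB_JOB *job, const IMB_JOB *tmpl,
		const void *aad, const uint8_t *iv, uint8_t *digest,
		const uint8_t *src, uint8_t *dst, uint64_t len)
{
	memcpy(job, tmpl, sizeof(*job));	/* keys, chain order, tag length, ... */

	if (job->cipher_mode == IMB_CIPHER_GCM) {
		/* Per-operation fields only; AAD length and the rest already
		 * come from the template. */
		job->u.GCM.aad = aad;
		job->iv = iv;
		job->auth_tag_output = digest;
		job->src = src;
		job->dst = dst;
		job->cipher_start_src_offset_in_bytes = 0;
		job->msg_len_to_cipher_in_bytes = len;
		job->hash_start_src_offset_in_bytes = 0;
		job->msg_len_to_hash_in_bytes = len;
		return 0;
	}

	/* Non-GCM sessions fall back to the generic per-algorithm setup. */
	return 1;
}

In the actual patch the offsets and lengths come from op->sym->aead.data, and the SGL case instead switches the job to IMB_CIPHER_GCM_SGL/IMB_AUTH_GCM_SGL, as the hunks below show.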
Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 88 +++++++++++++++++++++++--- 1 file changed, 79 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index 8ccdd2ad2e..9ca679606f 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -1366,6 +1366,70 @@ multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, } return 0; } + +static inline int +set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, + struct aesni_mb_qp_data *qp_data, + struct rte_crypto_op *op, uint8_t *digest_idx, + const struct aesni_mb_session *session, + struct rte_mbuf *m_src, struct rte_mbuf *m_dst, + const int oop) +{ + const uint32_t m_offset = op->sym->aead.data.offset; + + job->u.GCM.aad = op->sym->aead.aad.data; + if (sgl) { + job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; + job->cipher_mode = IMB_CIPHER_GCM_SGL; + job->hash_alg = IMB_AUTH_GCM_SGL; + job->hash_start_src_offset_in_bytes = 0; + job->msg_len_to_hash_in_bytes = 0; + job->msg_len_to_cipher_in_bytes = 0; + job->cipher_start_src_offset_in_bytes = 0; + } else { + job->hash_start_src_offset_in_bytes = + op->sym->aead.data.offset; + job->msg_len_to_hash_in_bytes = + op->sym->aead.data.length; + job->cipher_start_src_offset_in_bytes = + op->sym->aead.data.offset; + job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; + } + + if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { + job->auth_tag_output = qp_data->temp_digests[*digest_idx]; + *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; + } else { + job->auth_tag_output = op->sym->aead.digest.data; + } + + job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, + session->iv.offset); + + /* Set user data to be crypto operation data struct */ + job->user_data = op; + + if (sgl) { + job->src = NULL; + job->dst = NULL; + +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM + if (m_src->nb_segs <= MAX_NUM_SEGS) + return single_sgl_job(job, op, oop, + m_offset, m_src, m_dst, + qp_data->sgl_segs); + else +#endif + return multi_sgl_job(job, op, oop, + m_offset, m_src, m_dst, mb_mgr); + } else { + job->src = rte_pktmbuf_mtod(m_src, uint8_t *); + job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset); + } + + return 0; +} + /** * Process a crypto operation and complete a IMB_JOB job structure for * submission to the multi buffer library for processing. 
@@ -1403,10 +1467,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, return -1; } - memcpy(job, &session->template_job, sizeof(IMB_JOB)); + const IMB_CIPHER_MODE cipher_mode = + session->template_job.cipher_mode; - /* Set authentication parameters */ - const int aead = is_aead_algo(job->hash_alg, job->cipher_mode); + memcpy(job, &session->template_job, sizeof(IMB_JOB)); if (!op->sym->m_dst) { /* in-place operation */ @@ -1424,10 +1488,17 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) { sgl = 1; - if (!imb_lib_support_sgl_algo(job->cipher_mode)) + if (!imb_lib_support_sgl_algo(cipher_mode)) lb_sgl = 1; } + if (cipher_mode == IMB_CIPHER_GCM) + return set_gcm_job(mb_mgr, job, sgl, qp_data, + op, digest_idx, session, m_src, m_dst, oop); + + /* Set authentication parameters */ + const int aead = is_aead_algo(job->hash_alg, cipher_mode); + switch (job->hash_alg) { case IMB_AUTH_AES_CCM: job->u.CCM.aad = op->sym->aead.aad.data + 18; @@ -1474,13 +1545,12 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, else m_offset = op->sym->cipher.data.offset; - if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) { + if (cipher_mode == IMB_CIPHER_ZUC_EEA3) m_offset >>= 3; - } else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) { + else if (cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) m_offset = 0; - } else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) { + else if (cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) m_offset = 0; - } /* Set digest output location */ if (job->hash_alg != IMB_AUTH_NULL && @@ -1642,7 +1712,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length; } - if (job->cipher_mode == IMB_CIPHER_NULL && oop) { + if (cipher_mode == IMB_CIPHER_NULL && oop) { memcpy(job->dst + job->cipher_start_src_offset_in_bytes, job->src + job->cipher_start_src_offset_in_bytes, job->msg_len_to_cipher_in_bytes); From patchwork Fri Apr 21 13:12:19 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126395 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 981A8429AD; Fri, 21 Apr 2023 15:13:22 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id C2D1042D48; Fri, 21 Apr 2023 15:12:41 +0200 (CEST) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id B1D5242D3A for ; Fri, 21 Apr 2023 15:12:39 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1682082759; x=1713618759; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=zZVgbR74AfUHwnyn7VBtCbnpR1kPH4EV2tvUKoTcBNo=; b=lUMniBllutrIFWUBoHelUgc0n1udsnKs0x15ZtFLqNHamqb5F5RlxhQ/ GAq+nbb7Xe7tvFy0zuXKmzl4t1CELOt/Ao1eaaUkvj18mkeVZmoWi4moI 754t4lKOfX9YaWd8A438mTSLxDTGdftZGMhKyHFXUP09zNvUdlhLArG3F VgQjsdhoYqmvtJD9ioLgOXEVn+lNdx43uLsng9ILPTACaB7PLN2JsvV+c mEbXlEAk75Xcz5myPREwefJq9C1VNUDEO6LwWP8h/EP6hKdbYl2SAtJZY cc490TN4kYgtuXtd4F6Wtz4a7qvioCca4jfyHyyRdfVCxh7aL9rRbgfnd g==; X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="408927731" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="408927731" Received: from 
orsmga001.jf.intel.com ([10.7.209.18]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 21 Apr 2023 06:12:39 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="724817400" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="724817400" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga001.jf.intel.com with ESMTP; 21 Apr 2023 06:12:38 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, Pablo de Lara Subject: [PATCH 7/8] crypto/ipsec_mb: do not free linear_sgl always Date: Fri, 21 Apr 2023 13:12:19 +0000 Message-Id: <20230421131221.1732314-8-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230421131221.1732314-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Pablo de Lara linear_sgl buffer only needs to be freed if it was allocated previously. Signed-off-by: Pablo de Lara --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index 9ca679606f..f23016c9c3 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -1898,6 +1898,7 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data; struct aesni_mb_session *sess = NULL; uint8_t *linear_buf = NULL; + int sgl = 0; #ifdef AESNI_MB_DOCSIS_SEC_ENABLED uint8_t is_docsis_sec = 0; @@ -1923,6 +1924,8 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) op->sym->m_dst->nb_segs > 1)) && !imb_lib_support_sgl_algo(job->cipher_mode)) { linear_buf = (uint8_t *) job->user_data2; + sgl = 1; + post_process_sgl_linear(op, job, sess, linear_buf); } @@ -1952,7 +1955,8 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) default: op->status = RTE_CRYPTO_OP_STATUS_ERROR; } - rte_free(linear_buf); + if (sgl) + rte_free(linear_buf); } /* Free session if a session-less crypto op */ From patchwork Fri Apr 21 13:12:20 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126396 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1FABF429AD; Fri, 21 Apr 2023 15:13:29 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E08DC42D4B; Fri, 21 Apr 2023 15:12:42 +0200 (CEST) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id 3A11A42D46 for ; Fri, 21 Apr 2023 15:12:41 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1682082761; x=1713618761; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=OgCScGtFuz71/3Qht5eQDAua9COPw/fee0oDkhqgRok=; b=Gy03Q6aphRBp829YVFH+czoDfzjgE1/swG+9+ZbWtmAUuDyMMlHdm0Yn jFhADhqz26dvdQ6lxTS15H1chCIvUMRSRRHxArehaNohvYVwo7pRRmhSK 
I1VLzaUWuAyEbRKBIKj7Z5D1G8xJ66MhlO1xxJC6Cgk5/EZYHDD6Yc2rf /MrZMNgCI15bJi4N2LxkdpkADTy8sJLwBR9/EPtYaGQX491BSC43ZZKYA LLu9J7LvwH5GAesDtQs3yAjb14Zl1h2ERlMSxV+1moQir/JKnrpAhpTt8 xlOPt/v/lQ2ayX9RGrxQ5fEpa5nkpNCtcm9+FFffDPZn/CNSbR+iebQ4c Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="408927732" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="408927732" Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 21 Apr 2023 06:12:40 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10686"; a="724817416" X-IronPort-AV: E=Sophos;i="5.99,214,1677571200"; d="scan'208";a="724817416" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga001.jf.intel.com with ESMTP; 21 Apr 2023 06:12:39 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, Pablo de Lara , Ciara Power Subject: [PATCH 8/8] crypto/ipsec_mb: set and use session ID Date: Fri, 21 Apr 2023 13:12:20 +0000 Message-Id: <20230421131221.1732314-9-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230421131221.1732314-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Pablo de Lara When creating a session, get the session ID that defines the fixed session parameters and store it in the private data. When retrieving IMB_JOB's, if their internal session ID matches the one in the private session data, these fixed session parameters do not need to be filled again. Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power Acked-by: Wathsala Vithanage --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 22 ++++++++++++++++++++- drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 2 ++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index f23016c9c3..8600f41897 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -845,6 +845,10 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr, } } +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + sess->session_id = imb_set_session(mb_mgr, &sess->template_job); +#endif + return 0; } @@ -977,6 +981,10 @@ aesni_mb_set_docsis_sec_session_parameters( goto error_exit; } +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job); +#endif + error_exit: free_mb_mgr(mb_mgr); return ret; @@ -1386,6 +1394,9 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, job->msg_len_to_hash_in_bytes = 0; job->msg_len_to_cipher_in_bytes = 0; job->cipher_start_src_offset_in_bytes = 0; +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + imb_set_session(mb_mgr, job); +#endif } else { job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset; @@ -1470,7 +1481,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, const IMB_CIPHER_MODE cipher_mode = session->template_job.cipher_mode; - memcpy(job, &session->template_job, sizeof(IMB_JOB)); +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + if (job->session_id != session->session_id) +#endif + memcpy(job, &session->template_job, sizeof(IMB_JOB)); if (!op->sym->m_dst) { /* in-place operation */ @@ -1510,6 +1524,9 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp 
*qp, job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; job->cipher_mode = IMB_CIPHER_GCM_SGL; job->hash_alg = IMB_AUTH_GCM_SGL; +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + imb_set_session(mb_mgr, job); +#endif } break; case IMB_AUTH_AES_GMAC_128: @@ -1534,6 +1551,9 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx; job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + imb_set_session(mb_mgr, job); +#endif } break; default: diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index ce9a6e4886..9b7c9edb6d 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ -854,6 +854,8 @@ get_digest_byte_length(IMB_HASH_ALG algo) struct aesni_mb_session { IMB_JOB template_job; /*< Template job structure */ + uint32_t session_id; + /*< IPSec MB session ID */ struct { uint16_t offset; } iv;
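In short, the session-ID mechanism above is two steps: register the template job with the library once at session setup, then compare the ID carried by a recycled IMB_JOB against the session's stored ID before copying the template again. The sketch below assumes an intel-ipsec-mb release newer than 1.3.0 (the same IMB_VERSION guard the patch uses); the struct and function names are invented stand-ins, while imb_set_session(), session_id and template_job are the calls and fields the patch itself uses.

#include <string.h>
#include <stdint.h>
#include <intel-ipsec-mb.h>	/* IMB_MGR, IMB_JOB, imb_set_session() */

/* Stand-in for the driver's private session data (names hypothetical). */
struct sketch_session {
	IMB_JOB template_job;	/* fixed, per-session job fields */
	uint32_t session_id;	/* ID returned by imb_set_session() */
};

/* Session creation: register the template and remember its ID. */
static void
sketch_session_init(IMB_MGR *mb_mgr, struct sketch_session *sess)
{
	sess->session_id = imb_set_session(mb_mgr, &sess->template_job);
}

/* Job setup: a job handed back by the manager that still carries this
 * session's ID already holds the fixed parameters, so the template copy
 * is skipped; only per-operation fields are written afterwards. */
static void
sketch_prepare_job(IMB_JOB *job, const struct sketch_session *sess)
{
	if (job->session_id != sess->session_id)
		memcpy(job, &sess->template_job, sizeof(IMB_JOB));
}

Note that whenever the patch rewrites a job's cipher_mode/hash_alg for the SGL variants it calls imb_set_session() on that job again, so a modified job does not spuriously match the stored session ID.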