From patchwork Tue May 16 15:24:15 2023
X-Patchwork-Submitter: "Power, Ciara"
X-Patchwork-Id: 126893
X-Patchwork-Delegate: gakhil@marvell.com
From: Ciara Power
To: dev@dpdk.org
Cc: kai.ji@intel.com, gakhil@marvell.com, Pablo de Lara, Ciara Power
Subject: [PATCH v2 1/8] crypto/ipsec_mb: use GMAC dedicated algorithms
Date: Tue, 16 May 2023 15:24:15 +0000
Message-Id: <20230516152422.606617-2-ciara.power@intel.com>
In-Reply-To: <20230516152422.606617-1-ciara.power@intel.com>
References: <20230421131221.1732314-1-ciara.power@intel.com>
 <20230516152422.606617-1-ciara.power@intel.com>

From: Pablo de Lara

AES-GMAC can be done with the dedicated auth-only algorithms
IMB_AUTH_AES_GMAC_128/192/256, which allows another cipher algorithm to be
used alongside it, instead of GMAC being handled as part of AES-GCM.
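As a small illustration of what the change enables (not part of the patch), the
sketch below selects the dedicated auth-only GMAC enum purely from the
authentication key length, mirroring the switch added to
aesni_mb_set_session_auth_parameters(). The helper name
gmac_auth_alg_from_key_len() is hypothetical; the IMB_* identifiers come from
intel-ipsec-mb.

#include <intel-ipsec-mb.h>

/* Hypothetical helper: map the GMAC key length to the dedicated
 * auth-only hash algorithm enum. */
static IMB_HASH_ALG
gmac_auth_alg_from_key_len(uint16_t key_len_in_bytes)
{
        switch (key_len_in_bytes) {
        case IMB_KEY_128_BYTES:
                return IMB_AUTH_AES_GMAC_128;
        case IMB_KEY_192_BYTES:
                return IMB_AUTH_AES_GMAC_192;
        case IMB_KEY_256_BYTES:
                return IMB_AUTH_AES_GMAC_256;
        default:
                return IMB_AUTH_NULL; /* invalid GMAC key length */
        }
}

With the dedicated enums, the GMAC key, IV and IV length travel in
job->u.GMAC, so the driver no longer has to force job->cipher_mode to
IMB_CIPHER_GCM for auth-only GMAC, leaving the cipher slot free for another
algorithm.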
Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 104 +++++++++++-------------- 1 file changed, 47 insertions(+), 57 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index ac20d01937..c53548aa3b 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -57,8 +57,7 @@ is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode) { return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 || hash_alg == IMB_AUTH_AES_CCM || - (hash_alg == IMB_AUTH_AES_GMAC && - cipher_mode == IMB_CIPHER_GCM)); + cipher_mode == IMB_CIPHER_GCM); } /** Set session authentication parameters */ @@ -155,7 +154,6 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } else sess->cipher.direction = IMB_DIR_DECRYPT; - sess->auth.algo = IMB_AUTH_AES_GMAC; if (sess->auth.req_digest_len > get_digest_byte_length(IMB_AUTH_AES_GMAC)) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); @@ -167,16 +165,19 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, switch (xform->auth.key.length) { case IMB_KEY_128_BYTES: + sess->auth.algo = IMB_AUTH_AES_GMAC_128; IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; break; case IMB_KEY_192_BYTES: + sess->auth.algo = IMB_AUTH_AES_GMAC_192; IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES; break; case IMB_KEY_256_BYTES: + sess->auth.algo = IMB_AUTH_AES_GMAC_256; IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; @@ -1039,19 +1040,20 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, break; case IMB_AUTH_AES_GMAC: - if (session->cipher.mode == IMB_CIPHER_GCM) { - job->u.GCM.aad = aad->va; - job->u.GCM.aad_len_in_bytes = session->aead.aad_len; - } else { - /* For GMAC */ - job->u.GCM.aad = buf; - job->u.GCM.aad_len_in_bytes = len; - job->cipher_mode = IMB_CIPHER_GCM; - } + job->u.GCM.aad = aad->va; + job->u.GCM.aad_len_in_bytes = session->aead.aad_len; job->enc_keys = &session->cipher.gcm_key; job->dec_keys = &session->cipher.gcm_key; break; + case IMB_AUTH_AES_GMAC_128: + case IMB_AUTH_AES_GMAC_192: + case IMB_AUTH_AES_GMAC_256: + job->u.GMAC._key = &session->cipher.gcm_key; + job->u.GMAC._iv = iv->va; + job->u.GMAC.iv_len_in_bytes = session->iv.length; + break; + case IMB_AUTH_CHACHA20_POLY1305: job->u.CHACHA20_POLY1305.aad = aad->va; job->u.CHACHA20_POLY1305.aad_len_in_bytes = @@ -1091,16 +1093,10 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, job->dst = (uint8_t *)buf + sofs.ofs.cipher.head; job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head; job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head; - if (job->hash_alg == IMB_AUTH_AES_GMAC && - session->cipher.mode != IMB_CIPHER_GCM) { - job->msg_len_to_hash_in_bytes = 0; - job->msg_len_to_cipher_in_bytes = 0; - } else { - job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head - - sofs.ofs.auth.tail; - job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head - - sofs.ofs.cipher.tail; - } + job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head - + sofs.ofs.auth.tail; + job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head - + sofs.ofs.cipher.tail; job->user_data = udata; } @@ -1184,8 +1180,6 @@ sgl_linear_cipher_auth_len(IMB_JOB *job, uint64_t *auth_len) job->hash_alg == 
IMB_AUTH_ZUC_EIA3_BITLEN) *auth_len = (job->msg_len_to_hash_in_bits >> 3) + job->hash_start_src_offset_in_bytes; - else if (job->hash_alg == IMB_AUTH_AES_GMAC) - *auth_len = job->u.GCM.aad_len_in_bytes; else *auth_len = job->msg_len_to_hash_in_bytes + job->hash_start_src_offset_in_bytes; @@ -1352,24 +1346,24 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, break; case IMB_AUTH_AES_GMAC: - if (session->cipher.mode == IMB_CIPHER_GCM) { - job->u.GCM.aad = op->sym->aead.aad.data; - job->u.GCM.aad_len_in_bytes = session->aead.aad_len; - if (sgl) { - job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; - job->cipher_mode = IMB_CIPHER_GCM_SGL; - job->hash_alg = IMB_AUTH_GCM_SGL; - } - } else { - /* For GMAC */ - job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src, - uint8_t *, op->sym->auth.data.offset); - job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length; - job->cipher_mode = IMB_CIPHER_GCM; + job->u.GCM.aad = op->sym->aead.aad.data; + job->u.GCM.aad_len_in_bytes = session->aead.aad_len; + if (sgl) { + job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; + job->cipher_mode = IMB_CIPHER_GCM_SGL; + job->hash_alg = IMB_AUTH_GCM_SGL; } job->enc_keys = &session->cipher.gcm_key; job->dec_keys = &session->cipher.gcm_key; break; + case IMB_AUTH_AES_GMAC_128: + case IMB_AUTH_AES_GMAC_192: + case IMB_AUTH_AES_GMAC_256: + job->u.GMAC._key = &session->cipher.gcm_key; + job->u.GMAC._iv = rte_crypto_op_ctod_offset(op, uint8_t *, + session->auth_iv.offset); + job->u.GMAC.iv_len_in_bytes = session->auth_iv.length; + break; case IMB_AUTH_ZUC_EIA3_BITLEN: case IMB_AUTH_ZUC256_EIA3_BITLEN: job->u.ZUC_EIA3._key = session->auth.zuc_auth_key; @@ -1472,19 +1466,21 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, break; case IMB_AUTH_AES_GMAC: - if (session->cipher.mode == IMB_CIPHER_GCM) { - job->hash_start_src_offset_in_bytes = - op->sym->aead.data.offset; - job->msg_len_to_hash_in_bytes = - op->sym->aead.data.length; - } else { /* AES-GMAC only, only AAD used */ - job->msg_len_to_hash_in_bytes = 0; - job->hash_start_src_offset_in_bytes = 0; - } - + job->hash_start_src_offset_in_bytes = + op->sym->aead.data.offset; + job->msg_len_to_hash_in_bytes = + op->sym->aead.data.length; job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset); break; + case IMB_AUTH_AES_GMAC_128: + case IMB_AUTH_AES_GMAC_192: + case IMB_AUTH_AES_GMAC_256: + job->hash_start_src_offset_in_bytes = + op->sym->auth.data.offset; + job->msg_len_to_hash_in_bytes = + op->sym->auth.data.length; + break; case IMB_AUTH_GCM_SGL: case IMB_AUTH_CHACHA20_POLY1305_SGL: @@ -1567,15 +1563,9 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, op->sym->cipher.data.length; break; case IMB_CIPHER_GCM: - if (session->cipher.mode == IMB_CIPHER_NULL) { - /* AES-GMAC only (only AAD used) */ - job->msg_len_to_cipher_in_bytes = 0; - job->cipher_start_src_offset_in_bytes = 0; - } else { - job->cipher_start_src_offset_in_bytes = - op->sym->aead.data.offset; - job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; - } + job->cipher_start_src_offset_in_bytes = + op->sym->aead.data.offset; + job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; break; case IMB_CIPHER_CCM: case IMB_CIPHER_CHACHA20_POLY1305: From patchwork Tue May 16 15:24:16 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126894 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from 
mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9025042B24; Tue, 16 May 2023 17:24:38 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id CCBE842D36; Tue, 16 May 2023 17:24:31 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by mails.dpdk.org (Postfix) with ESMTP id 9231A4114A for ; Tue, 16 May 2023 17:24:29 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1684250669; x=1715786669; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=jtOmUk2W66UDKOIp/ofoJGWsTBm/piPASYHE8lPdjcU=; b=OnjABpOnhO5x7TyTxetw6Z0AfRkqNLSRMmjUMxnqsDCLrM0Ymcah/u1z 9hYAawNHYeJRX+Yekeqb5Rl8+Y6yhIcNHnszlyg8D/FwHWgxP+ri7Anu1 3HDBtp+ZpQsV4ZebZVijuhTDxxnfy7oi/FpZVcxp8zxwwus7rWGfKJINq Wes+HAbgzv0OenbBUb6p5L4zCFVGKzCztdLuRwJiSG2ThteGr2FkXYZgY AAV+Z0a2yPPwaBtIcDFgUIe+5Qy7OoUyBDeIsUSv8bniHjgn/vu1Gk/kN kj451lYGuMKXWSkj7a02wuNn+zP0wk2G40Z5xPDDMVJ0USyYlGgq5Vc0N A==; X-IronPort-AV: E=McAfee;i="6600,9927,10712"; a="353789078" X-IronPort-AV: E=Sophos;i="5.99,278,1677571200"; d="scan'208";a="353789078" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 May 2023 08:24:29 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10712"; a="695500647" X-IronPort-AV: E=Sophos;i="5.99,278,1677571200"; d="scan'208";a="695500647" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga007.jf.intel.com with ESMTP; 16 May 2023 08:24:27 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, gakhil@marvell.com, Marcel Cornu , Pablo de Lara , Ciara Power Subject: [PATCH v2 2/8] crypto/ipsec_mb: use burst API in aesni_mb Date: Tue, 16 May 2023 15:24:16 +0000 Message-Id: <20230516152422.606617-3-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230516152422.606617-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> <20230516152422.606617-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Marcel Cornu Use new ipsec_mb burst API in dequeue burst function, when ipsec_mb version is v1.3 or newer. Signed-off-by: Marcel Cornu Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- v2: moved some functions inside ifdef as they are only used when IPSec_MB version is 1.2 or lower. --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 202 ++++++++++++++++++++----- 1 file changed, 167 insertions(+), 35 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index c53548aa3b..b22c0183eb 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -9,6 +9,10 @@ struct aesni_mb_op_buf_data { uint32_t offset; }; +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM +static IMB_JOB *jobs[IMB_MAX_BURST_SIZE] = {NULL}; +#endif + /** * Calculate the authentication pre-computes * @@ -1884,6 +1888,168 @@ post_process_mb_sync_job(IMB_JOB *job) st[0] = (job->status == IMB_STATUS_COMPLETED) ? 
0 : EBADMSG; } +static inline uint32_t +handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr) +{ + uint32_t i; + + for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr)) + post_process_mb_sync_job(job); + + return i; +} + +static inline uint32_t +flush_mb_sync_mgr(IMB_MGR *mb_mgr) +{ + IMB_JOB *job; + + job = IMB_FLUSH_JOB(mb_mgr); + return handle_completed_sync_jobs(job, mb_mgr); +} + +static inline IMB_JOB * +set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op) +{ + job->chain_order = IMB_ORDER_HASH_CIPHER; + job->cipher_mode = IMB_CIPHER_NULL; + job->hash_alg = IMB_AUTH_NULL; + job->cipher_direction = IMB_DIR_DECRYPT; + + /* Set user data to be crypto operation data struct */ + job->user_data = op; + + return job; +} + +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM +static uint16_t +aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + struct ipsec_mb_qp *qp = queue_pair; + IMB_MGR *mb_mgr = qp->mb_mgr; + struct rte_crypto_op *op; + struct rte_crypto_op *deqd_ops[IMB_MAX_BURST_SIZE]; + IMB_JOB *job; + int retval, processed_jobs = 0; + uint16_t i, nb_jobs; + + if (unlikely(nb_ops == 0 || mb_mgr == NULL)) + return 0; + + uint8_t digest_idx = qp->digest_idx; + uint16_t burst_sz = (nb_ops > IMB_MAX_BURST_SIZE) ? + IMB_MAX_BURST_SIZE : nb_ops; + + /* + * If nb_ops is greater than the max supported + * ipsec_mb burst size, then process in bursts of + * IMB_MAX_BURST_SIZE until all operations are submitted + */ + while (nb_ops) { + uint16_t nb_submit_ops; + uint16_t n = (nb_ops / burst_sz) ? + burst_sz : nb_ops; + + while (unlikely((IMB_GET_NEXT_BURST(mb_mgr, n, jobs)) < n)) { + /* + * Not enough free jobs in the queue + * Flush n jobs until enough jobs available + */ + nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); + for (i = 0; i < nb_jobs; i++) { + job = jobs[i]; + + op = post_process_mb_job(qp, job); + if (op) { + ops[processed_jobs++] = op; + qp->stats.dequeued_count++; + } else { + qp->stats.dequeue_err_count++; + break; + } + } + } + + /* + * Get the next operations to process from ingress queue. + * There is no need to return the job to the IMB_MGR + * if there are no more operations to process, since + * the IMB_MGR can use that pointer again in next + * get_next calls. 
+ */ + nb_submit_ops = rte_ring_dequeue_burst(qp->ingress_queue, + (void **)deqd_ops, n, NULL); + for (i = 0; i < nb_submit_ops; i++) { + job = jobs[i]; + op = deqd_ops[i]; + +#ifdef AESNI_MB_DOCSIS_SEC_ENABLED + if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) + retval = set_sec_mb_job_params(job, qp, op, + &digest_idx); + else +#endif + retval = set_mb_job_params(job, qp, op, + &digest_idx, mb_mgr); + + if (unlikely(retval != 0)) { + qp->stats.dequeue_err_count++; + set_job_null_op(job, op); + } + } + + /* Submit jobs to multi-buffer for processing */ +#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG + int err = 0; + + nb_jobs = IMB_SUBMIT_BURST(mb_mgr, nb_submit_ops, jobs); + err = imb_get_errno(mb_mgr); + if (err) + IPSEC_MB_LOG(ERR, "%s", imb_get_strerror(err)); +#else + nb_jobs = IMB_SUBMIT_BURST_NOCHECK(mb_mgr, + nb_submit_ops, jobs); +#endif + for (i = 0; i < nb_jobs; i++) { + job = jobs[i]; + + op = post_process_mb_job(qp, job); + if (op) { + ops[processed_jobs++] = op; + qp->stats.dequeued_count++; + } else { + qp->stats.dequeue_err_count++; + break; + } + } + + qp->digest_idx = digest_idx; + + if (processed_jobs < 1) { + nb_jobs = IMB_FLUSH_BURST(mb_mgr, n, jobs); + + for (i = 0; i < nb_jobs; i++) { + job = jobs[i]; + + op = post_process_mb_job(qp, job); + if (op) { + ops[processed_jobs++] = op; + qp->stats.dequeued_count++; + } else { + qp->stats.dequeue_err_count++; + break; + } + } + } + nb_ops -= n; + } + + return processed_jobs; +} +#else + /** * Process a completed IMB_JOB job and keep processing jobs until * get_completed_job return NULL @@ -1924,26 +2090,6 @@ handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, return processed_jobs; } -static inline uint32_t -handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr) -{ - uint32_t i; - - for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr)) - post_process_mb_sync_job(job); - - return i; -} - -static inline uint32_t -flush_mb_sync_mgr(IMB_MGR *mb_mgr) -{ - IMB_JOB *job; - - job = IMB_FLUSH_JOB(mb_mgr); - return handle_completed_sync_jobs(job, mb_mgr); -} - static inline uint16_t flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, struct rte_crypto_op **ops, uint16_t nb_ops) @@ -1960,20 +2106,6 @@ flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr, return processed_ops; } -static inline IMB_JOB * -set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op) -{ - job->chain_order = IMB_ORDER_HASH_CIPHER; - job->cipher_mode = IMB_CIPHER_NULL; - job->hash_alg = IMB_AUTH_NULL; - job->cipher_direction = IMB_DIR_DECRYPT; - - /* Set user data to be crypto operation data struct */ - job->user_data = op; - - return job; -} - static uint16_t aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, uint16_t nb_ops) @@ -2054,7 +2186,7 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, return processed_jobs; } - +#endif static inline int check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl) { From patchwork Tue May 16 15:24:17 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126895 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0C24142B24; Tue, 16 May 2023 17:24:46 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 
E8B5642D39; Tue, 16 May 2023 17:24:33 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by mails.dpdk.org (Postfix) with ESMTP id 4347342D29 for ; Tue, 16 May 2023 17:24:31 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1684250671; x=1715786671; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=A7LBtgyDcv5HuHJ0NgyFUyDfmeHE2CrW8vtwvyQ0Nfc=; b=Elrzsly+M6qgspBZTS6dlDisMlR/BvnFyZ8M4hEmDnTFg4Y3buTHiFmX HIqANt3GVNFO99oI5GRq+aAaQFj0dkCBTr69RHInRw42jNr+gftbi2YqR kTMsoMJ3yMb2UKhVcyal9jLFY4VNPDFYl5b95EsTQ5Mc5SZ2EajgMTNqb DIKVhsChbhTNwH4yyejznXYlglmvg1/HGuDjxRTPr4UrAqAnR5WBvPJoM baOg71oC0R9ABdRhZ2ilmU6tQ6FX43JMcFBL8jfdjoPlgzBaGQY1jGbZo cV4YaDso3HYQeh2SAaFCowlfYcRCv30ppAHg3DupkK/NF4rSgK0CW1y4v Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10712"; a="353789089" X-IronPort-AV: E=Sophos;i="5.99,278,1677571200"; d="scan'208";a="353789089" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 May 2023 08:24:30 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10712"; a="695500665" X-IronPort-AV: E=Sophos;i="5.99,278,1677571200"; d="scan'208";a="695500665" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga007.jf.intel.com with ESMTP; 16 May 2023 08:24:29 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, gakhil@marvell.com, Pablo de Lara , Ciara Power Subject: [PATCH v2 3/8] crypto/ipsec_mb: use new SGL API Date: Tue, 16 May 2023 15:24:17 +0000 Message-Id: <20230516152422.606617-4-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230516152422.606617-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> <20230516152422.606617-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Pablo de Lara Use new SGL API available from IPSec Multi-buffer v1.3, where only one function call is required to submit all segments to be processed in an SGL scenario. Instead of having one call per segment, there is only one call per buffer. 
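For context (not part of the patch), the sketch below shows the single-call
pattern using the IMB_JOB fields that the new single_sgl_job() helper fills
in. submit_whole_sgl() is a hypothetical wrapper, the job is assumed to come
from IMB_GET_NEXT_JOB(), and the IMB_SGL_IOV array is assumed to be
pre-filled with one in/out/len entry per mbuf segment.

#include <intel-ipsec-mb.h>

/* One submission per buffer: describe all segments up front and let the
 * library walk them. Handling of the returned (completed) job is omitted. */
static void
submit_whole_sgl(IMB_MGR *mb_mgr, IMB_JOB *job,
                 struct IMB_SGL_IOV *segs, uint32_t num_segs)
{
        job->sgl_state = IMB_SGL_ALL;   /* all segments passed in one call */
        job->sgl_io_segs = segs;        /* in/out pointer and length per segment */
        job->num_sgl_io_segs = num_segs;
        IMB_SUBMIT_JOB(mb_mgr);         /* single submit for the whole buffer */
}

The previous multi-call flow (IMB_SGL_INIT, one submission per segment, until
IMB_SGL_COMPLETE) is kept in multi_sgl_job() as a fallback for buffers with
more segments than MAX_NUM_SEGS.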
Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 187 +++++++++++++++----- drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 7 + 2 files changed, 153 insertions(+), 41 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index b22c0183eb..ef1f141cad 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -1241,6 +1241,141 @@ imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg) return 0; } +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM +static inline int +single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, + int oop, uint32_t offset, struct rte_mbuf *m_src, + struct rte_mbuf *m_dst, struct IMB_SGL_IOV *sgl_segs) +{ + uint32_t num_segs = 0; + struct aesni_mb_op_buf_data src_sgl = {0}; + struct aesni_mb_op_buf_data dst_sgl = {0}; + uint32_t total_len; + + job->sgl_state = IMB_SGL_ALL; + + src_sgl.m = m_src; + src_sgl.offset = offset; + + while (src_sgl.offset >= src_sgl.m->data_len) { + src_sgl.offset -= src_sgl.m->data_len; + src_sgl.m = src_sgl.m->next; + + RTE_ASSERT(src_sgl.m != NULL); + } + + if (oop) { + dst_sgl.m = m_dst; + dst_sgl.offset = offset; + + while (dst_sgl.offset >= dst_sgl.m->data_len) { + dst_sgl.offset -= dst_sgl.m->data_len; + dst_sgl.m = dst_sgl.m->next; + + RTE_ASSERT(dst_sgl.m != NULL); + } + } + total_len = op->sym->aead.data.length; + + while (total_len != 0) { + uint32_t data_len, part_len; + + if (src_sgl.m == NULL) { + IPSEC_MB_LOG(ERR, "Invalid source buffer"); + return -EINVAL; + } + + data_len = src_sgl.m->data_len - src_sgl.offset; + + sgl_segs[num_segs].in = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, + src_sgl.offset); + + if (dst_sgl.m != NULL) { + if (dst_sgl.m->data_len - dst_sgl.offset == 0) { + dst_sgl.m = dst_sgl.m->next; + if (dst_sgl.m == NULL) { + IPSEC_MB_LOG(ERR, "Invalid destination buffer"); + return -EINVAL; + } + dst_sgl.offset = 0; + } + part_len = RTE_MIN(data_len, (dst_sgl.m->data_len - + dst_sgl.offset)); + sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(dst_sgl.m, + uint8_t *, dst_sgl.offset); + dst_sgl.offset += part_len; + } else { + part_len = RTE_MIN(data_len, total_len); + sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *, + src_sgl.offset); + } + + sgl_segs[num_segs].len = part_len; + + total_len -= part_len; + + if (part_len != data_len) { + src_sgl.offset += part_len; + } else { + src_sgl.m = src_sgl.m->next; + src_sgl.offset = 0; + } + num_segs++; + } + job->num_sgl_io_segs = num_segs; + job->sgl_io_segs = sgl_segs; + return 0; +} +#endif + +static inline int +multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, + int oop, uint32_t offset, struct rte_mbuf *m_src, + struct rte_mbuf *m_dst, IMB_MGR *mb_mgr) +{ + int ret; + IMB_JOB base_job; + struct aesni_mb_op_buf_data src_sgl = {0}; + struct aesni_mb_op_buf_data dst_sgl = {0}; + uint32_t total_len; + + base_job = *job; + job->sgl_state = IMB_SGL_INIT; + job = IMB_SUBMIT_JOB(mb_mgr); + total_len = op->sym->aead.data.length; + + src_sgl.m = m_src; + src_sgl.offset = offset; + + while (src_sgl.offset >= src_sgl.m->data_len) { + src_sgl.offset -= src_sgl.m->data_len; + src_sgl.m = src_sgl.m->next; + + RTE_ASSERT(src_sgl.m != NULL); + } + + if (oop) { + dst_sgl.m = m_dst; + dst_sgl.offset = offset; + + while (dst_sgl.offset >= dst_sgl.m->data_len) { + dst_sgl.offset -= dst_sgl.m->data_len; + dst_sgl.m = dst_sgl.m->next; + + RTE_ASSERT(dst_sgl.m != NULL); + } + } + + while (job->sgl_state != IMB_SGL_COMPLETE) { + job = 
IMB_GET_NEXT_JOB(mb_mgr); + *job = base_job; + ret = handle_aead_sgl_job(job, mb_mgr, &total_len, + &src_sgl, &dst_sgl); + if (ret < 0) + return ret; + } + return 0; +} /** * Process a crypto operation and complete a IMB_JOB job structure for * submission to the multi buffer library for processing. @@ -1262,19 +1397,15 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, { struct rte_mbuf *m_src = op->sym->m_src, *m_dst; struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp); - struct aesni_mb_op_buf_data src_sgl = {0}; - struct aesni_mb_op_buf_data dst_sgl = {0}; struct aesni_mb_session *session; - uint32_t m_offset, oop; + uint32_t m_offset; + int oop; uint32_t auth_off_in_bytes; uint32_t ciph_off_in_bytes; uint32_t auth_len_in_bytes; uint32_t ciph_len_in_bytes; - uint32_t total_len; - IMB_JOB base_job; uint8_t sgl = 0; uint8_t lb_sgl = 0; - int ret; session = ipsec_mb_get_session_private(qp, op); if (session == NULL) { @@ -1602,41 +1733,15 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, if (lb_sgl) return handle_sgl_linear(job, op, m_offset, session); - base_job = *job; - job->sgl_state = IMB_SGL_INIT; - job = IMB_SUBMIT_JOB(mb_mgr); - total_len = op->sym->aead.data.length; - - src_sgl.m = m_src; - src_sgl.offset = m_offset; - - while (src_sgl.offset >= src_sgl.m->data_len) { - src_sgl.offset -= src_sgl.m->data_len; - src_sgl.m = src_sgl.m->next; - - RTE_ASSERT(src_sgl.m != NULL); - } - - if (oop) { - dst_sgl.m = m_dst; - dst_sgl.offset = m_offset; - - while (dst_sgl.offset >= dst_sgl.m->data_len) { - dst_sgl.offset -= dst_sgl.m->data_len; - dst_sgl.m = dst_sgl.m->next; - - RTE_ASSERT(dst_sgl.m != NULL); - } - } - - while (job->sgl_state != IMB_SGL_COMPLETE) { - job = IMB_GET_NEXT_JOB(mb_mgr); - *job = base_job; - ret = handle_aead_sgl_job(job, mb_mgr, &total_len, - &src_sgl, &dst_sgl); - if (ret < 0) - return ret; - } +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM + if (m_src->nb_segs <= MAX_NUM_SEGS) + return single_sgl_job(job, op, oop, + m_offset, m_src, m_dst, + qp_data->sgl_segs); + else +#endif + return multi_sgl_job(job, op, oop, + m_offset, m_src, m_dst, mb_mgr); } return 0; diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index 8a7c74f621..e17b53e4fe 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ -20,6 +20,10 @@ #define HMAC_IPAD_VALUE (0x36) #define HMAC_OPAD_VALUE (0x5C) +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM +#define MAX_NUM_SEGS 16 +#endif + static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = { { /* MD5 HMAC */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, @@ -729,6 +733,9 @@ struct aesni_mb_qp_data { * by the driver when verifying a digest provided * by the user (using authentication verify operation) */ +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM + struct IMB_SGL_IOV sgl_segs[MAX_NUM_SEGS]; +#endif union { struct gcm_context_data gcm_sgl_ctx; struct chacha20_poly1305_context_data chacha_sgl_ctx; From patchwork Tue May 16 15:24:18 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126896 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6A6FD42B24; Tue, 16 May 2023 17:24:54 +0200 (CEST) Received: from mails.dpdk.org (localhost 
[127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 80B4042D44; Tue, 16 May 2023 17:24:36 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by mails.dpdk.org (Postfix) with ESMTP id A8ECC40689 for ; Tue, 16 May 2023 17:24:34 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1684250674; x=1715786674; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=5ITM7j6HFfzyx4TNh8l2VJIUh7qbIXRVU5oto1UULM0=; b=PZvE8l8RkNXDjl+umQzrPY+aHZpeCx6XtlqQoarjIiBRfjMrfAx3JrG5 Jzd2azXsmIn+9gHxlVmAnwBsFoh8OyyJ7n2Rrt0wNvnBvAjqXKcub9SHg x4qtYksP35PbIu0A+INE5gSsxvkM5SAjOZlImluKXTn0bVcHmzKtmqnrc 355ELzX8qj6gW/ODPHeei8wNAri0lM8ks8U4RIT7Kq2qNf/JG/OR+efoZ B4/q58C7+k40fkqvHaGioUS/RLGdy7jNJiv00VuhMi70gjRvCT5vYMTCr wuSFVB0gn2vFR72Jipjn7+sSAag+zPFy/VVmoXmMrrN/j9rSKr0f+AOAi Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10712"; a="353789105" X-IronPort-AV: E=Sophos;i="5.99,278,1677571200"; d="scan'208";a="353789105" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 May 2023 08:24:34 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10712"; a="695500680" X-IronPort-AV: E=Sophos;i="5.99,278,1677571200"; d="scan'208";a="695500680" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga007.jf.intel.com with ESMTP; 16 May 2023 08:24:30 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, gakhil@marvell.com, Pablo de Lara Subject: [PATCH v2 4/8] crypto/ipsec_mb: remove unneeded fields in crypto session Date: Tue, 16 May 2023 15:24:18 +0000 Message-Id: <20230516152422.606617-5-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230516152422.606617-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> <20230516152422.606617-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Pablo de Lara Cipher direction, cipher mode and hash algorithm are duplicated in crypto session. 
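As a rough illustration (hypothetical, heavily reduced struct; the real layout
is in pmd_aesni_mb_priv.h), the duplication looks like this: the removed
top-level fields repeat values already held in the nested cipher/auth members
that the driver actually uses (e.g. session->cipher.mode, session->auth.algo).

#include <intel-ipsec-mb.h>

/* Sketch only, not the real session layout. */
struct aesni_mb_session_sketch {
        IMB_CIPHER_MODE cipher_mode;            /* removed: same as cipher.mode */
        IMB_CIPHER_DIRECTION cipher_direction;  /* removed: same as cipher.direction */
        IMB_HASH_ALG hash_alg;                  /* removed: same as auth.algo */
        IMB_CHAIN_ORDER chain_order;            /* kept */
        struct {
                IMB_CIPHER_MODE mode;
                IMB_CIPHER_DIRECTION direction;
        } cipher;
        struct {
                IMB_HASH_ALG algo;
        } auth;
};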
Signed-off-by: Pablo de Lara --- drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index e17b53e4fe..3cf44f8bc4 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ -852,9 +852,6 @@ get_digest_byte_length(IMB_HASH_ALG algo) /** AES-NI multi-buffer private session structure */ struct aesni_mb_session { - IMB_CIPHER_MODE cipher_mode; - IMB_CIPHER_DIRECTION cipher_direction; - IMB_HASH_ALG hash_alg; IMB_CHAIN_ORDER chain_order; /* common job fields */ struct { From patchwork Tue May 16 15:24:19 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126897 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id A293442B24; Tue, 16 May 2023 17:25:00 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 800294114A; Tue, 16 May 2023 17:24:39 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by mails.dpdk.org (Postfix) with ESMTP id F04BD42D47 for ; Tue, 16 May 2023 17:24:36 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1684250677; x=1715786677; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Qg5VeVDDwtqvE+G4GMNDmwRlII1FuBSwR26UaDAIP7c=; b=ZWMlGXIm/1ht8arCLFregqq9GfiRTjLdpQ9Qrg2IYdXoyiwhbwkOv3oK vf7Ho4mjYbQ6jRNqZtQBLGyjpt6YUX5GFAqOAXGIL1Wn15k32MxaiQEYX NqHNl1OIKGGYaF6Rx93x4+pvWxhLuyF3Rc/WQ4Cu0ZnE4OVwTUJau6e3n VE4yArilhDf0HHIa3Qh7IBv3PlN0P4G09MRTfPfQpr1SpAWkNsbOJ9eLO wJqiOlLH3TsJsd9kBhFtSCaphqT5adPzEP6s87WrDXMnZZMqI7zuH0EHw t4DLmrgutqMwPeEHCZJZlBqpX64SRGGH+rjLK//jUOCAVj7/eZ4hbU4kK g==; X-IronPort-AV: E=McAfee;i="6600,9927,10712"; a="353789122" X-IronPort-AV: E=Sophos;i="5.99,278,1677571200"; d="scan'208";a="353789122" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 May 2023 08:24:36 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10712"; a="695500707" X-IronPort-AV: E=Sophos;i="5.99,278,1677571200"; d="scan'208";a="695500707" Received: from silpixa00400355.ir.intel.com (HELO silpixa00400355.ger.corp.intel.com) ([10.237.222.80]) by orsmga007.jf.intel.com with ESMTP; 16 May 2023 08:24:34 -0700 From: Ciara Power To: dev@dpdk.org Cc: kai.ji@intel.com, gakhil@marvell.com, Pablo de Lara , Ciara Power Subject: [PATCH v2 5/8] crypto/ipsec_mb: store template job Date: Tue, 16 May 2023 15:24:19 +0000 Message-Id: <20230516152422.606617-6-ciara.power@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230516152422.606617-1-ciara.power@intel.com> References: <20230421131221.1732314-1-ciara.power@intel.com> <20230516152422.606617-1-ciara.power@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Pablo de Lara Store template IMB_JOB in session that will have filled all session-related fields. 
These fields include cipher direction, chain order, cipher mode, hash algorithm, key length, IV lengths, AAD length, digest length, and key pointers. Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 403 ++++++++------------ drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 20 +- 2 files changed, 159 insertions(+), 264 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index ef1f141cad..80f59e75de 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -76,7 +76,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, uint32_t auth_precompute = 1; if (xform == NULL) { - sess->auth.algo = IMB_AUTH_NULL; + sess->template_job.hash_alg = IMB_AUTH_NULL; return 0; } @@ -87,7 +87,6 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, /* Set IV parameters */ sess->auth_iv.offset = xform->auth.iv.offset; - sess->auth_iv.length = xform->auth.iv.length; /* Set the request digest size */ sess->auth.req_digest_len = xform->auth.digest_length; @@ -97,13 +96,13 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, /* Set Authentication Parameters */ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) { - sess->auth.algo = IMB_AUTH_NULL; - sess->auth.gen_digest_len = 0; + sess->template_job.hash_alg = IMB_AUTH_NULL; + sess->template_job.auth_tag_output_len_in_bytes = 0; return 0; } if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) { - sess->auth.algo = IMB_AUTH_AES_XCBC; + sess->template_job.hash_alg = IMB_AUTH_AES_XCBC; uint16_t xcbc_mac_digest_len = get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC); @@ -111,18 +110,21 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data, sess->auth.xcbc.k1_expanded, sess->auth.xcbc.k2, sess->auth.xcbc.k3); + sess->template_job.u.XCBC._k1_expanded = sess->auth.xcbc.k1_expanded; + sess->template_job.u.XCBC._k2 = sess->auth.xcbc.k2; + sess->template_job.u.XCBC._k3 = sess->auth.xcbc.k3; return 0; } if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) { uint32_t dust[4*15]; - sess->auth.algo = IMB_AUTH_AES_CMAC; + sess->template_job.hash_alg = IMB_AUTH_AES_CMAC; uint16_t cmac_digest_len = get_digest_byte_length(IMB_AUTH_AES_CMAC); @@ -140,70 +142,74 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, * the requested number of bytes. 
*/ if (sess->auth.req_digest_len < 4) - sess->auth.gen_digest_len = cmac_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = cmac_digest_len; else - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data, sess->auth.cmac.expkey, dust); IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey, sess->auth.cmac.skey1, sess->auth.cmac.skey2); + sess->template_job.u.CMAC._key_expanded = sess->auth.cmac.expkey; + sess->template_job.u.CMAC._skey1 = sess->auth.cmac.skey1; + sess->template_job.u.CMAC._skey2 = sess->auth.cmac.skey2; return 0; } if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) { - sess->cipher.direction = IMB_DIR_ENCRYPT; - sess->chain_order = IMB_ORDER_CIPHER_HASH; + sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; + sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; } else - sess->cipher.direction = IMB_DIR_DECRYPT; + sess->template_job.cipher_direction = IMB_DIR_DECRYPT; if (sess->auth.req_digest_len > get_digest_byte_length(IMB_AUTH_AES_GMAC)) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; - sess->iv.length = xform->auth.iv.length; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; + sess->template_job.u.GMAC.iv_len_in_bytes = xform->auth.iv.length; sess->iv.offset = xform->auth.iv.offset; switch (xform->auth.key.length) { case IMB_KEY_128_BYTES: - sess->auth.algo = IMB_AUTH_AES_GMAC_128; + sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_128; IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; break; case IMB_KEY_192_BYTES: - sess->auth.algo = IMB_AUTH_AES_GMAC_192; + sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_192; IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); - sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; break; case IMB_KEY_256_BYTES: - sess->auth.algo = IMB_AUTH_AES_GMAC_256; + sess->template_job.hash_alg = IMB_AUTH_AES_GMAC_256; IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data, &sess->cipher.gcm_key); - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; break; default: IPSEC_MB_LOG(ERR, "Invalid authentication key length\n"); return -EINVAL; } + sess->template_job.u.GMAC._key = &sess->cipher.gcm_key; return 0; } if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) { if (xform->auth.key.length == 16) { - sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN; + sess->template_job.hash_alg = IMB_AUTH_ZUC_EIA3_BITLEN; if (sess->auth.req_digest_len != 4) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } } else if (xform->auth.key.length == 32) { - sess->auth.algo = IMB_AUTH_ZUC256_EIA3_BITLEN; + sess->template_job.hash_alg = IMB_AUTH_ZUC256_EIA3_BITLEN; #if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM if (sess->auth.req_digest_len != 4 && sess->auth.req_digest_len != 8 && @@ -219,13 +225,14 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, xform->auth.key.length); + 
sess->template_job.u.ZUC_EIA3._key = sess->auth.zuc_auth_key; return 0; } else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) { - sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN; + sess->template_job.hash_alg = IMB_AUTH_SNOW3G_UIA2_BITLEN; uint16_t snow3g_uia2_digest_len = get_truncated_digest_byte_length( IMB_AUTH_SNOW3G_UIA2_BITLEN); @@ -233,33 +240,37 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data, &sess->auth.pKeySched_snow3g_auth); + sess->template_job.u.SNOW3G_UIA2._key = (void *) + &sess->auth.pKeySched_snow3g_auth; return 0; } else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) { - sess->auth.algo = IMB_AUTH_KASUMI_UIA1; + sess->template_job.hash_alg = IMB_AUTH_KASUMI_UIA1; uint16_t kasumi_f9_digest_len = get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1); if (sess->auth.req_digest_len != kasumi_f9_digest_len) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; } - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data, &sess->auth.pKeySched_kasumi_auth); + sess->template_job.u.KASUMI_UIA1._key = (void *) + &sess->auth.pKeySched_kasumi_auth; return 0; } switch (xform->auth.algo) { case RTE_CRYPTO_AUTH_MD5_HMAC: - sess->auth.algo = IMB_AUTH_MD5; + sess->template_job.hash_alg = IMB_AUTH_MD5; hash_oneblock_fn = mb_mgr->md5_one_block; break; case RTE_CRYPTO_AUTH_SHA1_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_1; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_1; hash_oneblock_fn = mb_mgr->sha1_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_1)) { @@ -271,11 +282,11 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA1: - sess->auth.algo = IMB_AUTH_SHA_1; + sess->template_job.hash_alg = IMB_AUTH_SHA_1; auth_precompute = 0; break; case RTE_CRYPTO_AUTH_SHA224_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_224; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_224; hash_oneblock_fn = mb_mgr->sha224_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_224)) { @@ -287,11 +298,11 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA224: - sess->auth.algo = IMB_AUTH_SHA_224; + sess->template_job.hash_alg = IMB_AUTH_SHA_224; auth_precompute = 0; break; case RTE_CRYPTO_AUTH_SHA256_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_256; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_256; hash_oneblock_fn = mb_mgr->sha256_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_256)) { @@ -303,11 +314,11 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA256: - sess->auth.algo = IMB_AUTH_SHA_256; + sess->template_job.hash_alg = IMB_AUTH_SHA_256; auth_precompute = 0; break; case RTE_CRYPTO_AUTH_SHA384_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_384; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_384; hash_oneblock_fn = mb_mgr->sha384_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_384)) { @@ -319,11 +330,11 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA384: - sess->auth.algo = 
IMB_AUTH_SHA_384; + sess->template_job.hash_alg = IMB_AUTH_SHA_384; auth_precompute = 0; break; case RTE_CRYPTO_AUTH_SHA512_HMAC: - sess->auth.algo = IMB_AUTH_HMAC_SHA_512; + sess->template_job.hash_alg = IMB_AUTH_HMAC_SHA_512; hash_oneblock_fn = mb_mgr->sha512_one_block; if (xform->auth.key.length > get_auth_algo_blocksize( IMB_AUTH_HMAC_SHA_512)) { @@ -335,7 +346,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, } break; case RTE_CRYPTO_AUTH_SHA512: - sess->auth.algo = IMB_AUTH_SHA_512; + sess->template_job.hash_alg = IMB_AUTH_SHA_512; auth_precompute = 0; break; default: @@ -344,9 +355,9 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, return -ENOTSUP; } uint16_t trunc_digest_size = - get_truncated_digest_byte_length(sess->auth.algo); + get_truncated_digest_byte_length(sess->template_job.hash_alg); uint16_t full_digest_size = - get_digest_byte_length(sess->auth.algo); + get_digest_byte_length(sess->template_job.hash_alg); if (sess->auth.req_digest_len > full_digest_size || sess->auth.req_digest_len == 0) { @@ -356,9 +367,9 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, if (sess->auth.req_digest_len != trunc_digest_size && sess->auth.req_digest_len != full_digest_size) - sess->auth.gen_digest_len = full_digest_size; + sess->template_job.auth_tag_output_len_in_bytes = full_digest_size; else - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; /* Plain SHA does not require precompute key */ if (auth_precompute == 0) @@ -370,14 +381,18 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, sess->auth.pads.inner, sess->auth.pads.outer, hashed_key, xform->auth.key.length, - get_auth_algo_blocksize(sess->auth.algo)); + get_auth_algo_blocksize(sess->template_job.hash_alg)); } else { calculate_auth_precomputes(hash_oneblock_fn, sess->auth.pads.inner, sess->auth.pads.outer, xform->auth.key.data, xform->auth.key.length, - get_auth_algo_blocksize(sess->auth.algo)); + get_auth_algo_blocksize(sess->template_job.hash_alg)); } + sess->template_job.u.HMAC._hashed_auth_key_xor_ipad = + sess->auth.pads.inner; + sess->template_job.u.HMAC._hashed_auth_key_xor_opad = + sess->auth.pads.outer; return 0; } @@ -396,7 +411,7 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, uint8_t is_kasumi = 0; if (xform == NULL) { - sess->cipher.mode = IMB_CIPHER_NULL; + sess->template_job.cipher_mode = IMB_CIPHER_NULL; return 0; } @@ -408,10 +423,10 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, /* Select cipher direction */ switch (xform->cipher.op) { case RTE_CRYPTO_CIPHER_OP_ENCRYPT: - sess->cipher.direction = IMB_DIR_ENCRYPT; + sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; break; case RTE_CRYPTO_CIPHER_OP_DECRYPT: - sess->cipher.direction = IMB_DIR_DECRYPT; + sess->template_job.cipher_direction = IMB_DIR_DECRYPT; break; default: IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter"); @@ -421,48 +436,48 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, /* Select cipher mode */ switch (xform->cipher.algo) { case RTE_CRYPTO_CIPHER_AES_CBC: - sess->cipher.mode = IMB_CIPHER_CBC; + sess->template_job.cipher_mode = IMB_CIPHER_CBC; is_aes = 1; break; case RTE_CRYPTO_CIPHER_AES_CTR: - sess->cipher.mode = IMB_CIPHER_CNTR; + sess->template_job.cipher_mode = IMB_CIPHER_CNTR; is_aes = 1; break; case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: - sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI; + sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_SEC_BPI; 
is_docsis = 1; break; case RTE_CRYPTO_CIPHER_DES_CBC: - sess->cipher.mode = IMB_CIPHER_DES; + sess->template_job.cipher_mode = IMB_CIPHER_DES; break; case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: - sess->cipher.mode = IMB_CIPHER_DOCSIS_DES; + sess->template_job.cipher_mode = IMB_CIPHER_DOCSIS_DES; break; case RTE_CRYPTO_CIPHER_3DES_CBC: - sess->cipher.mode = IMB_CIPHER_DES3; + sess->template_job.cipher_mode = IMB_CIPHER_DES3; is_3DES = 1; break; case RTE_CRYPTO_CIPHER_AES_ECB: - sess->cipher.mode = IMB_CIPHER_ECB; + sess->template_job.cipher_mode = IMB_CIPHER_ECB; is_aes = 1; break; case RTE_CRYPTO_CIPHER_ZUC_EEA3: - sess->cipher.mode = IMB_CIPHER_ZUC_EEA3; + sess->template_job.cipher_mode = IMB_CIPHER_ZUC_EEA3; is_zuc = 1; break; case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: - sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN; + sess->template_job.cipher_mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN; is_snow3g = 1; break; case RTE_CRYPTO_CIPHER_KASUMI_F8: - sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN; + sess->template_job.cipher_mode = IMB_CIPHER_KASUMI_UEA1_BITLEN; is_kasumi = 1; break; case RTE_CRYPTO_CIPHER_NULL: - sess->cipher.mode = IMB_CIPHER_NULL; - sess->cipher.key_length_in_bytes = 0; + sess->template_job.cipher_mode = IMB_CIPHER_NULL; + sess->template_job.key_len_in_bytes = 0; sess->iv.offset = xform->cipher.iv.offset; - sess->iv.length = xform->cipher.iv.length; + sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; return 0; default: IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter"); @@ -471,25 +486,25 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, /* Set IV parameters */ sess->iv.offset = xform->cipher.iv.offset; - sess->iv.length = xform->cipher.iv.length; + sess->template_job.iv_len_in_bytes = xform->cipher.iv.length; /* Check key length and choose key expansion function for AES */ if (is_aes) { switch (xform->cipher.key.length) { case IMB_KEY_128_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); break; case IMB_KEY_192_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); break; case IMB_KEY_256_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); @@ -498,16 +513,19 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } + + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; } else if (is_docsis) { switch (xform->cipher.key.length) { case IMB_KEY_128_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); break; case IMB_KEY_256_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data, 
sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); @@ -516,6 +534,8 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; } else if (is_3DES) { uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0], sess->cipher.exp_3des_keys.key[1], @@ -559,38 +579,46 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, return -EINVAL; } - sess->cipher.key_length_in_bytes = 24; + sess->template_job.enc_keys = sess->cipher.exp_3des_keys.ks_ptr; + sess->template_job.dec_keys = sess->cipher.exp_3des_keys.ks_ptr; + sess->template_job.key_len_in_bytes = 24; } else if (is_zuc) { if (xform->cipher.key.length != 16 && xform->cipher.key.length != 32) { IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = xform->cipher.key.length; + sess->template_job.key_len_in_bytes = xform->cipher.key.length; memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data, xform->cipher.key.length); + sess->template_job.enc_keys = sess->cipher.zuc_cipher_key; + sess->template_job.dec_keys = sess->cipher.zuc_cipher_key; } else if (is_snow3g) { if (xform->cipher.key.length != 16) { IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = 16; + sess->template_job.key_len_in_bytes = 16; IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data, &sess->cipher.pKeySched_snow3g_cipher); + sess->template_job.enc_keys = &sess->cipher.pKeySched_snow3g_cipher; + sess->template_job.dec_keys = &sess->cipher.pKeySched_snow3g_cipher; } else if (is_kasumi) { if (xform->cipher.key.length != 16) { IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = 16; + sess->template_job.key_len_in_bytes = 16; IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data, &sess->cipher.pKeySched_kasumi_cipher); + sess->template_job.enc_keys = &sess->cipher.pKeySched_kasumi_cipher; + sess->template_job.dec_keys = &sess->cipher.pKeySched_kasumi_cipher; } else { if (xform->cipher.key.length != 8) { IPSEC_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = 8; + sess->template_job.key_len_in_bytes = 8; IMB_DES_KEYSCHED(mb_mgr, (uint64_t *)sess->cipher.expanded_aes_keys.encode, @@ -598,6 +626,8 @@ aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr, IMB_DES_KEYSCHED(mb_mgr, (uint64_t *)sess->cipher.expanded_aes_keys.decode, xform->cipher.key.data); + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; } return 0; @@ -610,11 +640,11 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, { switch (xform->aead.op) { case RTE_CRYPTO_AEAD_OP_ENCRYPT: - sess->cipher.direction = IMB_DIR_ENCRYPT; + sess->template_job.cipher_direction = IMB_DIR_ENCRYPT; sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; break; case RTE_CRYPTO_AEAD_OP_DECRYPT: - sess->cipher.direction = IMB_DIR_DECRYPT; + sess->template_job.cipher_direction = IMB_DIR_DECRYPT; sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; break; default: @@ -624,27 +654,28 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, /* Set IV parameters */ sess->iv.offset = xform->aead.iv.offset; - sess->iv.length = xform->aead.iv.length; + sess->template_job.iv_len_in_bytes = 
xform->aead.iv.length; /* Set digest sizes */ sess->auth.req_digest_len = xform->aead.digest_length; - sess->auth.gen_digest_len = sess->auth.req_digest_len; + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; switch (xform->aead.algo) { case RTE_CRYPTO_AEAD_AES_CCM: - sess->cipher.mode = IMB_CIPHER_CCM; - sess->auth.algo = IMB_AUTH_AES_CCM; + sess->template_job.cipher_mode = IMB_CIPHER_CCM; + sess->template_job.hash_alg = IMB_AUTH_AES_CCM; + sess->template_job.u.CCM.aad_len_in_bytes = xform->aead.aad_length; /* Check key length and choose key expansion function for AES */ switch (xform->aead.key.length) { case IMB_KEY_128_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); break; case IMB_KEY_256_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); @@ -654,6 +685,8 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, return -EINVAL; } + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; /* CCM digests must be between 4 and 16 and an even number */ if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN || sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN || @@ -664,22 +697,23 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, break; case RTE_CRYPTO_AEAD_AES_GCM: - sess->cipher.mode = IMB_CIPHER_GCM; - sess->auth.algo = IMB_AUTH_AES_GMAC; + sess->template_job.cipher_mode = IMB_CIPHER_GCM; + sess->template_job.hash_alg = IMB_AUTH_AES_GMAC; + sess->template_job.u.GCM.aad_len_in_bytes = xform->aead.aad_length; switch (xform->aead.key.length) { case IMB_KEY_128_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_128_BYTES; IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data, &sess->cipher.gcm_key); break; case IMB_KEY_192_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_192_BYTES; IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data, &sess->cipher.gcm_key); break; case IMB_KEY_256_BYTES: - sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES; + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data, &sess->cipher.gcm_key); break; @@ -688,6 +722,8 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, return -EINVAL; } + sess->template_job.enc_keys = &sess->cipher.gcm_key; + sess->template_job.dec_keys = &sess->cipher.gcm_key; /* GCM digest size must be between 1 and 16 */ if (sess->auth.req_digest_len == 0 || sess->auth.req_digest_len > 16) { @@ -697,16 +733,20 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, break; case RTE_CRYPTO_AEAD_CHACHA20_POLY1305: - sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305; - sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305; + sess->template_job.cipher_mode = IMB_CIPHER_CHACHA20_POLY1305; + sess->template_job.hash_alg = IMB_AUTH_CHACHA20_POLY1305; + sess->template_job.u.CHACHA20_POLY1305.aad_len_in_bytes = + xform->aead.aad_length; if (xform->aead.key.length != 32) { IPSEC_MB_LOG(ERR, "Invalid key length"); return -EINVAL; } - sess->cipher.key_length_in_bytes = 
32; + sess->template_job.key_len_in_bytes = 32; memcpy(sess->cipher.expanded_aes_keys.encode, xform->aead.key.data, 32); + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; if (sess->auth.req_digest_len != 16) { IPSEC_MB_LOG(ERR, "Invalid digest size\n"); return -EINVAL; @@ -741,16 +781,16 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr, /* Select Crypto operation - hash then cipher / cipher then hash */ switch (mode) { case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT: - sess->chain_order = IMB_ORDER_HASH_CIPHER; + sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; break; case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN: case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY: - sess->chain_order = IMB_ORDER_CIPHER_HASH; + sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; break; case IPSEC_MB_OP_HASH_GEN_ONLY: case IPSEC_MB_OP_HASH_VERIFY_ONLY: case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT: - sess->chain_order = IMB_ORDER_HASH_CIPHER; + sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; break; /* * Multi buffer library operates only at two modes, @@ -760,18 +800,16 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr, * the first operation and decryption the last one. */ case IPSEC_MB_OP_ENCRYPT_ONLY: - sess->chain_order = IMB_ORDER_CIPHER_HASH; + sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; break; case IPSEC_MB_OP_DECRYPT_ONLY: - sess->chain_order = IMB_ORDER_HASH_CIPHER; + sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; break; case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT: - sess->chain_order = IMB_ORDER_CIPHER_HASH; - sess->aead.aad_len = xform->aead.aad_length; + sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; break; case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT: - sess->chain_order = IMB_ORDER_HASH_CIPHER; - sess->aead.aad_len = xform->aead.aad_length; + sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; break; case IPSEC_MB_OP_NOT_SUPPORTED: default: @@ -781,8 +819,7 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr, } /* Default IV length = 0 */ - sess->iv.length = 0; - sess->auth_iv.length = 0; + sess->template_job.iv_len_in_bytes = 0; ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform); if (ret != 0) { @@ -864,10 +901,10 @@ aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess, /* Select CRC generate/verify */ if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) { - sess->auth.algo = IMB_AUTH_DOCSIS_CRC32; + sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) { - sess->auth.algo = IMB_AUTH_DOCSIS_CRC32; + sess->template_job.hash_alg = IMB_AUTH_DOCSIS_CRC32; sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE; } else { IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction"); @@ -875,7 +912,7 @@ aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess, } sess->auth.req_digest_len = RTE_ETHER_CRC_LEN; - sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN; + sess->template_job.auth_tag_output_len_in_bytes = RTE_ETHER_CRC_LEN; return 0; } @@ -907,12 +944,12 @@ aesni_mb_set_docsis_sec_session_parameters( switch (conf->docsis.direction) { case RTE_SECURITY_DOCSIS_UPLINK: - ipsec_sess->chain_order = IMB_ORDER_CIPHER_HASH; + ipsec_sess->template_job.chain_order = IMB_ORDER_CIPHER_HASH; docsis_xform = &conf->docsis; cipher_xform = conf->crypto_xform; break; case RTE_SECURITY_DOCSIS_DOWNLINK: - ipsec_sess->chain_order = IMB_ORDER_HASH_CIPHER; + 
ipsec_sess->template_job.chain_order = IMB_ORDER_HASH_CIPHER; cipher_xform = conf->crypto_xform; docsis_xform = &conf->docsis; break; @@ -923,7 +960,7 @@ aesni_mb_set_docsis_sec_session_parameters( } /* Default IV length = 0 */ - ipsec_sess->iv.length = 0; + ipsec_sess->template_job.iv_len_in_bytes = 0; ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess, docsis_xform); @@ -958,7 +995,7 @@ auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session, uint32_t cipher_end, auth_end; /* Only cipher then hash needs special calculation. */ - if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl) + if (!oop || session->template_job.chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl) return auth_offset; m_src = op->sym->m_src; @@ -1004,80 +1041,35 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata) { - /* Set crypto operation */ - job->chain_order = session->chain_order; - - /* Set cipher parameters */ - job->cipher_direction = session->cipher.direction; - job->cipher_mode = session->cipher.mode; - - job->key_len_in_bytes = session->cipher.key_length_in_bytes; + memcpy(job, &session->template_job, sizeof(IMB_JOB)); /* Set authentication parameters */ - job->hash_alg = session->auth.algo; job->iv = iv->va; switch (job->hash_alg) { - case IMB_AUTH_AES_XCBC: - job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; - job->u.XCBC._k2 = session->auth.xcbc.k2; - job->u.XCBC._k3 = session->auth.xcbc.k3; - - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - break; - case IMB_AUTH_AES_CCM: job->u.CCM.aad = (uint8_t *)aad->va + 18; - job->u.CCM.aad_len_in_bytes = session->aead.aad_len; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; job->iv++; break; - case IMB_AUTH_AES_CMAC: - job->u.CMAC._key_expanded = session->auth.cmac.expkey; - job->u.CMAC._skey1 = session->auth.cmac.skey1; - job->u.CMAC._skey2 = session->auth.cmac.skey2; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - break; - case IMB_AUTH_AES_GMAC: job->u.GCM.aad = aad->va; - job->u.GCM.aad_len_in_bytes = session->aead.aad_len; - job->enc_keys = &session->cipher.gcm_key; - job->dec_keys = &session->cipher.gcm_key; break; case IMB_AUTH_AES_GMAC_128: case IMB_AUTH_AES_GMAC_192: case IMB_AUTH_AES_GMAC_256: - job->u.GMAC._key = &session->cipher.gcm_key; job->u.GMAC._iv = iv->va; - job->u.GMAC.iv_len_in_bytes = session->iv.length; break; case IMB_AUTH_CHACHA20_POLY1305: job->u.CHACHA20_POLY1305.aad = aad->va; - job->u.CHACHA20_POLY1305.aad_len_in_bytes = - session->aead.aad_len; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.encode; break; default: job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner; job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer; - - if (job->cipher_mode == IMB_CIPHER_DES3) { - job->enc_keys = session->cipher.exp_3des_keys.ks_ptr; - job->dec_keys = session->cipher.exp_3des_keys.ks_ptr; - } else { - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - } } /* @@ -1087,10 +1079,6 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, /* Set digest location and length */ job->auth_tag_output 
= digest; - job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len; - - /* Set IV parameters */ - job->iv_len_in_bytes = session->iv.length; /* Data Parameters */ job->src = buf; @@ -1235,8 +1223,10 @@ handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset, static inline int imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg) { - if (alg == IMB_CIPHER_CHACHA20_POLY1305 - || alg == IMB_CIPHER_GCM) + if (alg == IMB_CIPHER_CHACHA20_POLY1305 || + alg == IMB_CIPHER_CHACHA20_POLY1305_SGL || + alg == IMB_CIPHER_GCM_SGL || + alg == IMB_CIPHER_GCM) return 1; return 0; } @@ -1413,28 +1403,11 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, return -1; } - /* Set crypto operation */ - job->chain_order = session->chain_order; - - /* Set cipher parameters */ - job->cipher_direction = session->cipher.direction; - job->cipher_mode = session->cipher.mode; - - job->key_len_in_bytes = session->cipher.key_length_in_bytes; + memcpy(job, &session->template_job, sizeof(IMB_JOB)); /* Set authentication parameters */ - job->hash_alg = session->auth.algo; - const int aead = is_aead_algo(job->hash_alg, job->cipher_mode); - if (job->cipher_mode == IMB_CIPHER_DES3) { - job->enc_keys = session->cipher.exp_3des_keys.ks_ptr; - job->dec_keys = session->cipher.exp_3des_keys.ks_ptr; - } else { - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - } - if (!op->sym->m_dst) { /* in-place operation */ m_dst = m_src; @@ -1451,89 +1424,49 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) { sgl = 1; - if (!imb_lib_support_sgl_algo(session->cipher.mode)) + if (!imb_lib_support_sgl_algo(job->cipher_mode)) lb_sgl = 1; } switch (job->hash_alg) { - case IMB_AUTH_AES_XCBC: - job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; - job->u.XCBC._k2 = session->auth.xcbc.k2; - job->u.XCBC._k3 = session->auth.xcbc.k3; - - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - break; - case IMB_AUTH_AES_CCM: job->u.CCM.aad = op->sym->aead.aad.data + 18; - job->u.CCM.aad_len_in_bytes = session->aead.aad_len; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; - break; - - case IMB_AUTH_AES_CMAC: - job->u.CMAC._key_expanded = session->auth.cmac.expkey; - job->u.CMAC._skey1 = session->auth.cmac.skey1; - job->u.CMAC._skey2 = session->auth.cmac.skey2; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; break; case IMB_AUTH_AES_GMAC: job->u.GCM.aad = op->sym->aead.aad.data; - job->u.GCM.aad_len_in_bytes = session->aead.aad_len; if (sgl) { job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; job->cipher_mode = IMB_CIPHER_GCM_SGL; job->hash_alg = IMB_AUTH_GCM_SGL; } - job->enc_keys = &session->cipher.gcm_key; - job->dec_keys = &session->cipher.gcm_key; break; case IMB_AUTH_AES_GMAC_128: case IMB_AUTH_AES_GMAC_192: case IMB_AUTH_AES_GMAC_256: - job->u.GMAC._key = &session->cipher.gcm_key; job->u.GMAC._iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->auth_iv.offset); - job->u.GMAC.iv_len_in_bytes = session->auth_iv.length; break; case IMB_AUTH_ZUC_EIA3_BITLEN: case IMB_AUTH_ZUC256_EIA3_BITLEN: - job->u.ZUC_EIA3._key = session->auth.zuc_auth_key; job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->auth_iv.offset); break; case IMB_AUTH_SNOW3G_UIA2_BITLEN: - job->u.SNOW3G_UIA2._key = 
(void *) - &session->auth.pKeySched_snow3g_auth; job->u.SNOW3G_UIA2._iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->auth_iv.offset); break; - case IMB_AUTH_KASUMI_UIA1: - job->u.KASUMI_UIA1._key = (void *) - &session->auth.pKeySched_kasumi_auth; - break; case IMB_AUTH_CHACHA20_POLY1305: job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data; - job->u.CHACHA20_POLY1305.aad_len_in_bytes = - session->aead.aad_len; if (sgl) { job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx; job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; } - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.encode; break; default: - job->u.HMAC._hashed_auth_key_xor_ipad = - session->auth.pads.inner; - job->u.HMAC._hashed_auth_key_xor_opad = - session->auth.pads.outer; - + break; } if (aead) @@ -1542,14 +1475,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, m_offset = op->sym->cipher.data.offset; if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) { - job->enc_keys = session->cipher.zuc_cipher_key; - job->dec_keys = session->cipher.zuc_cipher_key; m_offset >>= 3; } else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) { - job->enc_keys = &session->cipher.pKeySched_snow3g_cipher; m_offset = 0; } else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) { - job->enc_keys = &session->cipher.pKeySched_kasumi_cipher; m_offset = 0; } @@ -1565,7 +1494,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, job->auth_tag_output = op->sym->auth.digest.data; if (session->auth.req_digest_len != - session->auth.gen_digest_len) { + job->auth_tag_output_len_in_bytes) { job->auth_tag_output = qp_data->temp_digests[*digest_idx]; *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; @@ -1576,12 +1505,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, * digest length as specified in the relevant IPsec RFCs */ - /* Set digest length */ - job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len; - - /* Set IV parameters */ - job->iv_len_in_bytes = session->iv.length; - /* Data Parameters */ if (sgl) { job->src = NULL; @@ -1773,8 +1696,8 @@ set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, return -1; } /* Only DOCSIS protocol operations supported now */ - if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI || - session->auth.algo != IMB_AUTH_DOCSIS_CRC32) { + if (session->template_job.cipher_mode != IMB_CIPHER_DOCSIS_SEC_BPI || + session->template_job.hash_alg != IMB_AUTH_DOCSIS_CRC32) { op->status = RTE_CRYPTO_OP_STATUS_ERROR; return -1; } @@ -1791,31 +1714,19 @@ set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, return -ENOTSUP; } - /* Set crypto operation */ - job->chain_order = session->chain_order; + memcpy(job, &session->template_job, sizeof(IMB_JOB)); /* Set cipher parameters */ - job->cipher_direction = session->cipher.direction; - job->cipher_mode = session->cipher.mode; - - job->key_len_in_bytes = session->cipher.key_length_in_bytes; job->enc_keys = session->cipher.expanded_aes_keys.encode; job->dec_keys = session->cipher.expanded_aes_keys.decode; /* Set IV parameters */ - job->iv_len_in_bytes = session->iv.length; job->iv = (uint8_t *)op + session->iv.offset; - /* Set authentication parameters */ - job->hash_alg = session->auth.algo; - /* Set digest output location */ job->auth_tag_output = qp_data->temp_digests[*digest_idx]; *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; - /* Set digest length */ - job->auth_tag_output_len_in_bytes = 
session->auth.gen_digest_len; - /* Set data parameters */ job->src = rte_pktmbuf_mtod(m_src, uint8_t *); job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, @@ -1865,7 +1776,7 @@ generate_digest(IMB_JOB *job, struct rte_crypto_op *op, struct aesni_mb_session *sess) { /* No extra copy needed */ - if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len)) + if (likely(sess->auth.req_digest_len == job->auth_tag_output_len_in_bytes)) return; /* @@ -1940,7 +1851,7 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) if ((op->sym->m_src->nb_segs > 1 || (op->sym->m_dst != NULL && op->sym->m_dst->nb_segs > 1)) && - !imb_lib_support_sgl_algo(sess->cipher.mode)) { + !imb_lib_support_sgl_algo(job->cipher_mode)) { linear_buf = (uint8_t *) job->user_data2; post_process_sgl_linear(op, job, sess, linear_buf); } @@ -1950,7 +1861,7 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job) if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { if (is_aead_algo(job->hash_alg, - sess->cipher.mode)) + job->cipher_mode)) verify_digest(job, op->sym->aead.digest.data, sess->auth.req_digest_len, diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index 3cf44f8bc4..ce9a6e4886 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ -852,14 +852,12 @@ get_digest_byte_length(IMB_HASH_ALG algo) /** AES-NI multi-buffer private session structure */ struct aesni_mb_session { - IMB_CHAIN_ORDER chain_order; - /* common job fields */ + IMB_JOB template_job; + /*< Template job structure */ struct { - uint16_t length; uint16_t offset; } iv; struct { - uint16_t length; uint16_t offset; } auth_iv; /* *< IV parameters @@ -868,13 +866,6 @@ struct aesni_mb_session { /* * Cipher Parameters */ struct { - /* * Cipher direction - encrypt / decrypt */ - IMB_CIPHER_DIRECTION direction; - /* * Cipher mode - CBC / Counter */ - IMB_CIPHER_MODE mode; - - uint64_t key_length_in_bytes; - union { struct { uint32_t encode[60] __rte_aligned(16); @@ -907,7 +898,6 @@ struct aesni_mb_session { /* *< Authentication Parameters */ struct { - IMB_HASH_ALG algo; /* *< Authentication Algorithm */ enum rte_crypto_auth_operation operation; /* *< auth operation generate or verify */ union { @@ -948,16 +938,10 @@ struct aesni_mb_session { kasumi_key_sched_t pKeySched_kasumi_auth; /* *< KASUMI scheduled authentication key */ }; - /* * Generated digest size by the Multi-buffer library */ - uint16_t gen_digest_len; /* * Requested digest size from Cryptodev */ uint16_t req_digest_len; } auth; - struct { - /* * AAD data length */ - uint16_t aad_len; - } aead; } __rte_cache_aligned; typedef void (*hash_one_block_t)(const void *data, void *digest); From patchwork Tue May 16 15:24:20 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126898 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id AA36042B24; Tue, 16 May 2023 17:25:07 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 980EF42D4E; Tue, 16 May 2023 17:24:40 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by mails.dpdk.org (Postfix) with ESMTP id 7914F4114A for ; Tue, 16 May 2023 17:24:38 +0200 (CEST) 
From: Ciara Power
To: dev@dpdk.org
Cc: kai.ji@intel.com, gakhil@marvell.com, Pablo de Lara , Ciara Power
Subject: [PATCH v2 6/8] crypto/ipsec_mb: optimize for GCM case
Date: Tue, 16 May 2023 15:24:20 +0000
Message-Id: <20230516152422.606617-7-ciara.power@intel.com>
In-Reply-To: <20230516152422.606617-1-ciara.power@intel.com>
References: <20230421131221.1732314-1-ciara.power@intel.com> <20230516152422.606617-1-ciara.power@intel.com>

From: Pablo de Lara

Use a separate code path when dealing with AES-GCM.
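To make the intent of this change easier to follow, here is a minimal, self-contained sketch of the dispatch idea. The types and helpers below are simplified stand-ins, not the driver's real IMB_JOB, aesni_mb_session or set_gcm_job() definitions: a session whose cipher mode is GCM branches into a dedicated helper that only fills the AEAD-specific fields on top of the copied template, bypassing the generic per-algorithm switch.

/* Illustrative sketch only: names are stand-ins, not the PMD's real code. */
#include <stddef.h>
#include <string.h>

enum cipher_mode { CIPHER_GCM, CIPHER_OTHER };   /* stand-in for IMB_CIPHER_MODE */

struct mb_job {                                  /* stand-in for IMB_JOB */
	enum cipher_mode cipher_mode;
	const void *aad;
	size_t aad_len;
	/* ... remaining cipher/auth fields ... */
};

struct mb_session {                              /* stand-in for aesni_mb_session */
	struct mb_job template_job;              /* filled once at session setup */
};

/* GCM-only path: the template already carries keys, chain order and digest
 * length, so only the per-operation AEAD fields are written here. */
static int set_gcm_job(struct mb_job *job, const void *aad, size_t aad_len)
{
	job->aad = aad;
	job->aad_len = aad_len;
	return 0;
}

static int set_job(struct mb_job *job, const struct mb_session *sess,
		   const void *aad, size_t aad_len)
{
	memcpy(job, &sess->template_job, sizeof(*job));

	/* Early out for the common AES-GCM case, before the generic
	 * switch over every other hash/cipher combination. */
	if (job->cipher_mode == CIPHER_GCM)
		return set_gcm_job(job, aad, aad_len);

	/* ... generic handling of all other algorithms ... */
	return 0;
}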
Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 88 +++++++++++++++++++++++--- 1 file changed, 79 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index 80f59e75de..58faf3502c 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -1366,6 +1366,70 @@ multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op, } return 0; } + +static inline int +set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, + struct aesni_mb_qp_data *qp_data, + struct rte_crypto_op *op, uint8_t *digest_idx, + const struct aesni_mb_session *session, + struct rte_mbuf *m_src, struct rte_mbuf *m_dst, + const int oop) +{ + const uint32_t m_offset = op->sym->aead.data.offset; + + job->u.GCM.aad = op->sym->aead.aad.data; + if (sgl) { + job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; + job->cipher_mode = IMB_CIPHER_GCM_SGL; + job->hash_alg = IMB_AUTH_GCM_SGL; + job->hash_start_src_offset_in_bytes = 0; + job->msg_len_to_hash_in_bytes = 0; + job->msg_len_to_cipher_in_bytes = 0; + job->cipher_start_src_offset_in_bytes = 0; + } else { + job->hash_start_src_offset_in_bytes = + op->sym->aead.data.offset; + job->msg_len_to_hash_in_bytes = + op->sym->aead.data.length; + job->cipher_start_src_offset_in_bytes = + op->sym->aead.data.offset; + job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length; + } + + if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { + job->auth_tag_output = qp_data->temp_digests[*digest_idx]; + *digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS; + } else { + job->auth_tag_output = op->sym->aead.digest.data; + } + + job->iv = rte_crypto_op_ctod_offset(op, uint8_t *, + session->iv.offset); + + /* Set user data to be crypto operation data struct */ + job->user_data = op; + + if (sgl) { + job->src = NULL; + job->dst = NULL; + +#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM + if (m_src->nb_segs <= MAX_NUM_SEGS) + return single_sgl_job(job, op, oop, + m_offset, m_src, m_dst, + qp_data->sgl_segs); + else +#endif + return multi_sgl_job(job, op, oop, + m_offset, m_src, m_dst, mb_mgr); + } else { + job->src = rte_pktmbuf_mtod(m_src, uint8_t *); + job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset); + } + + return 0; +} + /** * Process a crypto operation and complete a IMB_JOB job structure for * submission to the multi buffer library for processing. 
@@ -1403,10 +1467,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, return -1; } - memcpy(job, &session->template_job, sizeof(IMB_JOB)); + const IMB_CIPHER_MODE cipher_mode = + session->template_job.cipher_mode; - /* Set authentication parameters */ - const int aead = is_aead_algo(job->hash_alg, job->cipher_mode); + memcpy(job, &session->template_job, sizeof(IMB_JOB)); if (!op->sym->m_dst) { /* in-place operation */ @@ -1424,10 +1488,17 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) { sgl = 1; - if (!imb_lib_support_sgl_algo(job->cipher_mode)) + if (!imb_lib_support_sgl_algo(cipher_mode)) lb_sgl = 1; } + if (cipher_mode == IMB_CIPHER_GCM) + return set_gcm_job(mb_mgr, job, sgl, qp_data, + op, digest_idx, session, m_src, m_dst, oop); + + /* Set authentication parameters */ + const int aead = is_aead_algo(job->hash_alg, cipher_mode); + switch (job->hash_alg) { case IMB_AUTH_AES_CCM: job->u.CCM.aad = op->sym->aead.aad.data + 18; @@ -1474,13 +1545,12 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, else m_offset = op->sym->cipher.data.offset; - if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) { + if (cipher_mode == IMB_CIPHER_ZUC_EEA3) m_offset >>= 3; - } else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) { + else if (cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) m_offset = 0; - } else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) { + else if (cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) m_offset = 0; - } /* Set digest output location */ if (job->hash_alg != IMB_AUTH_NULL && @@ -1642,7 +1712,7 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length; } - if (job->cipher_mode == IMB_CIPHER_NULL && oop) { + if (cipher_mode == IMB_CIPHER_NULL && oop) { memcpy(job->dst + job->cipher_start_src_offset_in_bytes, job->src + job->cipher_start_src_offset_in_bytes, job->msg_len_to_cipher_in_bytes); From patchwork Tue May 16 15:24:21 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Power, Ciara" X-Patchwork-Id: 126899 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2E24742B24; Tue, 16 May 2023 17:25:16 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0383D42D5C; Tue, 16 May 2023 17:24:42 +0200 (CEST) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by mails.dpdk.org (Postfix) with ESMTP id 179C942D4D for ; Tue, 16 May 2023 17:24:39 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1684250680; x=1715786680; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=/AwgAqNyv1lq10+EcbZE9FuE8XCBSJXZ0p1gIKrKhZs=; b=dFuNOYjJ99xWVNkvYHRtYL4dpx7hg2rb8fUTA4nmAa23/YAgpyccLkJk hVYVVc6aLHDTqBxUFk4T9iDm85kynzImK5HlztZFvaCBLx6f4k1UjNpF5 p9wK5670s9QNBh3INRNnEC+n13HDk+lvQPGKjVgLAbMDCoE5O7CJyizHi SdjqZpdZfrokK9ZkQDXgn8d+1FBRqLr2yIevCJgxj1iYaoqWersqSrBbL GMKvxSvXjHcUkYOgWubc0+pWf6albPcrDJ6hgEgrbUIENzmChVOIh+8QF YC5RpyqU4dOqUBg4brCblI2MMELnO+xTHA0zV3xzm1m4hQhaJEh2/VkFc w==; X-IronPort-AV: E=McAfee;i="6600,9927,10712"; a="353789148" X-IronPort-AV: E=Sophos;i="5.99,278,1677571200"; d="scan'208";a="353789148" Received: from 
orsmga007.jf.intel.com ([10.7.209.58]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 May 2023 08:24:39 -0700
From: Ciara Power
To: dev@dpdk.org
Cc: kai.ji@intel.com, gakhil@marvell.com, Pablo de Lara
Subject: [PATCH v2 7/8] crypto/ipsec_mb: do not free linear_sgl always
Date: Tue, 16 May 2023 15:24:21 +0000
Message-Id: <20230516152422.606617-8-ciara.power@intel.com>
In-Reply-To: <20230516152422.606617-1-ciara.power@intel.com>
References: <20230421131221.1732314-1-ciara.power@intel.com> <20230516152422.606617-1-ciara.power@intel.com>

From: Pablo de Lara

linear_sgl buffer only needs to be freed if it was allocated previously.

Signed-off-by: Pablo de Lara
---
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 58faf3502c..f83738a5eb 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -1898,6 +1898,7 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
 	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
 	struct aesni_mb_session *sess = NULL;
 	uint8_t *linear_buf = NULL;
+	int sgl = 0;
 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
 	uint8_t is_docsis_sec = 0;
@@ -1923,6 +1924,8 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
 				op->sym->m_dst->nb_segs > 1)) &&
 				!imb_lib_support_sgl_algo(job->cipher_mode)) {
 			linear_buf = (uint8_t *) job->user_data2;
+			sgl = 1;
+
 			post_process_sgl_linear(op, job, sess, linear_buf);
 		}
@@ -1952,7 +1955,8 @@ post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
 	default:
 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 	}
-	rte_free(linear_buf);
+	if (sgl)
+		rte_free(linear_buf);
 }

 /* Free session if a session-less crypto op */

From patchwork Tue May 16 15:24:22 2023
X-Patchwork-Submitter: "Power, Ciara"
X-Patchwork-Id: 126900
X-Patchwork-Delegate: gakhil@marvell.com
From: Ciara Power
To: dev@dpdk.org
Cc: kai.ji@intel.com, gakhil@marvell.com, Pablo de Lara , Ciara Power
Subject: [PATCH v2 8/8] crypto/ipsec_mb: set and use session ID
Date: Tue, 16 May 2023 15:24:22 +0000
Message-Id: <20230516152422.606617-9-ciara.power@intel.com>
In-Reply-To: <20230516152422.606617-1-ciara.power@intel.com>
References: <20230421131221.1732314-1-ciara.power@intel.com> <20230516152422.606617-1-ciara.power@intel.com>

From: Pablo de Lara

When creating a session, get the session ID that defines the fixed session parameters and store it in the private data. When retrieving IMB_JOB's, if their internal session ID matches the one in the private session data, these fixed session parameters do not need to be filled again.
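A compact sketch of the fast path this commit message describes follows. The structures and the fill_job() helper are stand-ins for IMB_JOB, aesni_mb_session and the per-operation job setup; the real driver additionally guards this path on the intel-ipsec-mb version, as the diff below shows.

/* Simplified illustration, not the PMD's exact code. */
#include <stdint.h>
#include <string.h>

struct mb_job {                       /* stand-in for IMB_JOB */
	uint32_t session_id;          /* stamped by the library for the template */
	/* ... fixed cipher/auth parameters ... */
};

struct mb_session {                   /* stand-in for aesni_mb_session */
	struct mb_job template_job;   /* fixed parameters, filled at configure time */
	uint32_t session_id;          /* cached ID for the template job */
};

static void fill_job(struct mb_job *job, const struct mb_session *sess)
{
	/*
	 * Jobs are recycled by the multi-buffer manager.  If this job was
	 * last submitted with the same session, its fixed fields are still
	 * valid and the whole template copy can be skipped.
	 */
	if (job->session_id != sess->session_id)
		memcpy(job, &sess->template_job, sizeof(*job));

	/* Only per-operation fields (buffers, lengths, IV, digest) follow. */
}

Because the manager hands back recycled jobs, a queue pair that keeps servicing the same session should pay the template copy only once under this scheme.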
Signed-off-by: Pablo de Lara Signed-off-by: Ciara Power --- drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 22 ++++++++++++++++++++- drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h | 2 ++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c index f83738a5eb..f4322d9af4 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -845,6 +845,10 @@ aesni_mb_session_configure(IMB_MGR *mb_mgr, } } +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + sess->session_id = imb_set_session(mb_mgr, &sess->template_job); +#endif + return 0; } @@ -977,6 +981,10 @@ aesni_mb_set_docsis_sec_session_parameters( goto error_exit; } +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + ipsec_sess->session_id = imb_set_session(mb_mgr, &ipsec_sess->template_job); +#endif + error_exit: free_mb_mgr(mb_mgr); return ret; @@ -1386,6 +1394,9 @@ set_gcm_job(IMB_MGR *mb_mgr, IMB_JOB *job, const uint8_t sgl, job->msg_len_to_hash_in_bytes = 0; job->msg_len_to_cipher_in_bytes = 0; job->cipher_start_src_offset_in_bytes = 0; +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + imb_set_session(mb_mgr, job); +#endif } else { job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset; @@ -1470,7 +1481,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, const IMB_CIPHER_MODE cipher_mode = session->template_job.cipher_mode; - memcpy(job, &session->template_job, sizeof(IMB_JOB)); +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + if (job->session_id != session->session_id) +#endif + memcpy(job, &session->template_job, sizeof(IMB_JOB)); if (!op->sym->m_dst) { /* in-place operation */ @@ -1510,6 +1524,9 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, job->u.GCM.ctx = &qp_data->gcm_sgl_ctx; job->cipher_mode = IMB_CIPHER_GCM_SGL; job->hash_alg = IMB_AUTH_GCM_SGL; +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + imb_set_session(mb_mgr, job); +#endif } break; case IMB_AUTH_AES_GMAC_128: @@ -1534,6 +1551,9 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx; job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; +#if IMB_VERSION(1, 3, 0) < IMB_VERSION_NUM + imb_set_session(mb_mgr, job); +#endif } break; default: diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index ce9a6e4886..9b7c9edb6d 100644 --- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h +++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ -854,6 +854,8 @@ get_digest_byte_length(IMB_HASH_ALG algo) struct aesni_mb_session { IMB_JOB template_job; /*< Template job structure */ + uint32_t session_id; + /*< IPSec MB session ID */ struct { uint16_t offset; } iv;
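For the configure-time half of the same mechanism, a hedged sketch follows: the session fills its template job once and caches the ID returned by the library. set_session() below is a toy stand-in for imb_set_session(), which the patch above only calls on newer intel-ipsec-mb releases; the field names mirror the template_job fields used earlier in the series.

/* Illustrative configure-time sketch with stand-in types and helpers. */
#include <stdint.h>

struct mb_mgr;                              /* stand-in for IMB_MGR */

struct mb_job {                             /* stand-in for IMB_JOB */
	uint32_t session_id;
	int      cipher_mode;
	int      hash_alg;
	uint64_t key_len_in_bytes;
};

struct mb_session {                         /* stand-in for aesni_mb_session */
	struct mb_job template_job;
	uint32_t      session_id;
};

/* Toy stand-in for imb_set_session(): registers the job's fixed parameters
 * and returns an ID identifying them. */
static uint32_t set_session(struct mb_mgr *mgr, struct mb_job *job)
{
	(void)mgr;
	job->session_id = (uint32_t)(job->cipher_mode * 31 + job->hash_alg);
	return job->session_id;
}

static void session_configure(struct mb_mgr *mgr, struct mb_session *sess,
			      int cipher_mode, int hash_alg, uint64_t key_len)
{
	/* All fixed, per-session parameters land in the template exactly once. */
	sess->template_job.cipher_mode = cipher_mode;
	sess->template_job.hash_alg = hash_alg;
	sess->template_job.key_len_in_bytes = key_len;

	/* Cache the ID so the per-operation path can compare it against
	 * the session ID already present in a recycled job. */
	sess->session_id = set_session(mgr, &sess->template_job);
}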