From patchwork Fri Apr 21 13:12:15 2023
X-Patchwork-Submitter: "Power, Ciara"
X-Patchwork-Id: 126391
X-Patchwork-Delegate: gakhil@marvell.com
From: Ciara Power
To: dev@dpdk.org
Cc: kai.ji@intel.com, Pablo de Lara, Ciara Power
Subject: [PATCH 3/8] crypto/ipsec_mb: use new SGL API
Date: Fri, 21 Apr 2023 13:12:15 +0000
Message-Id: <20230421131221.1732314-4-ciara.power@intel.com>
In-Reply-To: <20230421131221.1732314-1-ciara.power@intel.com>
References: <20230421131221.1732314-1-ciara.power@intel.com>
List-Id: DPDK patches and discussions

From: Pablo de Lara

Use the new SGL API available from IPSec Multi-buffer v1.3, where only
one function call is required to submit all segments to be processed in
an SGL scenario. Instead of one call per segment, there is now only one
call per buffer.
Signed-off-by: Pablo de Lara
Signed-off-by: Ciara Power
---
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c      | 187 +++++++++++++++-----
 drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h |   7 +
 2 files changed, 153 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 5789b82d8e..3ebbade8ca 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -1241,6 +1241,141 @@ imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg)
 	return 0;
 }
 
+#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
+static inline int
+single_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
+		int oop, uint32_t offset, struct rte_mbuf *m_src,
+		struct rte_mbuf *m_dst, struct IMB_SGL_IOV *sgl_segs)
+{
+	uint32_t num_segs = 0;
+	struct aesni_mb_op_buf_data src_sgl = {0};
+	struct aesni_mb_op_buf_data dst_sgl = {0};
+	uint32_t total_len;
+
+	job->sgl_state = IMB_SGL_ALL;
+
+	src_sgl.m = m_src;
+	src_sgl.offset = offset;
+
+	while (src_sgl.offset >= src_sgl.m->data_len) {
+		src_sgl.offset -= src_sgl.m->data_len;
+		src_sgl.m = src_sgl.m->next;
+
+		RTE_ASSERT(src_sgl.m != NULL);
+	}
+
+	if (oop) {
+		dst_sgl.m = m_dst;
+		dst_sgl.offset = offset;
+
+		while (dst_sgl.offset >= dst_sgl.m->data_len) {
+			dst_sgl.offset -= dst_sgl.m->data_len;
+			dst_sgl.m = dst_sgl.m->next;
+
+			RTE_ASSERT(dst_sgl.m != NULL);
+		}
+	}
+	total_len = op->sym->aead.data.length;
+
+	while (total_len != 0) {
+		uint32_t data_len, part_len;
+
+		if (src_sgl.m == NULL) {
+			IPSEC_MB_LOG(ERR, "Invalid source buffer");
+			return -EINVAL;
+		}
+
+		data_len = src_sgl.m->data_len - src_sgl.offset;
+
+		sgl_segs[num_segs].in = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *,
+				src_sgl.offset);
+
+		if (dst_sgl.m != NULL) {
+			if (dst_sgl.m->data_len - dst_sgl.offset == 0) {
+				dst_sgl.m = dst_sgl.m->next;
+				if (dst_sgl.m == NULL) {
+					IPSEC_MB_LOG(ERR, "Invalid destination buffer");
+					return -EINVAL;
+				}
+				dst_sgl.offset = 0;
+			}
+			part_len = RTE_MIN(data_len, (dst_sgl.m->data_len -
+					dst_sgl.offset));
+			sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(dst_sgl.m,
+					uint8_t *, dst_sgl.offset);
+			dst_sgl.offset += part_len;
+		} else {
+			part_len = RTE_MIN(data_len, total_len);
+			sgl_segs[num_segs].out = rte_pktmbuf_mtod_offset(src_sgl.m, uint8_t *,
+					src_sgl.offset);
+		}
+
+		sgl_segs[num_segs].len = part_len;
+
+		total_len -= part_len;
+
+		if (part_len != data_len) {
+			src_sgl.offset += part_len;
+		} else {
+			src_sgl.m = src_sgl.m->next;
+			src_sgl.offset = 0;
+		}
+		num_segs++;
+	}
+	job->num_sgl_io_segs = num_segs;
+	job->sgl_io_segs = sgl_segs;
+	return 0;
+}
+#endif
+
+static inline int
+multi_sgl_job(IMB_JOB *job, struct rte_crypto_op *op,
+		int oop, uint32_t offset, struct rte_mbuf *m_src,
+		struct rte_mbuf *m_dst, IMB_MGR *mb_mgr)
+{
+	int ret;
+	IMB_JOB base_job;
+	struct aesni_mb_op_buf_data src_sgl = {0};
+	struct aesni_mb_op_buf_data dst_sgl = {0};
+	uint32_t total_len;
+
+	base_job = *job;
+	job->sgl_state = IMB_SGL_INIT;
+	job = IMB_SUBMIT_JOB(mb_mgr);
+	total_len = op->sym->aead.data.length;
+
+	src_sgl.m = m_src;
+	src_sgl.offset = offset;
+
+	while (src_sgl.offset >= src_sgl.m->data_len) {
+		src_sgl.offset -= src_sgl.m->data_len;
+		src_sgl.m = src_sgl.m->next;
+
+		RTE_ASSERT(src_sgl.m != NULL);
+	}
+
+	if (oop) {
+		dst_sgl.m = m_dst;
+		dst_sgl.offset = offset;
+
+		while (dst_sgl.offset >= dst_sgl.m->data_len) {
+			dst_sgl.offset -= dst_sgl.m->data_len;
+			dst_sgl.m = dst_sgl.m->next;
+
+			RTE_ASSERT(dst_sgl.m != NULL);
+		}
+	}
+
+	while (job->sgl_state != IMB_SGL_COMPLETE) {
+		job = IMB_GET_NEXT_JOB(mb_mgr);
+		*job = base_job;
+		ret = handle_aead_sgl_job(job, mb_mgr, &total_len,
+				&src_sgl, &dst_sgl);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
 /**
  * Process a crypto operation and complete a IMB_JOB job structure for
  * submission to the multi buffer library for processing.
@@ -1262,19 +1397,15 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 {
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
 	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-	struct aesni_mb_op_buf_data src_sgl = {0};
-	struct aesni_mb_op_buf_data dst_sgl = {0};
 	struct aesni_mb_session *session;
-	uint32_t m_offset, oop;
+	uint32_t m_offset;
+	int oop;
 	uint32_t auth_off_in_bytes;
 	uint32_t ciph_off_in_bytes;
 	uint32_t auth_len_in_bytes;
 	uint32_t ciph_len_in_bytes;
-	uint32_t total_len;
-	IMB_JOB base_job;
 	uint8_t sgl = 0;
 	uint8_t lb_sgl = 0;
-	int ret;
 
 	session = ipsec_mb_get_session_private(qp, op);
 	if (session == NULL) {
@@ -1602,41 +1733,15 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 
 		if (lb_sgl)
 			return handle_sgl_linear(job, op, m_offset, session);
-		base_job = *job;
-		job->sgl_state = IMB_SGL_INIT;
-		job = IMB_SUBMIT_JOB(mb_mgr);
-		total_len = op->sym->aead.data.length;
-
-		src_sgl.m = m_src;
-		src_sgl.offset = m_offset;
-
-		while (src_sgl.offset >= src_sgl.m->data_len) {
-			src_sgl.offset -= src_sgl.m->data_len;
-			src_sgl.m = src_sgl.m->next;
-
-			RTE_ASSERT(src_sgl.m != NULL);
-		}
-
-		if (oop) {
-			dst_sgl.m = m_dst;
-			dst_sgl.offset = m_offset;
-
-			while (dst_sgl.offset >= dst_sgl.m->data_len) {
-				dst_sgl.offset -= dst_sgl.m->data_len;
-				dst_sgl.m = dst_sgl.m->next;
-
-				RTE_ASSERT(dst_sgl.m != NULL);
-			}
-		}
-
-		while (job->sgl_state != IMB_SGL_COMPLETE) {
-			job = IMB_GET_NEXT_JOB(mb_mgr);
-			*job = base_job;
-			ret = handle_aead_sgl_job(job, mb_mgr, &total_len,
-					&src_sgl, &dst_sgl);
-			if (ret < 0)
-				return ret;
-		}
+#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
+		if (m_src->nb_segs <= MAX_NUM_SEGS)
+			return single_sgl_job(job, op, oop,
+					m_offset, m_src, m_dst,
+					qp_data->sgl_segs);
+		else
+#endif
+			return multi_sgl_job(job, op, oop,
+					m_offset, m_src, m_dst, mb_mgr);
 	}
 
 	return 0;

diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 8a7c74f621..e17b53e4fe 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -20,6 +20,10 @@
 #define HMAC_IPAD_VALUE	(0x36)
 #define HMAC_OPAD_VALUE	(0x5C)
 
+#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
+#define MAX_NUM_SEGS 16
+#endif
+
 static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
 	{	/* MD5 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -729,6 +733,9 @@ struct aesni_mb_qp_data {
 	 * by the driver when verifying a digest provided
 	 * by the user (using authentication verify operation)
 	 */
+#if IMB_VERSION(1, 2, 0) < IMB_VERSION_NUM
+	struct IMB_SGL_IOV sgl_segs[MAX_NUM_SEGS];
+#endif
 	union {
 		struct gcm_context_data gcm_sgl_ctx;
 		struct chacha20_poly1305_context_data chacha_sgl_ctx;
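
The diff is easier to follow with the intel-ipsec-mb SGL job states in
mind. The sketch below contrasts the two submission models the driver now
chooses between. It is an illustration only, not code from this patch: the
helper names (submit_per_segment, submit_whole_buffer) and the n_segs
parameter are hypothetical, key/IV/AAD setup is omitted, and the
per-segment parameter copying done by the driver's handle_aead_sgl_job()
is reduced to the cipher fields. The IMB_* identifiers are the ones used
in the diff above.

/*
 * Illustrative sketch only. Assumes intel-ipsec-mb v1.3+ headers; job
 * setup (keys, IV, AAD, hash lengths) and error handling are omitted.
 */
#include <intel-ipsec-mb.h>

/*
 * Pre-v1.3 model (the multi_sgl_job() path): the job walks through
 * IMB_SGL_INIT -> IMB_SGL_UPDATE ... -> IMB_SGL_COMPLETE, costing one
 * IMB_SUBMIT_JOB() call per segment.
 */
static void
submit_per_segment(IMB_MGR *mb_mgr, IMB_JOB *job,
		const struct IMB_SGL_IOV *segs, uint32_t n_segs)
{
	IMB_JOB base_job = *job;	/* common parameters, restored per job */
	uint32_t i;

	job->sgl_state = IMB_SGL_INIT;
	job = IMB_SUBMIT_JOB(mb_mgr);		/* first call: init state */

	for (i = 0; i < n_segs; i++) {
		job = IMB_GET_NEXT_JOB(mb_mgr);
		*job = base_job;
		job->src = segs[i].in;		/* point job at this segment */
		job->dst = segs[i].out;
		job->msg_len_to_cipher_in_bytes = segs[i].len;
		job->sgl_state = (i + 1 == n_segs) ?
				IMB_SGL_COMPLETE : IMB_SGL_UPDATE;
		job = IMB_SUBMIT_JOB(mb_mgr);	/* one call per segment */
	}
}

/*
 * v1.3+ model (the single_sgl_job() path): hand the library the whole
 * {in, out, len} segment array and submit once per buffer.
 */
static void
submit_whole_buffer(IMB_MGR *mb_mgr, IMB_JOB *job,
		struct IMB_SGL_IOV *segs, uint32_t n_segs)
{
	job->sgl_state = IMB_SGL_ALL;		/* all segments in one job */
	job->sgl_io_segs = segs;
	job->num_sgl_io_segs = n_segs;
	IMB_SUBMIT_JOB(mb_mgr);			/* single call per buffer */
}

Note that the patch only takes the single-call path when the mbuf chain
fits in the fixed per-queue-pair array (m_src->nb_segs <= MAX_NUM_SEGS,
i.e. 16 segments); longer chains fall back to the per-segment loop.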