From patchwork Sun Oct 11 00:38:51 2020
X-Patchwork-Submitter: Fan Zhang
X-Patchwork-Id: 80300
X-Patchwork-Delegate: gakhil@marvell.com
From: Fan Zhang
To: dev@dpdk.org
Cc: akhil.goyal@nxp.com, Fan Zhang
Date: Sun, 11 Oct 2020 01:38:51 +0100
Message-Id: <20201011003854.54947-2-roy.fan.zhang@intel.com>
In-Reply-To: <20201011003854.54947-1-roy.fan.zhang@intel.com>
References: <20201011003252.54558-1-roy.fan.zhang@intel.com> <20201011003854.54947-1-roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [dpdk-dev v13 1/4] cryptodev: change crypto symmetric vector structure

This patch updates the ``rte_crypto_sym_vec`` structure to add support for
both cpu_crypto synchronous operation and asynchronous raw data-path APIs.
The patch also includes AESNI-MB and AESNI-GCM PMD changes, unit test
changes and documentation updates.
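As a quick illustration, below is a minimal sketch (not part of this patch) of
populating the reworked descriptor for a single CPU-crypto AEAD operation;
dev_id, sess and the buffer arguments are assumed to come from an already
configured device, session and application-owned data.

#include <stdio.h>
#include <rte_cryptodev.h>

/* Sketch only: one synchronous CPU-crypto AEAD operation with the new
 * rte_crypto_va_iova_ptr based fields. Only the .va halves are needed
 * on the CPU-crypto path; the IOVA halves are left unset.
 */
static void
cpu_crypto_one_op(uint8_t dev_id, struct rte_cryptodev_sym_session *sess,
		void *pt_buf, uint32_t pt_len, void *iv_buf, void *aad_buf,
		void *digest_buf)
{
	struct rte_crypto_vec data_vec[1] = {
		{ .base = pt_buf, .iova = 0, .len = pt_len }
	};
	struct rte_crypto_sgl sgl = { .vec = data_vec, .num = 1 };
	struct rte_crypto_va_iova_ptr iv = { .va = iv_buf };
	struct rte_crypto_va_iova_ptr digest = { .va = digest_buf };
	struct rte_crypto_va_iova_ptr aad = { .va = aad_buf }; /* unioned with auth_iv */
	int32_t status;
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.sgl = &sgl,
		.iv = &iv,
		.digest = &digest,
		.aad = &aad,
		.status = &status,
	};

	/* returns the number of successfully processed operations */
	if (rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, &vec) != 1)
		printf("op failed: status %d\n", status); /* 0 on success, errno on error */
}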
Signed-off-by: Fan Zhang --- app/test/test_cryptodev.c | 25 ++++++++------ doc/guides/prog_guide/cryptodev_lib.rst | 3 +- doc/guides/rel_notes/release_20_11.rst | 3 ++ drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 18 +++++----- drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 9 +++-- lib/librte_cryptodev/rte_crypto_sym.h | 40 ++++++++++++++++------ lib/librte_ipsec/esp_inb.c | 12 +++---- lib/librte_ipsec/esp_outb.c | 12 +++---- lib/librte_ipsec/misc.h | 6 ++-- 9 files changed, 79 insertions(+), 49 deletions(-) diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c index ac2a36bc2..62a265520 100644 --- a/app/test/test_cryptodev.c +++ b/app/test/test_cryptodev.c @@ -151,11 +151,11 @@ static void process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op) { int32_t n, st; - void *iv; struct rte_crypto_sym_op *sop; union rte_crypto_sym_ofs ofs; struct rte_crypto_sgl sgl; struct rte_crypto_sym_vec symvec; + struct rte_crypto_va_iova_ptr iv_ptr, aad_ptr, digest_ptr; struct rte_crypto_vec vec[UINT8_MAX]; sop = op->sym; @@ -171,13 +171,17 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op) sgl.vec = vec; sgl.num = n; symvec.sgl = &sgl; - iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); - symvec.iv = &iv; - symvec.aad = (void **)&sop->aead.aad.data; - symvec.digest = (void **)&sop->aead.digest.data; + symvec.iv = &iv_ptr; + symvec.digest = &digest_ptr; + symvec.aad = &aad_ptr; symvec.status = &st; symvec.num = 1; + /* for CPU crypto the IOVA address is not required */ + iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); + digest_ptr.va = (void *)sop->aead.digest.data; + aad_ptr.va = (void *)sop->aead.aad.data; + ofs.raw = 0; n = rte_cryptodev_sym_cpu_crypto_process(dev_id, sop->session, ofs, @@ -193,11 +197,11 @@ static void process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op) { int32_t n, st; - void *iv; struct rte_crypto_sym_op *sop; union rte_crypto_sym_ofs ofs; struct rte_crypto_sgl sgl; struct rte_crypto_sym_vec symvec; + struct rte_crypto_va_iova_ptr iv_ptr, digest_ptr; struct rte_crypto_vec vec[UINT8_MAX]; sop = op->sym; @@ -213,13 +217,14 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op) sgl.vec = vec; sgl.num = n; symvec.sgl = &sgl; - iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); - symvec.iv = &iv; - symvec.aad = (void **)&sop->aead.aad.data; - symvec.digest = (void **)&sop->auth.digest.data; + symvec.iv = &iv_ptr; + symvec.digest = &digest_ptr; symvec.status = &st; symvec.num = 1; + iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); + digest_ptr.va = (void *)sop->auth.digest.data; + ofs.raw = 0; ofs.ofs.cipher.head = sop->cipher.data.offset - sop->auth.data.offset; ofs.ofs.cipher.tail = (sop->auth.data.offset + sop->auth.data.length) - diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst index c14f750fa..e7ba35c2d 100644 --- a/doc/guides/prog_guide/cryptodev_lib.rst +++ b/doc/guides/prog_guide/cryptodev_lib.rst @@ -620,7 +620,8 @@ operation descriptor (``struct rte_crypto_sym_vec``) containing: descriptors of performed operations (``struct rte_crypto_sgl``). 
Each instance of ``struct rte_crypto_sgl`` consists of a number of segments and
a pointer to an array of segment descriptors ``struct rte_crypto_vec``;
-- pointers to arrays of size ``num`` containing IV, AAD and digest information,
+- pointers to arrays of size ``num`` containing IV, AAD and digest information
+  in the ``cpu_crypto`` sub-structure,
 - pointer to an array of size ``num`` where status information will be stored
   for each operation.

diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 8b911488c..2973b2a33 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -302,6 +302,9 @@ API Changes
   ``rte_fpga_lte_fec_configure`` and structure ``fpga_lte_fec_conf`` to
   ``rte_fpga_lte_fec_conf``.
 
+* The structure ``rte_crypto_sym_vec`` is updated to support both
+  cpu_crypto synchronous operation and asynchronous raw data-path APIs.
+
 ABI Changes
 -----------

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1d2a0ce00..973b61bd6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -464,9 +464,10 @@ aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i], vec->aad[i]);
+			&vec->sgl[i], vec->iv[i].va,
+			vec->aad[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-			gdata_ctx, vec->digest[i]);
+			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
 	}
 
@@ -482,9 +483,10 @@ aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i], vec->aad[i]);
+			&vec->sgl[i], vec->iv[i].va,
+			vec->aad[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-			gdata_ctx, vec->digest[i]);
+			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
 	}
 
@@ -505,9 +507,9 @@ aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i]);
+			&vec->sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-			gdata_ctx, vec->digest[i]);
+			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
 	}
 
@@ -528,9 +530,9 @@ aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i]);
+			&vec->sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-			gdata_ctx, vec->digest[i]);
+			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
 	}

diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 34a39ca99..39f90f537 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1877,7 +1877,7 @@ generate_sync_dgst(struct rte_crypto_sym_vec *vec,
 	for (i = 0, k = 0; i != vec->num; i++) {
 		if (vec->status[i] == 0) {
-			memcpy(vec->digest[i], dgst[i], len);
+			memcpy(vec->digest[i].va, dgst[i], len);
 			k++;
 		}
 	}
@@ -1893,7 +1893,7 @@ verify_sync_dgst(struct rte_crypto_sym_vec *vec,
 	for (i = 0, k = 0; i != vec->num; i++) {
 		if (vec->status[i] == 0) {
-			if (memcmp(vec->digest[i], dgst[i], len) != 0)
+			if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
 				vec->status[i] = EBADMSG;
 			else
 				k++;
@@ -1956,9 +1956,8 @@ aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
 		}
 
 		/* Submit job for processing */
-
set_cpu_mb_job_params(job, s, sofs, buf, len, - vec->iv[i], vec->aad[i], tmp_dgst[i], - &vec->status[i]); + set_cpu_mb_job_params(job, s, sofs, buf, len, vec->iv[i].va, + vec->aad[i].va, tmp_dgst[i], &vec->status[i]); job = submit_sync_job(mb_mgr); j++; diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h index f29c98051..e1f23d303 100644 --- a/lib/librte_cryptodev/rte_crypto_sym.h +++ b/lib/librte_cryptodev/rte_crypto_sym.h @@ -51,26 +51,44 @@ struct rte_crypto_sgl { }; /** - * Synchronous operation descriptor. - * Supposed to be used with CPU crypto API call. + * Crypto virtual and IOVA address descriptor, used to describe cryptographic + * data buffer without the length information. The length information is + * normally predefined during session creation. + */ +struct rte_crypto_va_iova_ptr { + void *va; + rte_iova_t iova; +}; + +/** + * Raw data operation descriptor. + * Supposed to be used with synchronous CPU crypto API call or asynchronous + * RAW data path API call. */ struct rte_crypto_sym_vec { + /** number of operations to perform */ + uint32_t num; /** array of SGL vectors */ struct rte_crypto_sgl *sgl; - /** array of pointers to IV */ - void **iv; - /** array of pointers to AAD */ - void **aad; + /** array of pointers to cipher IV */ + struct rte_crypto_va_iova_ptr *iv; /** array of pointers to digest */ - void **digest; + struct rte_crypto_va_iova_ptr *digest; + + __extension__ + union { + /** array of pointers to auth IV, used for chain operation */ + struct rte_crypto_va_iova_ptr *auth_iv; + /** array of pointers to AAD, used for AEAD operation */ + struct rte_crypto_va_iova_ptr *aad; + }; + /** * array of statuses for each operation: - * - 0 on success - * - errno on error + * - 0 on success + * - errno on error */ int32_t *status; - /** number of operations to perform */ - uint32_t num; }; /** diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c index 96eec0131..2b1df6a03 100644 --- a/lib/librte_ipsec/esp_inb.c +++ b/lib/librte_ipsec/esp_inb.c @@ -693,9 +693,9 @@ cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa; struct replay_sqn *rsn; union sym_op_data icv; - void *iv[num]; - void *aad[num]; - void *dgst[num]; + struct rte_crypto_va_iova_ptr iv[num]; + struct rte_crypto_va_iova_ptr aad[num]; + struct rte_crypto_va_iova_ptr dgst[num]; uint32_t dr[num]; uint32_t l4ofs[num]; uint32_t clen[num]; @@ -720,9 +720,9 @@ cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss, l4ofs + k, rc, ivbuf[k]); /* fill iv, digest and aad */ - iv[k] = ivbuf[k]; - aad[k] = icv.va + sa->icv_len; - dgst[k++] = icv.va; + iv[k].va = ivbuf[k]; + aad[k].va = icv.va + sa->icv_len; + dgst[k++].va = icv.va; } else { dr[i - k] = i; rte_errno = -rc; diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c index fb9d5864c..1e181cf2c 100644 --- a/lib/librte_ipsec/esp_outb.c +++ b/lib/librte_ipsec/esp_outb.c @@ -449,9 +449,9 @@ cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss, uint32_t i, k, n; uint32_t l2, l3; union sym_op_data icv; - void *iv[num]; - void *aad[num]; - void *dgst[num]; + struct rte_crypto_va_iova_ptr iv[num]; + struct rte_crypto_va_iova_ptr aad[num]; + struct rte_crypto_va_iova_ptr dgst[num]; uint32_t dr[num]; uint32_t l4ofs[num]; uint32_t clen[num]; @@ -488,9 +488,9 @@ cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss, ivbuf[k]); /* fill iv, digest and aad */ - iv[k] = ivbuf[k]; - aad[k] = icv.va + sa->icv_len; - dgst[k++] = icv.va; + iv[k].va = ivbuf[k]; + aad[k].va = icv.va 
+ sa->icv_len; + dgst[k++].va = icv.va; } else { dr[i - k] = i; rte_errno = -rc;

diff --git a/lib/librte_ipsec/misc.h b/lib/librte_ipsec/misc.h
index 1b543ed87..79b9a2076 100644
--- a/lib/librte_ipsec/misc.h
+++ b/lib/librte_ipsec/misc.h
@@ -112,7 +112,9 @@ mbuf_cut_seg_ofs(struct rte_mbuf *mb, struct rte_mbuf *ms, uint32_t ofs,
 static inline void
 cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 	union rte_crypto_sym_ofs ofs, struct rte_mbuf *mb[],
-	void *iv[], void *aad[], void *dgst[], uint32_t l4ofs[],
+	struct rte_crypto_va_iova_ptr iv[],
+	struct rte_crypto_va_iova_ptr aad[],
+	struct rte_crypto_va_iova_ptr dgst[], uint32_t l4ofs[],
 	uint32_t clen[], uint32_t num)
 {
 	uint32_t i, j, n;
@@ -136,8 +138,8 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 		/* fill the request structure */
 		symvec.sgl = &vecpkt[j];
 		symvec.iv = &iv[j];
-		symvec.aad = &aad[j];
 		symvec.digest = &dgst[j];
+		symvec.aad = &aad[j];
 		symvec.status = &st[j];
 		symvec.num = i - j;

From patchwork Sun Oct 11 00:38:52 2020
X-Patchwork-Submitter: Fan Zhang
X-Patchwork-Id: 80301
X-Patchwork-Delegate: gakhil@marvell.com
From: Fan Zhang
To: dev@dpdk.org
Cc: akhil.goyal@nxp.com, Fan Zhang, Piotr Bronowski, Adam Dybkowski
Date: Sun, 11 Oct 2020 01:38:52 +0100
Message-Id: <20201011003854.54947-3-roy.fan.zhang@intel.com>
In-Reply-To: <20201011003854.54947-1-roy.fan.zhang@intel.com>
References: <20201011003252.54558-1-roy.fan.zhang@intel.com> <20201011003854.54947-1-roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [dpdk-dev v13 2/4] cryptodev: add raw crypto data-path APIs

This patch adds raw data-path APIs for enqueue and dequeue operations to
cryptodev. The APIs support flexible user-defined enqueue and dequeue
behaviors.
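Before the details below, a minimal sketch (not part of this patch) of how an
application might size, allocate and configure the raw data-path context these
APIs require; dev_id, qp_id and sess are assumed to come from an already
configured device, queue pair and symmetric session.

#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* Sketch only: allocate and initialize a raw data-path context.
 * Assumes dev_id exposes RTE_CRYPTODEV_FF_SYM_RAW_DP and qp_id is set up.
 */
static struct rte_crypto_raw_dp_ctx *
setup_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	int size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);

	if (size < 0)
		return NULL; /* raw data-path APIs not supported */

	/* the buffer must also cover the driver's private context data */
	ctx = rte_zmalloc(NULL, size, 0);
	if (ctx == NULL)
		return NULL;

	/* is_update == 0: full init, attaches the session and sets handlers */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		rte_free(ctx);
		return NULL;
	}
	return ctx;
}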
Signed-off-by: Fan Zhang
Signed-off-by: Piotr Bronowski
Acked-by: Adam Dybkowski
---
 doc/guides/cryptodevs/features/default.ini |   1 +
 doc/guides/prog_guide/cryptodev_lib.rst    | 106 +++++
 doc/guides/rel_notes/release_20_11.rst     |   7 +
 lib/librte_cryptodev/rte_cryptodev.c       |  80 ++++
 lib/librte_cryptodev/rte_cryptodev.h       | 413 +++++++++++++++++-
 lib/librte_cryptodev/rte_cryptodev_pmd.h   |  51 ++-
 .../rte_cryptodev_version.map              |  10 +
 7 files changed, 665 insertions(+), 3 deletions(-)

diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 133a246ee..17b177fc4 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -30,6 +30,7 @@
 Asymmetric sessionless =
 CPU crypto             =
 Symmetric sessionless  =
 Non-Byte aligned data  =
+Sym raw data path API  =
 
 ;
 ; Supported crypto algorithms of a default crypto driver.

diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index c14f750fa..e7ba35c2d 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -632,6 +632,112 @@
 a call argument. Status different than zero must be treated as error.
 
 For more details, e.g. how to convert an mbuf to an SGL, please refer to an
 example usage in the IPsec library implementation.
 
+Cryptodev Raw Data-path APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Crypto Raw data-path APIs are a set of APIs designed to enable external
+libraries/applications to leverage the cryptographic processing provided by
+DPDK crypto PMDs through the cryptodev API but in a manner that is not
+dependent on native DPDK data structures (e.g. rte_mbuf, rte_crypto_op, etc.)
+in their data-path implementation.
+
+The raw data-path APIs have the following advantages:
+- External data structure friendly design. The new APIs use the operation
+  descriptor ``struct rte_crypto_sym_vec`` that supports raw data pointers and
+  IOVA addresses as input. Moreover, the APIs do not require the user to
+  allocate the descriptor from a mempool, nor require mbufs to describe the
+  input data's virtual and IOVA addresses. All these features make the
+  translation from the user's own data structure into the descriptor easier
+  and more efficient.
+- Flexible enqueue and dequeue operation. The raw data-path APIs give the
+  user more control over the enqueue and dequeue operations, including the
+  capability of precise enqueue/dequeue counts, abandoning enqueue or dequeue
+  at any time, and operation status translation and setting on the fly.
+
+Cryptodev PMDs which support the raw data-path APIs will have the
+``RTE_CRYPTODEV_FF_SYM_RAW_DP`` feature flag set. To use this feature, the
+user shall create a local ``struct rte_crypto_raw_dp_ctx`` buffer extended to
+at least the length returned by a ``rte_cryptodev_get_raw_dp_ctx_size``
+function call. The created buffer is then initialized using the
+``rte_cryptodev_configure_raw_dp_ctx`` function with the ``is_update``
+parameter set to 0. The library and the crypto device driver will then set the
+buffer and attach either the cryptodev sym session, the rte_security session,
+or the cryptodev xform for session-less operation into the ctx buffer, and
+set the corresponding enqueue and dequeue function handlers based on the
+algorithm information stored in the session or xform.
When the ``is_update``
+parameter passed into ``rte_cryptodev_configure_raw_dp_ctx`` is 1, the driver
+will not initialize the buffer but only update the session or xform and
+the function handlers accordingly.
+
+After the ``struct rte_crypto_raw_dp_ctx`` buffer is initialized, it is
+ready for enqueue and dequeue operations. There are two different enqueue
+functions: ``rte_cryptodev_raw_enqueue`` to enqueue a single raw data
+operation, and ``rte_cryptodev_raw_enqueue_burst`` to enqueue a descriptor
+with multiple operations. If the application uses an approach similar to
+``struct rte_crypto_sym_vec`` to manage its data bursts but with a different
+data structure, using the ``rte_cryptodev_raw_enqueue_burst`` function may be
+less efficient: the application has to loop over all crypto operations to
+assemble the ``struct rte_crypto_sym_vec`` descriptor from its own data
+structure, and then the driver will loop over them again to translate every
+operation in the descriptor into the driver's specific queue data.
+``rte_cryptodev_raw_enqueue`` should be used instead to save one loop for
+each data burst.
+
+The ``rte_cryptodev_raw_enqueue`` and ``rte_cryptodev_raw_enqueue_burst``
+functions will return or set the enqueue status. ``rte_cryptodev_raw_enqueue``
+will return the status directly, while ``rte_cryptodev_raw_enqueue_burst`` will
+return the number of operations enqueued or stored (explained as follows) and
+set the ``enqueue_status`` buffer provided by the user. The possible
+enqueue status values are:
+
+- ``1``: the operation(s) is/are enqueued successfully.
+- ``0``: the operation(s) is/are cached successfully in the crypto device queue
+  but are not actually enqueued. The user shall call the
+  ``rte_cryptodev_raw_enqueue_done`` function after the expected operations
+  are stored. The crypto device will then start enqueuing all of them at
+  once.
+- A negative integer: an error occurred during enqueue.
+
+Calling ``rte_cryptodev_configure_raw_dp_ctx`` with the parameter ``is_update``
+set to 0 twice, without the enqueue function returning or setting the enqueue
+status to 1 or the ``rte_cryptodev_raw_enqueue_done`` function being called in
+between, will invalidate any operations stored in the device queue but not yet
+enqueued. This feature is useful when the user wants to abandon the partially
+enqueued operations of a failed enqueue burst and retry enqueuing the whole
+burst later.
+
+Similarly to enqueue, there are two dequeue functions:
+``rte_cryptodev_raw_dequeue`` for dequeuing a single operation, and
+``rte_cryptodev_raw_dequeue_burst`` for dequeuing a burst of operations (e.g.
+all operations in a ``struct rte_crypto_sym_vec`` descriptor). The
+``rte_cryptodev_raw_dequeue_burst`` function allows the user to provide callback
+functions to retrieve the dequeue count from the enqueued user data and to
+write the expected status value to the user data on the fly. The dequeue
+functions also set the dequeue status:
+
+- ``1``: the operation(s) is/are dequeued successfully.
+- ``0``: the operation(s) is/are completed but not actually dequeued (hence
+  still kept in the device queue). The user shall call the
+  ``rte_cryptodev_raw_dequeue_done`` function after the expected number of
+  operations (e.g. all operations in a descriptor) are dequeued. The crypto
+  device driver will then free them from the queue at once.
+- A negative integer: an error occurred during dequeue.
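To make the status semantics above concrete, here is a sketch (not part of
this patch) of one enqueue/dequeue round; ctx is an initialized raw data-path
context, vec and user_data are assumed to be filled by the application, and
get_count/set_status with struct my_op are hypothetical user-side helpers
matching the callback typedefs this patch introduces.

#include <rte_cryptodev.h>

#define BURST_SIZE 32 /* assumed burst size */

struct my_op { uint8_t ok; }; /* hypothetical per-operation user data */

/* hypothetical callbacks matching the dequeue typedefs */
static uint32_t
get_count(void *user_data __rte_unused)
{
	return BURST_SIZE; /* fixed count; could instead be parsed from user_data */
}

static void
set_status(void *user_data, uint32_t index __rte_unused, uint8_t is_op_success)
{
	((struct my_op *)user_data)->ok = is_op_success;
}

static void
raw_dp_round(struct rte_crypto_raw_dp_ctx *ctx, struct rte_crypto_sym_vec *vec,
		void **user_data)
{
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	void *out_user_data[BURST_SIZE];
	uint32_t n, n_success;
	int enqueue_status, dequeue_status;

	n = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, user_data,
			&enqueue_status);
	if (enqueue_status == 0)
		rte_cryptodev_raw_enqueue_done(ctx, n); /* kick cached ops */
	else if (enqueue_status < 0)
		return; /* enqueue error */

	/* ... poll until the operations complete ... */

	n = rte_cryptodev_raw_dequeue_burst(ctx, get_count, set_status,
			out_user_data, 1, &n_success, &dequeue_status);
	if (dequeue_status == 0)
		rte_cryptodev_raw_dequeue_done(ctx, n); /* free from queue */
}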
+
+Calling ``rte_cryptodev_configure_raw_dp_ctx`` with the parameter ``is_update``
+set to 0 twice, without a dequeue function execution setting the dequeue status
+to 1 or the ``rte_cryptodev_raw_dequeue_done`` function being called in between,
+will revert the crypto device queue's dequeue effort to the moment when the
+``struct rte_crypto_raw_dp_ctx`` buffer was initialized. This feature is useful
+when the user wants to abandon partially dequeued data and retry dequeuing the
+whole burst later.
+
+There are a few limitations to the raw data path APIs:
+
+* Only in-place operations are supported.
+* The APIs are NOT thread-safe.
+* The raw data-path API's enqueue CANNOT be mixed with
+  rte_cryptodev_enqueue_burst, or vice versa.
+
+See *DPDK API Reference* for details on each API definition.
+
 Sample code
 -----------

diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 2973b2a33..85a07d86e 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -74,6 +74,13 @@ New Features
 
   * Added SR-IOV PF support
 
+* **Added raw data-path APIs for cryptodev library.**
+
+  Added raw data-path APIs to cryptodev to accelerate external libraries or
+  applications that need fast cryptodev enqueue/dequeue operations but do not
+  necessarily depend on mbufs and cryptodev operation mempools.
+
 * **Updated the aesni_mb crypto PMD.**
 
   * Added support for AES-ECB 128, 192 and 256.

diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index cda160f61..3d95ac6ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1772,6 +1772,86 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
 	return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
 }
 
+int
+rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
+{
+	struct rte_cryptodev *dev;
+	int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
+	int32_t priv_size;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
+		return -EINVAL;
+
+	dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+	if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
+		!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
+		return -ENOTSUP;
+	}
+
+	priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
+	if (priv_size < 0)
+		return -ENOTSUP;
+
+	return RTE_ALIGN_CEIL((size + priv_size), 8);
+}
+
+int
+rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx,
+	uint8_t is_update)
+{
+	struct rte_cryptodev *dev;
+
+	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
+		return -EINVAL;
+
+	dev = rte_cryptodev_pmd_get_dev(dev_id);
+	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
+			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
+		return -ENOTSUP;
+
+	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
+			sess_type, session_ctx, is_update);
+}
+
+uint32_t
+rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void **user_data, int *enqueue_status)
+{
+	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
+			ofs, user_data, enqueue_status);
+}
+
+int
+rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
+	uint32_t n)
+{
+	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
+}
+
+uint32_t
+rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx, +
rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, + rte_cryptodev_raw_post_dequeue_t post_dequeue, + void **out_user_data, uint8_t is_user_data_array, + uint32_t *n_success_jobs, int *status) +{ + return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data, + get_dequeue_count, post_dequeue, out_user_data, + is_user_data_array, n_success_jobs, status); +} + +int +rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx, + uint32_t n) +{ + return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n); +} + /** Initialise rte_crypto_op mempool element */ static void rte_crypto_op_init(struct rte_mempool *mempool, diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h index 26abd0c52..b5db8fd81 100644 --- a/lib/librte_cryptodev/rte_cryptodev.h +++ b/lib/librte_cryptodev/rte_cryptodev.h @@ -458,7 +458,8 @@ rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, /**< Support symmetric session-less operations */ #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23) /**< Support operations on data which is not byte aligned */ - +#define RTE_CRYPTODEV_FF_SYM_RAW_DP (1ULL << 24) +/**< Support accelerated specific symmetric raw data-path APIs */ /** * Get the name of a crypto device feature flag @@ -1319,6 +1320,416 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec); +/** + * Get the size of the raw data-path context buffer. + * + * @param dev_id The device identifier. + * + * @return + * - If the device supports raw data-path APIs, return the context size. + * - If the device does not support the APIs, return -1. + */ +__rte_experimental +int +rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id); + +/** + * Union of different crypto session types, including session-less xform + * pointer. + */ +union rte_cryptodev_session_ctx { + struct rte_cryptodev_sym_session *crypto_sess; + struct rte_crypto_sym_xform *xform; + struct rte_security_session *sec_sess; +}; + +/** + * Enqueue a vectorized operation descriptor into the device queue but the + * driver may or may not start processing until rte_cryptodev_raw_enqueue_done() + * is called. + * + * @param qp Driver specific queue pair data. + * @param drv_ctx Driver specific context data. + * @param vec Vectorized operation descriptor. + * @param ofs Start and stop offsets for auth and cipher + * operations. + * @param user_data The array of user data for dequeue later. + * @param enqueue_status Driver written value to specify the + * enqueue status. Possible values: + * - 1: The number of operations returned are + * enqueued successfully. + * - 0: The number of operations returned are + * cached into the queue but are not processed + * until rte_cryptodev_raw_enqueue_done() is + * called. + * - negative integer: Error occurred. + * @return + * - The number of operations in the descriptor successfully enqueued or + * cached into the queue but not enqueued yet, depends on the + * "enqueue_status" value. + */ +typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)( + void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec, + union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status); + +/** + * Enqueue single raw data vector into the device queue but the driver may or + * may not start processing until rte_cryptodev_raw_enqueue_done() is called. + * + * @param qp Driver specific queue pair data. + * @param drv_ctx Driver specific context data. 
+ * @param data_vec	The buffer data vector.
+ * @param n_data_vecs	Number of buffer data vectors.
+ * @param ofs		Start and stop offsets for auth and cipher
+ *			operations.
+ * @param iv		IV virtual and IOVA addresses
+ * @param digest	digest virtual and IOVA addresses
+ * @param aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
+ *			depends on the algorithm used.
+ * @param user_data	The user data.
+ * @return
+ *   - 1: The data vector is enqueued successfully.
+ *   - 0: The data vector is cached into the queue but is not processed
+ *     until rte_cryptodev_raw_enqueue_done() is called.
+ *   - negative integer: failure.
+ */
+typedef int (*cryptodev_sym_raw_enqueue_t)(
+	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
+	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data);
+
+/**
+ * Inform the cryptodev queue pair to start processing or finish dequeuing all
+ * enqueued/dequeued operations.
+ *
+ * @param qp		Driver specific queue pair data.
+ * @param drv_ctx	Driver specific context data.
+ * @param n		The total number of processed operations.
+ * @return
+ *   - On success return 0.
+ *   - On failure return negative integer.
+ */
+typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
+	uint32_t n);
+
+/**
+ * Typedef of the function the user provides for the driver to get the
+ * dequeue count. The function may return a fixed number or the number parsed
+ * from the user data stored in the first processed operation.
+ *
+ * @param user_data	Dequeued user data.
+ * @return
+ *  - The number of operations to be dequeued.
+ **/
+typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
+
+/**
+ * Typedef of the function the user provides to handle post-dequeue
+ * operations, such as filling in the status.
+ *
+ * @param user_data	Dequeued user data.
+ * @param index		Index number of the processed descriptor.
+ * @param is_op_success	Operation status provided by the driver.
+ **/
+typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
+	uint32_t index, uint8_t is_op_success);
+
+/**
+ * Dequeue a burst of symmetric crypto processing.
+ *
+ * @param qp			Driver specific queue pair data.
+ * @param drv_ctx		Driver specific context data.
+ * @param get_dequeue_count	User provided callback function to
+ *				obtain dequeue operation count.
+ * @param post_dequeue		User provided callback function to
+ *				post-process a dequeued operation.
+ * @param out_user_data		User data pointer array to be retrieved
+ *				from the device queue. If
+ *				*is_user_data_array* is set there
+ *				should be enough room to store all
+ *				user data.
+ * @param is_user_data_array	Set 1 if every dequeued user data will
+ *				be written into out_user_data array.
+ *				Set 0 if only the first user data will
+ *				be written into out_user_data array.
+ * @param n_success		Driver written value to specify the
+ *				total successful operations count.
+ * @param dequeue_status	Driver written value to specify the
+ *				dequeue status. Possible values:
+ *				- 1: Successfully dequeued the number
+ *				  of operations returned. The user
+ *				  data previously set during enqueue
+ *				  is stored in the "out_user_data".
+ *				- 0: The number of operations returned
+ *				  are completed and the user data is
+ *				  stored in the "out_user_data", but
+ *				  they are not freed from the queue
+ *				  until rte_cryptodev_raw_dequeue_done()
+ *				  is called.
+ *				- negative integer: Error occurred.
+ * @return
+ *   - The number of operations dequeued or completed but not freed from the
+ *     queue, depends on "dequeue_status" value.
+ */
+typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
+	uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success, int *dequeue_status);
+
+/**
+ * Dequeue a symmetric crypto processing.
+ *
+ * @param qp			Driver specific queue pair data.
+ * @param drv_ctx		Driver specific context data.
+ * @param dequeue_status	Driver written value to specify the
+ *				dequeue status. Possible values:
+ *				- 1: Successfully dequeued an operation.
+ *				  The user data is returned.
+ *				- 0: The first operation in the queue
+ *				  is completed and the user data
+ *				  previously set during enqueue is
+ *				  returned, but it is not freed from
+ *				  the queue until
+ *				  rte_cryptodev_raw_dequeue_done() is
+ *				  called.
+ *				- negative integer: Error occurred.
+ * @param op_status		Driver written value to specify
+ *				operation status.
+ * @return
+ *   - The user data pointer retrieved from device queue or NULL if no
+ *     operation is ready for dequeue.
+ */
+typedef void * (*cryptodev_sym_raw_dequeue_t)(
+	void *qp, uint8_t *drv_ctx, int *dequeue_status,
+	enum rte_crypto_op_status *op_status);
+
+/**
+ * Context data for raw data-path API crypto process. The buffer of this
+ * structure is to be allocated by the user application with a size equal
+ * to or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
+ */
+struct rte_crypto_raw_dp_ctx {
+	void *qp_data;
+
+	cryptodev_sym_raw_enqueue_t enqueue;
+	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
+	cryptodev_sym_raw_operation_done_t enqueue_done;
+	cryptodev_sym_raw_dequeue_t dequeue;
+	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
+	cryptodev_sym_raw_operation_done_t dequeue_done;
+
+	/* Driver specific context data */
+	__extension__ uint8_t drv_ctx_data[];
+};
+
+/**
+ * Configure raw data-path context data.
+ *
+ * @param dev_id		The device identifier.
+ * @param qp_id			The index of the queue pair from which to
+ *				retrieve processed packets. The value must be
+ *				in the range [0, nb_queue_pair - 1] previously
+ *				supplied to rte_cryptodev_configure().
+ * @param ctx			The raw data-path context data.
+ * @param sess_type		Session type.
+ * @param session_ctx		Session context data.
+ * @param is_update		Set 0 if it is to initialize the ctx.
+ *				Set 1 if ctx is initialized and only to update
+ *				session context data.
+ * @return
+ *   - On success return 0.
+ *   - On failure return negative integer.
+ */
+__rte_experimental
+int
+rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx,
+	uint8_t is_update);
+
+/**
+ * Enqueue a vectorized operation descriptor into the device queue but the
+ * driver may or may not start processing until rte_cryptodev_raw_enqueue_done()
+ * is called.
+ *
+ * @param ctx		The initialized raw data-path context data.
+ * @param vec		Vectorized operation descriptor.
+ * @param ofs		Start and stop offsets for auth and cipher
+ *			operations.
+ * @param user_data	The array of user data for dequeue later.
+ * @param enqueue_status	Driver written value to specify the
+ *				enqueue status. Possible values:
+ *				- 1: The number of operations returned are
+ *				  enqueued successfully.
+ *				- 0: The number of operations returned are
+ *				  cached into the queue but are not processed
+ *				  until rte_cryptodev_raw_enqueue_done() is
+ *				  called.
+ *				- negative integer: Error occurred.
+ * @return
+ *   - The number of operations in the descriptor successfully enqueued or
+ *     cached into the queue but not enqueued yet, depends on the
+ *     "enqueue_status" value.
+ */
+__rte_experimental
+uint32_t
+rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void **user_data, int *enqueue_status);
+
+/**
+ * Enqueue single raw data vector into the device queue but the driver may or
+ * may not start processing until rte_cryptodev_raw_enqueue_done() is called.
+ *
+ * @param ctx		The initialized raw data-path context data.
+ * @param data_vec	The buffer data vector.
+ * @param n_data_vecs	Number of buffer data vectors.
+ * @param ofs		Start and stop offsets for auth and cipher
+ *			operations.
+ * @param iv		IV virtual and IOVA addresses
+ * @param digest	digest virtual and IOVA addresses
+ * @param aad_or_auth_iv	AAD or auth IV virtual and IOVA addresses,
+ *			depends on the algorithm used.
+ * @param user_data	The user data.
+ * @return
+ *   - 1: The data vector is enqueued successfully.
+ *   - 0: The data vector is cached into the queue but is not processed
+ *     until rte_cryptodev_raw_enqueue_done() is called.
+ *   - negative integer: failure.
+ */
+__rte_experimental
+static __rte_always_inline int
+rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
+	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
+	void *user_data)
+{
+	return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
+		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
+}
+
+/**
+ * Start processing all enqueued operations from last
+ * rte_cryptodev_configure_raw_dp_ctx() call.
+ *
+ * @param ctx	The initialized raw data-path context data.
+ * @param n	The number of operations cached.
+ * @return
+ *   - On success return 0.
+ *   - On failure return negative integer.
+ */
+__rte_experimental
+int
+rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
+	uint32_t n);
+
+/**
+ * Dequeue a burst of symmetric crypto processing.
+ *
+ * @param ctx			The initialized raw data-path context
+ *				data.
+ * @param get_dequeue_count	User provided callback function to
+ *				obtain dequeue operation count.
+ * @param post_dequeue		User provided callback function to
+ *				post-process a dequeued operation.
+ * @param out_user_data		User data pointer array to be retrieved
+ *				from the device queue. If
+ *				*is_user_data_array* is set there
+ *				should be enough room to store all
+ *				user data.
+ * @param is_user_data_array	Set 1 if every dequeued user data will
+ *				be written into out_user_data array.
+ *				Set 0 if only the first user data will
+ *				be written into out_user_data array.
+ * @param n_success		Driver written value to specify the
+ *				total successful operations count.
+ * @param dequeue_status	Driver written value to specify the
+ *				dequeue status. Possible values:
+ *				- 1: Successfully dequeued the number
+ *				  of operations returned.
The user + * data previously set during enqueue + * is stored in the "out_user_data". + * - 0: The number of operations returned + * are completed and the user data is + * stored in the "out_user_data", but + * they are not freed from the queue + * until + * rte_cryptodev_raw_dequeue_done() + * is called. + * - negative integer: Error occurred. + * @return + * - The number of operations dequeued or completed but not freed from the + * queue, depends on "dequeue_status" value. + */ +__rte_experimental +uint32_t +rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx, + rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, + rte_cryptodev_raw_post_dequeue_t post_dequeue, + void **out_user_data, uint8_t is_user_data_array, + uint32_t *n_success, int *dequeue_status); + +/** + * Dequeue a symmetric crypto processing. + * + * @param ctx The initialized raw data-path context + * data. + * @param dequeue_status Driver written value to specify the + * dequeue status. Possible values: + * - 1: Successfully dequeued a operation. + * The user data is returned. + * - 0: The first operation in the queue + * is completed and the user data + * previously set during enqueue is + * returned, but it is not freed from + * the queue until + * rte_cryptodev_raw_dequeue_done() is + * called. + * - negative integer: Error occurred. + * @param op_status Driver written value to specify + * operation status. + * @return + * - The user data pointer retrieved from device queue or NULL if no + * operation is ready for dequeue. + */ +__rte_experimental +static __rte_always_inline void * +rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx, + int *dequeue_status, enum rte_crypto_op_status *op_status) +{ + return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status, + op_status); +} + +/** + * Inform the queue pair dequeue operations is finished. + * + * @param ctx The initialized raw data-path context data. + * @param n The number of operations. + * @return + * - On success return 0. + * - On failure return negative integer. + */ +__rte_experimental +int +rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx, + uint32_t n); + #ifdef __cplusplus } #endif diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h index 1367222f7..9a8a7e632 100644 --- a/lib/librte_cryptodev/rte_cryptodev_pmd.h +++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h @@ -317,6 +317,42 @@ typedef uint32_t (*cryptodev_sym_cpu_crypto_process_t) (struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec); +/** + * Typedef that the driver provided to get service context private date size. + * + * @param dev Crypto device pointer. + * + * @return + * - On success return the size of the device's service context private data. + * - On failure return negative integer. + */ +typedef int (*cryptodev_sym_get_raw_dp_ctx_size_t)(struct rte_cryptodev *dev); + +/** + * Typedef that the driver provided to configure raw data-path context. + * + * @param dev Crypto device pointer. + * @param qp_id Crypto device queue pair index. + * @param service_type Type of the service requested. + * @param ctx The raw data-path context data. + * @param sess_type session type. + * @param session_ctx Session context data. If NULL the driver + * shall only configure the drv_ctx_data in + * ctx buffer. Otherwise the driver shall only + * parse the session_ctx to set appropriate + * function pointers in ctx. 
+ * @param is_update	Set 0 if it is to initialize the ctx.
+ *			Set 1 if ctx is initialized and only to update
+ *			session context data.
+ * @return
+ *   - On success return 0.
+ *   - On failure return negative integer.
+ */
+typedef int (*cryptodev_sym_configure_raw_dp_ctx_t)(
+	struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);

 /** Crypto device operations function pointer table */
 struct rte_cryptodev_ops {
@@ -349,8 +385,19 @@ struct rte_cryptodev_ops {
 	/**< Clear a Crypto sessions private data. */
 	cryptodev_asym_free_session_t asym_session_clear;
 	/**< Clear a Crypto sessions private data. */
-	cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
-	/**< process input data synchronously (cpu-crypto). */
+	union {
+		cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
+		/**< process input data synchronously (cpu-crypto). */
+		__extension__
+		struct {
+			cryptodev_sym_get_raw_dp_ctx_size_t
+				sym_get_raw_dp_ctx_size;
+			/**< Get raw data path service context data size. */
+			cryptodev_sym_configure_raw_dp_ctx_t
+				sym_configure_raw_dp_ctx;
+			/**< Initialize raw data path context data. */
+		};
+	};
 };

diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index 7727286ac..7e4360ff0 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -99,4 +99,14 @@ EXPERIMENTAL {
 
 	# added in 20.08
 	rte_cryptodev_get_qp_status;
+
+	# added in 20.11
+	rte_cryptodev_configure_raw_dp_ctx;
+	rte_cryptodev_get_raw_dp_ctx_size;
+	rte_cryptodev_raw_dequeue;
+	rte_cryptodev_raw_dequeue_burst;
+	rte_cryptodev_raw_dequeue_done;
+	rte_cryptodev_raw_enqueue;
+	rte_cryptodev_raw_enqueue_burst;
+	rte_cryptodev_raw_enqueue_done;
 };

From patchwork Sun Oct 11 00:38:53 2020
X-Patchwork-Submitter: Fan Zhang
X-Patchwork-Id: 80302
X-Patchwork-Delegate: gakhil@marvell.com
From: Fan Zhang
To: dev@dpdk.org
Cc: akhil.goyal@nxp.com, Fan Zhang, Adam Dybkowski
Date: Sun, 11 Oct 2020 01:38:53 +0100
Message-Id: <20201011003854.54947-4-roy.fan.zhang@intel.com>
In-Reply-To: <20201011003854.54947-1-roy.fan.zhang@intel.com>
References: <20201011003252.54558-1-roy.fan.zhang@intel.com> <20201011003854.54947-1-roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [dpdk-dev v13 3/4] crypto/qat: add raw crypto data-path API support

This patch updates the QAT PMD to add raw data-path API support.

Signed-off-by: Fan Zhang
Acked-by: Adam Dybkowski
---
 doc/guides/cryptodevs/features/qat.ini |   1 +
 doc/guides/rel_notes/release_20_11.rst |   4 +
 drivers/crypto/qat/meson.build         |   1 +
 drivers/crypto/qat/qat_sym.h           |  11 +
 drivers/crypto/qat/qat_sym_hw_dp.c     | 959 +++++++++++++++++++++++++
 drivers/crypto/qat/qat_sym_pmd.c       |   9 +-
 6 files changed, 983 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini
index 9e82f2886..6cc09cde7 100644
--- a/doc/guides/cryptodevs/features/qat.ini
+++ b/doc/guides/cryptodevs/features/qat.ini
@@ -17,6 +17,7 @@
 Digest encrypted = Y
 Asymmetric sessionless = Y
 RSA PRIV OP KEY EXP = Y
 RSA PRIV OP KEY QT = Y
+Sym raw data path API = Y
 
 ;
 ; Supported crypto algorithms of the 'qat' crypto driver.

diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 85a07d86e..008f4eedc 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -104,6 +104,10 @@ New Features
   * Added support for non-HMAC auth algorithms
     (MD5, SHA1, SHA224, SHA256, SHA384, SHA512).
 
+* **Updated QAT crypto PMD.**
+
+  * Added raw data-path API support.
+
 * **Added Intel ACC100 bbdev PMD.**
 
   Added a new ``acc100`` bbdev driver for the Intel\ |reg| ACC100 accelerator

diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index a225f374a..bc90ec44c 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -15,6 +15,7 @@ if dep.found()
 	qat_sources += files('qat_sym_pmd.c',
 			     'qat_sym.c',
 			     'qat_sym_session.c',
+			     'qat_sym_hw_dp.c',
 			     'qat_asym_pmd.c',
 			     'qat_asym.c')
 	qat_ext_deps += dep

diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 1a9748849..7254f5e3c 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -264,6 +264,16 @@ qat_sym_process_response(void **op, uint8_t *resp)
 	}
 	*op = (void *)rx_op;
 }
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
+
 #else
 
 static inline void
@@ -276,5 +286,6 @@ static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
 {
 }
+
 #endif
 #endif /* _QAT_SYM_H_ */

diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
new file mode 100644
index 000000000..dfbbad59b
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -0,0 +1,959 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_qp.h"
+
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+static __rte_always_inline int32_t
+qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs)
+{
+	struct qat_queue *tx_queue;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t total_len;
+
+	if (likely(n_data_vecs == 1)) {
+		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+			data[0].iova;
+		req->comn_mid.src_length = req->comn_mid.dst_length =
+			data[0].len;
+		return data[0].len;
+	}
+
+	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
+		return -1;
+
+	total_len = 0;
+	tx_queue = &qp->tx_q;
+
+	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+	list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+	for (i = 0; i < n_data_vecs; i++) {
+		list->buffers[i].len = data[i].len;
+		list->buffers[i].resrvd = 0;
+		list->buffers[i].addr = data[i].iova;
+		if (total_len + data[i].len > UINT32_MAX) {
+			QAT_DP_LOG(ERR, "Message too long");
+			return -1;
+		}
+		total_len += data[i].len;
+	}
+
+	list->num_bufs = i;
+	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+			cookie->qat_sgl_src_phys_addr;
+	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+	return total_len;
+}
+
+static __rte_always_inline void
+set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+	struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+	struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array,
iv_ptr->va, + iv_len); + else { + ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( + qat_req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_CIPH_IV_64BIT_PTR); + cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova; + } +} + +#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \ + (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \ + ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status)) + +static __rte_always_inline void +qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n) +{ + uint32_t i; + + for (i = 0; i < n; i++) + sta[i] = status; +} + +#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \ + RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n) + +static __rte_always_inline void +enqueue_one_aead_job(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_la_cipher_req_params *cipher_param = + (void *)&req->serv_specif_rqpars; + struct icp_qat_fw_la_auth_req_params *auth_param = + (void *)((uint8_t *)&req->serv_specif_rqpars + + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); + uint8_t *aad_data; + uint8_t aad_ccm_real_len; + uint8_t aad_len_field_sz; + uint32_t msg_len_be; + rte_iova_t aad_iova = 0; + uint8_t q; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( + req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); + rte_memcpy(cipher_param->u.cipher_IV_array, iv->va, + ctx->cipher_iv.length); + aad_iova = aad->iova; + break; + case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: + aad_data = aad->va; + aad_iova = aad->iova; + aad_ccm_real_len = 0; + aad_len_field_sz = 0; + msg_len_be = rte_bswap32((uint32_t)data_len - + ofs.ofs.cipher.head); + + if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) { + aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO; + aad_ccm_real_len = ctx->aad_len - + ICP_QAT_HW_CCM_AAD_B0_LEN - + ICP_QAT_HW_CCM_AAD_LEN_INFO; + } else { + aad_data = iv->va; + aad_iova = iv->iova; + } + + q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length; + aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS( + aad_len_field_sz, ctx->digest_length, q); + if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) { + memcpy(aad_data + ctx->cipher_iv.length + + ICP_QAT_HW_CCM_NONCE_OFFSET + (q - + ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE), + (uint8_t *)&msg_len_be, + ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE); + } else { + memcpy(aad_data + ctx->cipher_iv.length + + ICP_QAT_HW_CCM_NONCE_OFFSET, + (uint8_t *)&msg_len_be + + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE + - q), q); + } + + if (aad_len_field_sz > 0) { + *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] = + rte_bswap16(aad_ccm_real_len); + + if ((aad_ccm_real_len + aad_len_field_sz) + % ICP_QAT_HW_CCM_AAD_B0_LEN) { + uint8_t pad_len = 0; + uint8_t pad_idx = 0; + + pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN - + ((aad_ccm_real_len + + aad_len_field_sz) % + ICP_QAT_HW_CCM_AAD_B0_LEN); + pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN + + aad_ccm_real_len + + aad_len_field_sz; + memset(&aad_data[pad_idx], 0, pad_len); + } + } + + rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) + + ICP_QAT_HW_CCM_NONCE_OFFSET, + (uint8_t *)iv->va + + ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length); + *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = + q - ICP_QAT_HW_CCM_NONCE_OFFSET; + + rte_memcpy((uint8_t *)aad->va + + ICP_QAT_HW_CCM_NONCE_OFFSET, + (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, + 
ctx->cipher_iv.length); + break; + default: + break; + } + + cipher_param->cipher_offset = ofs.ofs.cipher.head; + cipher_param->cipher_length = data_len - ofs.ofs.cipher.head - + ofs.ofs.cipher.tail; + auth_param->auth_off = ofs.ofs.cipher.head; + auth_param->auth_len = cipher_param->cipher_length; + auth_param->auth_res_addr = digest->iova; + auth_param->u1.aad_adr = aad_iova; + + if (ctx->is_single_pass) { + cipher_param->spc_aad_addr = aad_iova; + cipher_param->spc_auth_res_addr = digest->iova; + } +} + +static __rte_always_inline int +qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs); + if (unlikely(data_len < 0)) + return -1; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; + + enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs, + (uint32_t)data_len); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + + return 0; +} + +static __rte_always_inline uint32_t +qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec, + vec->sgl[i].num); + if (unlikely(data_len < 0)) + break; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; + enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i], + &vec->aad[i], ofs, (uint32_t)data_len); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +static __rte_always_inline void +enqueue_one_cipher_job(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *iv, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_la_cipher_req_params *cipher_param; + + cipher_param = (void *)&req->serv_specif_rqpars; + + /* cipher IV */ + set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req); + cipher_param->cipher_offset = ofs.ofs.cipher.head; + 
cipher_param->cipher_length = data_len - ofs.ofs.cipher.head - + ofs.ofs.cipher.tail; +} + +static __rte_always_inline int +qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest __rte_unused, + struct rte_crypto_va_iova_ptr *aad __rte_unused, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs); + if (unlikely(data_len < 0)) + return -1; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; + + enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + + return 0; +} + +static __rte_always_inline uint32_t +qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec, + vec->sgl[i].num); + if (unlikely(data_len < 0)) + break; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; + enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs, + (uint32_t)data_len); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +static __rte_always_inline void +enqueue_one_auth_job(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_auth_req_params *auth_param; + + cipher_param = (void *)&req->serv_specif_rqpars; + auth_param = (void *)((uint8_t *)cipher_param + + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); + + auth_param->auth_off = ofs.ofs.auth.head; + auth_param->auth_len = data_len - ofs.ofs.auth.head - + ofs.ofs.auth.tail; + auth_param->auth_res_addr = digest->iova; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: + case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: + case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: + auth_param->u1.aad_adr = auth_iv->iova; + break; + case 
ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( + req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); + rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va, + ctx->auth_iv.length); + break; + default: + break; + } +} + +static __rte_always_inline int +qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *iv __rte_unused, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs); + if (unlikely(data_len < 0)) + return -1; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; + + enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs, + (uint32_t)data_len); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + + return 0; +} + +static __rte_always_inline uint32_t +qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec, + vec->sgl[i].num); + if (unlikely(data_len < 0)) + break; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; + enqueue_one_auth_job(ctx, req, &vec->digest[i], + &vec->auth_iv[i], ofs, (uint32_t)data_len); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +static __rte_always_inline int +enqueue_one_chain_job(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_vec *data, + uint16_t n_data_vecs, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_auth_req_params *auth_param; + rte_iova_t auth_iova_end; + int32_t cipher_len, auth_len; + + cipher_param = (void *)&req->serv_specif_rqpars; + auth_param = (void *)((uint8_t *)cipher_param + + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET); + + 
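+ /* Derive the cipher and auth region lengths from the overall data_len: + * ofs carries the head/tail byte counts trimmed from each region, and a + * negative result means the offsets do not fit the buffer, so the job is + * rejected before anything is written into the request. + */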
cipher_len = data_len - ofs.ofs.cipher.head - + ofs.ofs.cipher.tail; + auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail; + + if (unlikely(cipher_len < 0 || auth_len < 0)) + return -1; + + cipher_param->cipher_offset = ofs.ofs.cipher.head; + cipher_param->cipher_length = cipher_len; + set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req); + + auth_param->auth_off = ofs.ofs.auth.head; + auth_param->auth_len = auth_len; + auth_param->auth_res_addr = digest->iova; + + switch (ctx->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: + case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: + case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: + auth_param->u1.aad_adr = auth_iv->iova; + + if (unlikely(n_data_vecs > 1)) { + int auth_end_get = 0, i = n_data_vecs - 1; + struct rte_crypto_vec *cvec = &data[0]; + uint32_t len; + + len = data_len - ofs.ofs.auth.tail; + + while (i >= 0 && len > 0) { + if (cvec->len >= len) { + auth_iova_end = cvec->iova + + (cvec->len - len); + len = 0; + auth_end_get = 1; + break; + } + len -= cvec->len; + i--; + cvec++; + } + + if (unlikely(auth_end_get == 0)) + return -1; + } else + auth_iova_end = data[0].iova + auth_param->auth_off + + auth_param->auth_len; + + /* Then check if digest-encrypted conditions are met */ + if ((auth_param->auth_off + auth_param->auth_len < + cipher_param->cipher_offset + + cipher_param->cipher_length) && + (digest->iova == auth_iova_end)) { + /* Handle partial digest encryption */ + if (cipher_param->cipher_offset + + cipher_param->cipher_length < + auth_param->auth_off + + auth_param->auth_len + + ctx->digest_length) + req->comn_mid.dst_length = + req->comn_mid.src_length = + auth_param->auth_off + + auth_param->auth_len + + ctx->digest_length; + struct icp_qat_fw_comn_req_hdr *header = + &req->comn_hdr; + ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET( + header->serv_specif_flags, + ICP_QAT_FW_LA_DIGEST_IN_BUFFER); + } + break; + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + break; + default: + break; + } + + return 0; +} + +static __rte_always_inline int +qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_vec *data, uint16_t n_data_vecs, + union rte_crypto_sym_ofs ofs, + struct rte_crypto_va_iova_ptr *cipher_iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + void *user_data) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs); + if (unlikely(data_len < 0)) + return -1; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; + + if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs, + cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len))) + return -1; + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue++; + + return 0; +} + +static __rte_always_inline uint32_t +qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx, + struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, + void *user_data[], int *status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = 
(void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { + qat_sym_dp_fill_vec_status(vec->status, -1, vec->num); + *status = 0; + return 0; + } + + tail = dp_ctx->tail; + + for (i = 0; i < n; i++) { + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec, + vec->sgl[i].num); + if (unlikely(data_len < 0)) + break; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; + if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec, + vec->sgl[i].num, &vec->iv[i], &vec->digest[i], + &vec->auth_iv[i], ofs, (uint32_t)data_len))) + break; + + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + } + + if (unlikely(i < n)) + qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i); + + dp_ctx->tail = tail; + dp_ctx->cached_enqueue += i; + *status = 0; + return i; +} + +static __rte_always_inline uint32_t +qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx, + rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, + rte_cryptodev_raw_post_dequeue_t post_dequeue, + void **out_user_data, uint8_t is_user_data_array, + uint32_t *n_success_jobs, int *return_status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *rx_queue = &qp->rx_q; + struct icp_qat_fw_comn_resp *resp; + void *resp_opaque; + uint32_t i, n, inflight; + uint32_t head; + uint8_t status; + + *n_success_jobs = 0; + *return_status = 0; + head = dp_ctx->head; + + inflight = qp->enqueued - qp->dequeued; + if (unlikely(inflight == 0)) + return 0; + + resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr + + head); + /* no operation ready */ + if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) + return 0; + + resp_opaque = (void *)(uintptr_t)resp->opaque_data; + /* get the dequeue count */ + n = get_dequeue_count(resp_opaque); + if (unlikely(n == 0)) + return 0; + + out_user_data[0] = resp_opaque; + status = QAT_SYM_DP_IS_RESP_SUCCESS(resp); + post_dequeue(resp_opaque, 0, status); + *n_success_jobs += status; + + head = (head + rx_queue->msg_size) & rx_queue->modulo_mask; + + /* we already finished dequeue when n == 1 */ + if (unlikely(n == 1)) { + i = 1; + goto end_deq; + } + + if (is_user_data_array) { + for (i = 1; i < n; i++) { + resp = (struct icp_qat_fw_comn_resp *)( + (uint8_t *)rx_queue->base_addr + head); + if (unlikely(*(uint32_t *)resp == + ADF_RING_EMPTY_SIG)) + goto end_deq; + out_user_data[i] = (void *)(uintptr_t)resp->opaque_data; + status = QAT_SYM_DP_IS_RESP_SUCCESS(resp); + *n_success_jobs += status; + post_dequeue(out_user_data[i], i, status); + head = (head + rx_queue->msg_size) & + rx_queue->modulo_mask; + } + + goto end_deq; + } + + /* opaque is not array */ + for (i = 1; i < n; i++) { + resp = (struct icp_qat_fw_comn_resp *)( + (uint8_t *)rx_queue->base_addr + head); + status = QAT_SYM_DP_IS_RESP_SUCCESS(resp); + if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) + goto end_deq; + head = (head + rx_queue->msg_size) & + rx_queue->modulo_mask; + post_dequeue(resp_opaque, i, status); + *n_success_jobs += status; + } + +end_deq: + dp_ctx->head = head; + dp_ctx->cached_dequeue += i; + return i; +} + +static __rte_always_inline void * 
+qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status, + enum rte_crypto_op_status *op_status) +{ + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *rx_queue = &qp->rx_q; + register struct icp_qat_fw_comn_resp *resp; + + resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr + + dp_ctx->head); + + if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) + return NULL; + + dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) & + rx_queue->modulo_mask; + dp_ctx->cached_dequeue++; + + *op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ? + RTE_CRYPTO_OP_STATUS_SUCCESS : + RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + *dequeue_status = 0; + return (void *)(uintptr_t)resp->opaque_data; +} + +static __rte_always_inline int +qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n) +{ + struct qat_qp *qp = qp_data; + struct qat_queue *tx_queue = &qp->tx_q; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + + if (unlikely(dp_ctx->cached_enqueue != n)) + return -1; + + qp->enqueued += n; + qp->stats.enqueued_count += n; + + tx_queue->tail = dp_ctx->tail; + + WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, + tx_queue->hw_bundle_number, + tx_queue->hw_queue_number, tx_queue->tail); + tx_queue->csr_tail = tx_queue->tail; + dp_ctx->cached_enqueue = 0; + + return 0; +} + +static __rte_always_inline int +qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n) +{ + struct qat_qp *qp = qp_data; + struct qat_queue *rx_queue = &qp->rx_q; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + + if (unlikely(dp_ctx->cached_dequeue != n)) + return -1; + + rx_queue->head = dp_ctx->head; + rx_queue->nb_processed_responses += n; + qp->dequeued += n; + qp->stats.dequeued_count += n; + if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) { + uint32_t old_head, new_head; + uint32_t max_head; + + old_head = rx_queue->csr_head; + new_head = rx_queue->head; + max_head = qp->nb_descriptors * rx_queue->msg_size; + + /* write out free descriptors */ + void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head; + + if (new_head < old_head) { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, + max_head - old_head); + memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE, + new_head); + } else { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - + old_head); + } + rx_queue->nb_processed_responses = 0; + rx_queue->csr_head = new_head; + + /* write current head to CSR */ + WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, + rx_queue->hw_bundle_number, rx_queue->hw_queue_number, + new_head); + } + + dp_ctx->cached_dequeue = 0; + return 0; +} + +int +qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id, + struct rte_crypto_raw_dp_ctx *raw_dp_ctx, + enum rte_crypto_op_sess_type sess_type, + union rte_cryptodev_session_ctx session_ctx, uint8_t is_update) +{ + struct qat_qp *qp; + struct qat_sym_session *ctx; + struct qat_sym_dp_ctx *dp_ctx; + + qp = dev->data->queue_pairs[qp_id]; + dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data; + + if (!is_update) { + memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) + + sizeof(struct qat_sym_dp_ctx)); + raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id]; + dp_ctx->tail = qp->tx_q.tail; + dp_ctx->head = qp->rx_q.head; + dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0; + } + + if (sess_type != RTE_CRYPTO_OP_WITH_SESSION) + return -EINVAL; + + ctx = (struct qat_sym_session *)get_sym_session_private_data( + session_ctx.crypto_sess, qat_sym_driver_id); + + dp_ctx->session = ctx; + + 
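+ /* Wire up the raw data-path handlers: the enqueue-done/dequeue handlers + * are common to all commands, while the enqueue handlers below are + * selected by the session's qat_cmd (AEAD, chain, auth-only or + * cipher-only). + */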
raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail; + raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst; + raw_dp_ctx->dequeue = qat_sym_dp_dequeue; + raw_dp_ctx->dequeue_done = qat_sym_dp_update_head; + + if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || + ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { + /* AES-GCM or AES-CCM */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || + (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 + && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE + && ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { + raw_dp_ctx->enqueue_burst = + qat_sym_dp_enqueue_aead_jobs; + raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead; + } else { + raw_dp_ctx->enqueue_burst = + qat_sym_dp_enqueue_chain_jobs; + raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain; + } + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { + raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs; + raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth; + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_cipher_jobs; + raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher; + } else + return -1; + + return 0; +} + +int +qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev) +{ + return sizeof(struct qat_sym_dp_ctx); +} diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c index 314742f53..6b189c319 100644 --- a/drivers/crypto/qat/qat_sym_pmd.c +++ b/drivers/crypto/qat/qat_sym_pmd.c @@ -258,7 +258,11 @@ static struct rte_cryptodev_ops crypto_qat_ops = { /* Crypto related operations */ .sym_session_get_size = qat_sym_session_get_private_size, .sym_session_configure = qat_sym_session_configure, - .sym_session_clear = qat_sym_session_clear + .sym_session_clear = qat_sym_session_clear, + + /* Raw data-path API related operations */ + .sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size, + .sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx, }; #ifdef RTE_LIBRTE_SECURITY @@ -376,7 +380,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | - RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED | + RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED | + RTE_CRYPTODEV_FF_SYM_RAW_DP; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0;
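The handler wiring above is the PMD half of the story; the application half drives the same context through the generic API: query the driver's private context size, configure the raw data-path context on a queue pair, enqueue a burst described by struct rte_crypto_sym_vec, commit it once with enqueue-done, then poll for responses and acknowledge them once with dequeue-done. A minimal sketch of that flow follows; raw_dp_burst, deq_count_cb and deq_post_cb are illustrative names only, error handling is trimmed, and the session, queue pair and vector are assumed to be prepared as in the unit test in the next patch.

#include <stdlib.h>

#include <rte_cryptodev.h>
#include <rte_pause.h>

/* Illustrative dequeue callbacks: pull one response per poll; per-op
 * status handling is left to the caller via user_data. */
static uint32_t
deq_count_cb(void *user_data __rte_unused)
{
	return 1;
}

static void
deq_post_cb(void *user_data __rte_unused, uint32_t index __rte_unused,
		uint8_t is_op_success __rte_unused)
{
}

/* One synchronous round trip of a burst through the raw data-path API. */
static int
raw_dp_burst(uint8_t dev_id, uint16_t qp_id,
		union rte_cryptodev_session_ctx sess,
		struct rte_crypto_sym_vec *vec,
		union rte_crypto_sym_ofs ofs, void *user_data[])
{
	struct rte_crypto_raw_dp_ctx *ctx;
	uint32_t n_enq, n_deq, total = 0, n_success = 0, n_ok = 0;
	uint32_t retries = 0;
	int size, enq_status, deq_status = 0;

	/* the PMD reports how much private context space it needs */
	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return -1;
	ctx = malloc(size);
	if (ctx == NULL)
		return -1;
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess, 0) < 0)
		goto error;

	/* enqueue the burst, then kick the hardware tail exactly once */
	n_enq = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs,
			user_data, &enq_status);
	if (n_enq < vec->num || enq_status < 0)
		goto error;
	if (enq_status == 0 &&
			rte_cryptodev_raw_enqueue_done(ctx, n_enq) < 0)
		goto error;

	/* poll until the burst completes (bounded, as the unit test does),
	 * then move the ring head exactly once */
	while (total < n_enq && retries++ < 65535) {
		n_deq = rte_cryptodev_raw_dequeue_burst(ctx,
				deq_count_cb, deq_post_cb,
				&user_data[total], 1,
				&n_success, &deq_status);
		if (deq_status < 0)
			goto error;
		if (n_deq == 0)
			rte_pause();
		total += n_deq;
		n_ok += n_success;
	}
	if (total < n_enq || rte_cryptodev_raw_dequeue_done(ctx, total) < 0)
		goto error;

	free(ctx);
	return (int)n_ok;
error:
	free(ctx);
	return -1;
}

Enqueue-done and dequeue-done map to qat_sym_dp_kick_tail() and qat_sym_dp_update_head() above, which is why the cached_enqueue/cached_dequeue counters there must match the count passed in.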
From patchwork Sun Oct 11 00:38:54 2020 X-Patchwork-Id: 80303 From: Fan Zhang To: dev@dpdk.org Cc: akhil.goyal@nxp.com, Fan Zhang, Adam Dybkowski Date: Sun, 11 Oct 2020 01:38:54 +0100 Message-Id: <20201011003854.54947-5-roy.fan.zhang@intel.com> Subject: [dpdk-dev] [dpdk-dev v13 4/4] test/crypto: add unit-test for cryptodev raw API test This patch adds cryptodev raw API test support to the unit tests. In addition, a new QAT PMD test case is enabled for this test type. Signed-off-by: Fan Zhang Acked-by: Adam Dybkowski --- app/test/test_cryptodev.c | 787 ++++++++++++++++++++++++-- app/test/test_cryptodev.h | 12 + app/test/test_cryptodev_blockcipher.c | 58 +- 3 files changed, 803 insertions(+), 54 deletions(-) diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c index 62a265520..219373e10 100644 --- a/app/test/test_cryptodev.c +++ b/app/test/test_cryptodev.c @@ -49,6 +49,10 @@ #define VDEV_ARGS_SIZE 100 #define MAX_NB_SESSIONS 4 +#define MAX_DRV_SERVICE_CTX_SIZE 256 + +#define MAX_RAW_DEQUEUE_COUNT 65535 + #define IN_PLACE 0 #define OUT_OF_PLACE 1 @@ -57,6 +61,8 @@ static int gbl_driver_id; static enum rte_security_session_action_type gbl_action_type = RTE_SECURITY_ACTION_TYPE_NONE; +enum cryptodev_api_test_type global_api_test_type = CRYPTODEV_API_TEST; + struct crypto_testsuite_params { struct rte_mempool *mbuf_pool; struct rte_mempool *large_mbuf_pool; @@ -147,6 +153,215 @@ ceil_byte_length(uint32_t num_bits) return (num_bits >> 3); } +static uint32_t +get_raw_dp_dequeue_count(void *user_data __rte_unused) +{ + return 1; +} + +static void +post_process_raw_dp_op(void *user_data, uint32_t index __rte_unused, + uint8_t is_op_success) +{ + struct rte_crypto_op *op = user_data; + op->status = is_op_success ?
RTE_CRYPTO_OP_STATUS_SUCCESS : + RTE_CRYPTO_OP_STATUS_ERROR; +} + +void +process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, + struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth, + uint8_t len_in_bits, uint8_t cipher_iv_len) +{ + struct rte_crypto_sym_op *sop = op->sym; + struct rte_crypto_op *ret_op = NULL; + struct rte_crypto_vec data_vec[UINT8_MAX]; + struct rte_crypto_va_iova_ptr cipher_iv, digest, aad_auth_iv; + union rte_crypto_sym_ofs ofs; + struct rte_crypto_sym_vec vec; + struct rte_crypto_sgl sgl; + uint32_t max_len; + union rte_cryptodev_session_ctx sess; + uint32_t count = 0; + struct rte_crypto_raw_dp_ctx *ctx; + uint32_t cipher_offset = 0, cipher_len = 0, auth_offset = 0, + auth_len = 0; + int32_t n; + uint32_t n_success; + int ctx_service_size; + int32_t status = 0; + int enqueue_status, dequeue_status; + + ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id); + if (ctx_service_size < 0) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return; + } + + ctx = malloc(ctx_service_size); + if (!ctx) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return; + } + + /* Both are enums, setting crypto_sess will suit any session type */ + sess.crypto_sess = op->sym->session; + + if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx, + op->sess_type, sess, 0) < 0) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + goto exit; + } + + cipher_iv.iova = 0; + cipher_iv.va = NULL; + aad_auth_iv.iova = 0; + aad_auth_iv.va = NULL; + digest.iova = 0; + digest.va = NULL; + sgl.vec = data_vec; + vec.num = 1; + vec.sgl = &sgl; + vec.iv = &cipher_iv; + vec.digest = &digest; + vec.aad = &aad_auth_iv; + vec.status = &status; + + ofs.raw = 0; + + if (is_cipher && is_auth) { + cipher_offset = sop->cipher.data.offset; + cipher_len = sop->cipher.data.length; + auth_offset = sop->auth.data.offset; + auth_len = sop->auth.data.length; + max_len = RTE_MAX(cipher_offset + cipher_len, + auth_offset + auth_len); + if (len_in_bits) { + max_len = max_len >> 3; + cipher_offset = cipher_offset >> 3; + auth_offset = auth_offset >> 3; + cipher_len = cipher_len >> 3; + auth_len = auth_len >> 3; + } + ofs.ofs.cipher.head = cipher_offset; + ofs.ofs.cipher.tail = max_len - cipher_offset - cipher_len; + ofs.ofs.auth.head = auth_offset; + ofs.ofs.auth.tail = max_len - auth_offset - auth_len; + cipher_iv.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); + cipher_iv.iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET); + aad_auth_iv.va = rte_crypto_op_ctod_offset( + op, void *, IV_OFFSET + cipher_iv_len); + aad_auth_iv.iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET + + cipher_iv_len); + digest.va = (void *)sop->auth.digest.data; + digest.iova = sop->auth.digest.phys_addr; + + } else if (is_cipher) { + cipher_offset = sop->cipher.data.offset; + cipher_len = sop->cipher.data.length; + max_len = cipher_len + cipher_offset; + if (len_in_bits) { + max_len = max_len >> 3; + cipher_offset = cipher_offset >> 3; + cipher_len = cipher_len >> 3; + } + ofs.ofs.cipher.head = cipher_offset; + ofs.ofs.cipher.tail = max_len - cipher_offset - cipher_len; + cipher_iv.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); + cipher_iv.iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET); + + } else if (is_auth) { + auth_offset = sop->auth.data.offset; + auth_len = sop->auth.data.length; + max_len = auth_len + auth_offset; + if (len_in_bits) { + max_len = max_len >> 3; + auth_offset = auth_offset >> 3; + auth_len = auth_len >> 3; + } + ofs.ofs.auth.head = auth_offset; + ofs.ofs.auth.tail = max_len - auth_offset - auth_len; + 
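+ /* For wireless algorithms the offsets and lengths above arrive in bits; + * once converted, ofs head/tail are plain byte counts to skip before and + * after the auth region within max_len. + */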
aad_auth_iv.va = rte_crypto_op_ctod_offset( + op, void *, IV_OFFSET + cipher_iv_len); + aad_auth_iv.iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET + + cipher_iv_len); + digest.va = (void *)sop->auth.digest.data; + digest.iova = sop->auth.digest.phys_addr; + + } else { /* aead */ + cipher_offset = sop->aead.data.offset; + cipher_len = sop->aead.data.length; + max_len = cipher_len + cipher_offset; + if (len_in_bits) { + max_len = max_len >> 3; + cipher_offset = cipher_offset >> 3; + cipher_len = cipher_len >> 3; + } + ofs.ofs.cipher.head = cipher_offset; + ofs.ofs.cipher.tail = max_len - cipher_offset - cipher_len; + cipher_iv.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET); + cipher_iv.iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET); + aad_auth_iv.va = (void *)sop->aead.aad.data; + aad_auth_iv.iova = sop->aead.aad.phys_addr; + digest.va = (void *)sop->aead.digest.data; + digest.iova = sop->aead.digest.phys_addr; + } + + n = rte_crypto_mbuf_to_vec(sop->m_src, 0, max_len, + data_vec, RTE_DIM(data_vec)); + if (n < 0 || n > sop->m_src->nb_segs) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + goto exit; + } + + sgl.num = n; + + if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, (void **)&op, + &enqueue_status) < 1) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + goto exit; + } + + if (enqueue_status == 0) { + status = rte_cryptodev_raw_enqueue_done(ctx, 1); + if (status < 0) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + goto exit; + } + } else if (enqueue_status < 0) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + goto exit; + } + + n = n_success = 0; + while (count++ < MAX_RAW_DEQUEUE_COUNT && n == 0) { + n = rte_cryptodev_raw_dequeue_burst(ctx, + get_raw_dp_dequeue_count, post_process_raw_dp_op, + (void **)&ret_op, 0, &n_success, + &dequeue_status); + if (dequeue_status < 0) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + goto exit; + } + if (n == 0) + rte_pause(); + } + + if (n == 1 && dequeue_status == 0) { + if (rte_cryptodev_raw_dequeue_done(ctx, 1) < 0) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + goto exit; + } + } + + op->status = (count == MAX_RAW_DEQUEUE_COUNT + 1 || ret_op != op || + n_success < 1) ? 
RTE_CRYPTO_OP_STATUS_ERROR : + RTE_CRYPTO_OP_STATUS_SUCCESS; + +exit: + free(ctx); +} + static void process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op) { @@ -1661,6 +1876,9 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess, if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_crypt_auth_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 0, 0); else TEST_ASSERT_NOT_NULL( process_crypto_request(ts_params->valid_devs[0], @@ -1715,12 +1933,18 @@ test_AES_cipheronly_all(void) static int test_AES_docsis_all(void) { + /* Data-path service does not support DOCSIS yet */ + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; return test_blockcipher(BLKCIPHER_AES_DOCSIS_TYPE); } static int test_DES_docsis_all(void) { + /* Data-path service does not support DOCSIS yet */ + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; return test_blockcipher(BLKCIPHER_DES_DOCSIS_TYPE); } @@ -2435,6 +2659,12 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata) return -ENOTSUP; } + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } + /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; @@ -2475,7 +2705,11 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 1, 0); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); ut_params->obuf = ut_params->op->sym->m_src; TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -2513,6 +2747,12 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata) return -ENOTSUP; } + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } + /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; @@ -2554,7 +2794,11 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 1, 0); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); ut_params->obuf = ut_params->op->sym->m_src; @@ -2580,6 +2824,16 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata) unsigned plaintext_pad_len; unsigned plaintext_len; uint8_t *plaintext; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the 
capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -2624,6 +2878,9 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata) if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_crypt_auth_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 1, 0); else ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); @@ -2653,6 +2910,16 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata) unsigned plaintext_pad_len; unsigned plaintext_len; uint8_t *plaintext; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -2695,7 +2962,11 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 1, 0); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); ut_params->obuf = ut_params->op->sym->m_src; @@ -2860,6 +3131,16 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata) uint8_t *plaintext, *ciphertext; unsigned plaintext_pad_len; unsigned plaintext_len; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -2902,8 +3183,12 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], - ut_params->op); + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 0, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], + ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); ut_params->obuf = ut_params->op->sym->m_dst; @@ -2959,6 +3244,12 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata) return -ENOTSUP; } + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } + /* Create KASUMI session */ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0], RTE_CRYPTO_CIPHER_OP_ENCRYPT, @@ -2988,7 +3279,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 0, 1, 
tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -3031,10 +3326,14 @@ test_kasumi_encryption_oop(const struct kasumi_test_data *tdata) struct rte_cryptodev_sym_capability_idx cap_idx; cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_KASUMI_F8; + /* Data-path service does not support OOP */ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], &cap_idx) == NULL) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; + /* Create KASUMI session */ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0], RTE_CRYPTO_CIPHER_OP_ENCRYPT, @@ -3116,6 +3415,9 @@ test_kasumi_encryption_oop_sgl(const struct kasumi_test_data *tdata) &cap_idx) == NULL) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; @@ -3201,6 +3503,9 @@ test_kasumi_decryption_oop(const struct kasumi_test_data *tdata) &cap_idx) == NULL) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; + /* Create KASUMI session */ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0], RTE_CRYPTO_CIPHER_OP_DECRYPT, @@ -3269,6 +3574,16 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata) uint8_t *ciphertext, *plaintext; unsigned ciphertext_pad_len; unsigned ciphertext_len; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -3311,7 +3626,11 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 0, 1, 0); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -3344,6 +3663,16 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata) uint8_t *plaintext, *ciphertext; unsigned plaintext_pad_len; unsigned plaintext_len; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -3386,7 +3715,11 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 0, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ 
-3427,6 +3760,9 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata) &cap_idx) == NULL) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; + /* Create SNOW 3G session */ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0], RTE_CRYPTO_CIPHER_OP_ENCRYPT, @@ -3510,6 +3846,9 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata) &cap_idx) == NULL) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; @@ -3629,6 +3968,9 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata) &cap_idx) == NULL) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; + /* Create SNOW 3G session */ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0], RTE_CRYPTO_CIPHER_OP_ENCRYPT, @@ -3719,6 +4061,16 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata) uint8_t *plaintext, *ciphertext; unsigned ciphertext_pad_len; unsigned ciphertext_len; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -3761,7 +4113,11 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 0, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); ut_params->obuf = ut_params->op->sym->m_dst; @@ -3799,6 +4155,9 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata) &cap_idx) == NULL) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; + /* Create SNOW 3G session */ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0], RTE_CRYPTO_CIPHER_OP_DECRYPT, @@ -3886,6 +4245,12 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) return -ENOTSUP; } + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } + /* Check if device supports ZUC EEA3 */ cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_ZUC_EEA3; @@ -3929,7 +4294,11 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); ut_params->obuf = ut_params->op->sym->m_src; @@ -3969,6 +4338,16 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata) uint8_t *plaintext, *ciphertext; unsigned 
plaintext_pad_len; unsigned plaintext_len; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -4024,7 +4403,11 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); ut_params->obuf = ut_params->op->sym->m_src; @@ -4092,6 +4475,14 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata, printf("Device doesn't support digest encrypted.\n"); return -ENOTSUP; } + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; + } + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; } /* Create SNOW 3G session */ @@ -4160,7 +4551,11 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata, if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -4270,7 +4665,14 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata, "in both input and output mbufs.\n"); return -ENOTSUP; } + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } } else { + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; if (!(feat_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)) { printf("Device doesn't support out-of-place scatter-gather " "in both input and output mbufs.\n"); @@ -4349,7 +4751,11 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata, if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -4457,7 +4863,15 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata, uint64_t feat_flags = dev_info.feature_flags; + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } + if (op_mode == OUT_OF_PLACE) { + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; if (!(feat_flags & RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED)) { printf("Device doesn't support digest 
encrypted.\n"); return -ENOTSUP; @@ -4531,7 +4945,11 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata, if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -4642,7 +5060,14 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata, "in both input and output mbufs.\n"); return -ENOTSUP; } + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } } else { + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; if (!(feat_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)) { printf("Device doesn't support out-of-place scatter-gather " "in both input and output mbufs.\n"); @@ -4721,7 +5146,11 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata, if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -4806,6 +5235,16 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata) uint8_t *plaintext, *ciphertext; unsigned plaintext_pad_len; unsigned plaintext_len; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -4862,7 +5301,11 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -4905,6 +5348,16 @@ test_zuc_encryption(const struct wireless_test_data *tdata) uint8_t *plaintext, *ciphertext; unsigned plaintext_pad_len; unsigned plaintext_len; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } struct rte_cryptodev_sym_capability_idx cap_idx; @@ -4949,7 +5402,11 @@ test_zuc_encryption(const struct wireless_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 0, 
1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -5004,6 +5461,12 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata) return -ENOTSUP; } + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } + plaintext_len = ceil_byte_length(tdata->plaintext.len); /* Append data which is padded to a multiple */ @@ -5036,7 +5499,11 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 0, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -5084,6 +5551,12 @@ test_zuc_authentication(const struct wireless_test_data *tdata) return -ENOTSUP; } + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } + /* Check if device supports ZUC EIA3 */ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; cap_idx.algo.auth = RTE_CRYPTO_AUTH_ZUC_EIA3; @@ -5124,7 +5597,11 @@ test_zuc_authentication(const struct wireless_test_data *tdata) if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 1, 0); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); ut_params->obuf = ut_params->op->sym->m_src; TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -5181,7 +5658,15 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, "in both input and output mbufs.\n"); return -ENOTSUP; } + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } } else { + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; if (!(feat_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)) { printf("Device doesn't support out-of-place scatter-gather " "in both input and output mbufs.\n"); @@ -5256,7 +5741,11 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -5363,7 +5852,15 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, "in both input and output mbufs.\n"); return -ENOTSUP; } + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } } else { + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; if (!(feat_flags & 
RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)) { printf("Device doesn't support out-of-place scatter-gather " "in both input and output mbufs.\n"); @@ -5442,7 +5939,11 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, if (retval < 0) return retval; - ut_params->op = process_crypto_request(ts_params->valid_devs[0], + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 1, tdata->cipher_iv.len); + else + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -5585,6 +6086,9 @@ test_kasumi_decryption_test_case_2(void) static int test_kasumi_decryption_test_case_3(void) { + /* rte_crypto_mbuf_to_vec does not support incomplete mbuf build */ + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; return test_kasumi_decryption(&kasumi_test_case_3); } @@ -5784,6 +6288,9 @@ test_snow3g_auth_cipher_part_digest_enc_oop(void) static int test_snow3g_auth_cipher_test_case_3_sgl(void) { + /* rte_crypto_mbuf_to_vec does not support incomplete mbuf build */ + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; return test_snow3g_auth_cipher_sgl( &snow3g_auth_cipher_test_case_3, IN_PLACE, 0); } @@ -5798,6 +6305,9 @@ test_snow3g_auth_cipher_test_case_3_oop_sgl(void) static int test_snow3g_auth_cipher_part_digest_enc_sgl(void) { + /* rte_crypto_mbuf_to_vec does not support incomplete mbuf build */ + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; return test_snow3g_auth_cipher_sgl( &snow3g_auth_cipher_partial_digest_encryption, IN_PLACE, 0); @@ -6151,11 +6661,12 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, unsigned int ciphertext_len; struct rte_cryptodev_info dev_info; - struct rte_crypto_op *op; /* Check if device supports particular algorithms separately */ if (test_mixed_check_if_unsupported(tdata)) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); @@ -6166,6 +6677,9 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, return -ENOTSUP; } + if (op_mode == OUT_OF_PLACE) + return -ENOTSUP; + /* Create the session */ if (verify) retval = create_wireless_algo_cipher_auth_session( @@ -6197,9 +6711,11 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, /* clear mbuf payload */ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0, rte_pktmbuf_tailroom(ut_params->ibuf)); - if (op_mode == OUT_OF_PLACE) + if (op_mode == OUT_OF_PLACE) { + memset(rte_pktmbuf_mtod(ut_params->obuf, uint8_t *), 0, rte_pktmbuf_tailroom(ut_params->obuf)); + } ciphertext_len = ceil_byte_length(tdata->ciphertext.len_bits); plaintext_len = ceil_byte_length(tdata->plaintext.len_bits); @@ -6240,18 +6756,17 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata, if (retval < 0) return retval; - op = process_crypto_request(ts_params->valid_devs[0], + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); /* Check if the op failed because the device doesn't */ /* support this particular combination of algorithms */ - if (op == NULL && ut_params->op->status == + if (ut_params->op == NULL && ut_params->op->status == RTE_CRYPTO_OP_STATUS_INVALID_SESSION) { printf("Device doesn't support this mixed combination. 
" "Test Skipped.\n"); return -ENOTSUP; } - ut_params->op = op; TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); @@ -6342,11 +6857,12 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, uint8_t digest_buffer[10000]; struct rte_cryptodev_info dev_info; - struct rte_crypto_op *op; /* Check if device supports particular algorithms */ if (test_mixed_check_if_unsupported(tdata)) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); @@ -6445,20 +6961,18 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, if (retval < 0) return retval; - op = process_crypto_request(ts_params->valid_devs[0], + ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); /* Check if the op failed because the device doesn't */ /* support this particular combination of algorithms */ - if (op == NULL && ut_params->op->status == + if (ut_params->op == NULL && ut_params->op->status == RTE_CRYPTO_OP_STATUS_INVALID_SESSION) { printf("Device doesn't support this mixed combination. " "Test Skipped.\n"); return -ENOTSUP; } - ut_params->op = op; - TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf"); ut_params->obuf = (op_mode == IN_PLACE ? @@ -6999,6 +7513,16 @@ test_authenticated_encryption(const struct aead_test_data *tdata) uint8_t *ciphertext, *auth_tag; uint16_t plaintext_pad_len; uint32_t i; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -7048,6 +7572,9 @@ test_authenticated_encryption(const struct aead_test_data *tdata) /* Process crypto operation */ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 0, 0, 0); else TEST_ASSERT_NOT_NULL( process_crypto_request(ts_params->valid_devs[0], @@ -8496,6 +9023,16 @@ test_authenticated_decryption(const struct aead_test_data *tdata) int retval; uint8_t *plaintext; uint32_t i; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -8545,6 +9082,9 @@ test_authenticated_decryption(const struct aead_test_data *tdata) /* Process crypto operation */ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 0, 0, 0); else TEST_ASSERT_NOT_NULL( process_crypto_request(ts_params->valid_devs[0], @@ -8839,6 +9379,9 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata) &cap_idx) == NULL) return -ENOTSUP; + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) 
+ return -ENOTSUP; + + /* not supported with CPU crypto */ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) return -ENOTSUP; @@ -8928,8 +9471,9 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata) &cap_idx) == NULL) return -ENOTSUP; - /* not supported with CPU crypto */ - if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) + /* not supported with CPU crypto and raw data-path APIs */ + if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO || + global_api_test_type == CRYPTODEV_RAW_API_TEST) return -ENOTSUP; /* Create AEAD session */ @@ -9115,6 +9659,12 @@ test_authenticated_decryption_sessionless( return -ENOTSUP; } + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } + /* not supported with CPU crypto */ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) return -ENOTSUP; @@ -9156,8 +9706,13 @@ test_authenticated_decryption_sessionless( "crypto op session type not sessionless"); /* Process crypto operation */ - TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0], - ut_params->op), "failed to process sym crypto op"); + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 0, 0, 0); + else + TEST_ASSERT_NOT_NULL(process_crypto_request( + ts_params->valid_devs[0], ut_params->op), + "failed to process sym crypto op"); TEST_ASSERT_NOT_NULL(ut_params->op, "failed crypto process"); @@ -9447,6 +10002,16 @@ test_MD5_HMAC_generate(const struct HMAC_MD5_vector *test_case) struct crypto_testsuite_params *ts_params = &testsuite_params; struct crypto_unittest_params *ut_params = &unittest_params; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -9475,6 +10040,9 @@ test_MD5_HMAC_generate(const struct HMAC_MD5_vector *test_case) if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_crypt_auth_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 0, 0); else TEST_ASSERT_NOT_NULL( process_crypto_request(ts_params->valid_devs[0], @@ -9507,6 +10075,16 @@ test_MD5_HMAC_verify(const struct HMAC_MD5_vector *test_case) struct crypto_testsuite_params *ts_params = &testsuite_params; struct crypto_unittest_params *ut_params = &unittest_params; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -9533,6 +10111,9 @@ test_MD5_HMAC_verify(const struct HMAC_MD5_vector *test_case) if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_crypt_auth_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + 
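/* auth-only verify: is_cipher=0, is_auth=1, len_in_bits=0, cipher_iv_len=0 */ +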
process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 0, 0); else TEST_ASSERT_NOT_NULL( process_crypto_request(ts_params->valid_devs[0], @@ -10038,6 +10619,16 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata) { struct crypto_testsuite_params *ts_params = &testsuite_params; struct crypto_unittest_params *ut_params = &unittest_params; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } int retval; @@ -10101,6 +10692,9 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata) if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_crypt_auth_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 0, 0); else TEST_ASSERT_NOT_NULL( process_crypto_request(ts_params->valid_devs[0], @@ -10159,6 +10753,16 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata) int retval; uint32_t plaintext_pad_len; uint8_t *plaintext; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0, "No GMAC length in the source data"); @@ -10218,6 +10822,9 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata) if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_crypt_auth_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 0, 0); else TEST_ASSERT_NOT_NULL( process_crypto_request(ts_params->valid_devs[0], @@ -10733,6 +11340,16 @@ test_authentication_verify_fail_when_data_corruption( int retval; uint8_t *plaintext; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -10742,6 +11359,7 @@ test_authentication_verify_fail_when_data_corruption( &cap_idx) == NULL) return -ENOTSUP; + /* Create session */ retval = create_auth_session(ut_params, ts_params->valid_devs[0], @@ -10783,7 +11401,10 @@ test_authentication_verify_fail_when_data_corruption( TEST_ASSERT_NOT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS, "authentication not failed"); - } else { + } else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 0, 0); + else { ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NULL(ut_params->op, "authentication not failed"); @@ -10801,6 +11422,16 @@ test_authentication_verify_GMAC_fail_when_corruption( { int retval; uint8_t *plaintext; + struct 
rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -10854,7 +11485,10 @@ test_authentication_verify_GMAC_fail_when_corruption( TEST_ASSERT_NOT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS, "authentication not failed"); - } else { + } else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 1, 0, 0); + else { ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NULL(ut_params->op, "authentication not failed"); @@ -10873,6 +11507,16 @@ test_authenticated_decryption_fail_when_corruption( int retval; uint8_t *ciphertext; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -10929,7 +11573,10 @@ test_authenticated_decryption_fail_when_corruption( TEST_ASSERT_NOT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS, "authentication not failed"); - } else { + } else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 0, 0); + else { ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); TEST_ASSERT_NULL(ut_params->op, "authentication not failed"); @@ -10950,6 +11597,16 @@ test_authenticated_encryt_with_esn( uint16_t plaintext_pad_len; uint8_t cipher_key[reference->cipher_key.len + 1]; uint8_t auth_key[reference->auth_key.len + 1]; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; @@ -11024,6 +11681,9 @@ test_authenticated_encryt_with_esn( if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_crypt_auth_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 0, 0); else ut_params->op = process_crypto_request( ts_params->valid_devs[0], ut_params->op); @@ -11070,6 +11730,16 @@ test_authenticated_decrypt_with_esn( uint8_t *ciphertext; uint8_t cipher_key[reference->cipher_key.len + 1]; uint8_t auth_key[reference->auth_key.len + 1]; + struct rte_cryptodev_info dev_info; + + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } /* Verify the capabilities */ struct rte_cryptodev_sym_capability_idx cap_idx; 
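Every wireless, AEAD and auth test in this file funnels its op through process_sym_raw_dp_op() when the raw API mode is selected; the helper's definition is not shown in this excerpt. As a rough orientation only, a stripped-down flow over the raw data-path API that this series introduces could look like the sketch below. The helper name is hypothetical, it assumes a session-based op with a single contiguous mbuf segment and AEAD-style pointers, uses plain malloc() for brevity, and omits the SGL handling, bit-level lengths and status bookkeeping that the real helper's is_cipher/is_auth/len_in_bits arguments steer:

	/* Illustrative sketch, not the patch's implementation. */
	static void
	raw_dp_op_sketch(uint8_t dev_id, uint16_t qp_id,
			struct rte_crypto_op *op, union rte_crypto_sym_ofs ofs)
	{
		struct rte_crypto_sym_op *sop = op->sym;
		struct rte_crypto_raw_dp_ctx *ctx;
		struct rte_crypto_vec data_vec[1];
		struct rte_crypto_va_iova_ptr iv, digest, aad;
		union rte_cryptodev_session_ctx sess_ctx;
		enum rte_crypto_op_status op_status;
		int st = 0;

		/* PMD-specific context; its size is reported by the driver */
		ctx = malloc(rte_cryptodev_get_raw_dp_ctx_size(dev_id));
		if (ctx == NULL)
			return;

		sess_ctx.crypto_sess = sop->session;
		if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
				RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
			goto exit;

		/* one contiguous source segment */
		data_vec[0].base = rte_pktmbuf_mtod(sop->m_src, void *);
		data_vec[0].iova = rte_pktmbuf_iova(sop->m_src);
		data_vec[0].len = rte_pktmbuf_data_len(sop->m_src);

		/* VA pointers as in the CPU-crypto tests; a real HW path
		 * would also fill the .iova fields */
		iv.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
		digest.va = (void *)sop->aead.digest.data;
		aad.va = (void *)sop->aead.aad.data; /* or auth IV */

		/* enqueue one descriptor, then commit the batch of one */
		if (rte_cryptodev_raw_enqueue(ctx, data_vec, 1, ofs,
				&iv, &digest, &aad, (void *)op) < 0)
			goto exit;
		rte_cryptodev_raw_enqueue_done(ctx, 1);

		/* poll for the single result and acknowledge it */
		while (rte_cryptodev_raw_dequeue(ctx, &st, &op_status) == NULL)
			rte_pause();
		rte_cryptodev_raw_dequeue_done(ctx, 1);

		op->status = op_status;
	exit:
		free(ctx);
	}

Unlike process_crypto_request(), nothing here touches rte_cryptodev_enqueue_burst()/rte_cryptodev_dequeue_burst(): the context configured once per queue pair is what carries the session, which is what lets the tests below exercise the same vectors through either path.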
@@ -11144,6 +11814,9 @@ test_authenticated_decrypt_with_esn( if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_crypt_auth_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 1, 1, 0, 0); else ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); @@ -11284,10 +11957,21 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, if (sgl_in && (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL))) return -ENOTSUP; + + uint64_t feat_flags = dev_info.feature_flags; + + if ((global_api_test_type == CRYPTODEV_RAW_API_TEST) && + (!(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))) { + printf("Device doesn't support RAW data-path APIs.\n"); + return -ENOTSUP; + } } else { unsigned int sgl_in = fragsz < tdata->plaintext.len; unsigned int sgl_out = (fragsz_oop ? fragsz_oop : fragsz) < tdata->plaintext.len; + /* Raw data path API does not support OOP */ + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return -ENOTSUP; if (sgl_in && !sgl_out) { if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) @@ -11483,6 +12167,9 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, if (oop == IN_PLACE && gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op); + else if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + process_sym_raw_dp_op(ts_params->valid_devs[0], 0, + ut_params->op, 0, 0, 0, 0); else TEST_ASSERT_NOT_NULL( process_crypto_request(ts_params->valid_devs[0], @@ -12980,6 +13667,30 @@ test_cryptodev_bcmfs(void) return unit_test_suite_runner(&cryptodev_testsuite); } +static int +test_cryptodev_qat_raw_api(void /*argv __rte_unused, int argc __rte_unused*/) +{ + int ret; + + gbl_driver_id = rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)); + + if (gbl_driver_id == -1) { + RTE_LOG(ERR, USER1, "QAT PMD must be loaded. 
Check that both " + "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM " + "are enabled in config file to run this testsuite.\n"); + return TEST_SKIPPED; + } + + global_api_test_type = CRYPTODEV_RAW_API_TEST; + ret = unit_test_suite_runner(&cryptodev_testsuite); + global_api_test_type = CRYPTODEV_API_TEST; + + return ret; +} + +REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest, + test_cryptodev_qat_raw_api); REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat); REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb); REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest, diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h index c58126368..bf4f6c8d6 100644 --- a/app/test/test_cryptodev.h +++ b/app/test/test_cryptodev.h @@ -72,6 +72,13 @@ #define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym #define CRYPTODEV_NAME_BCMFS_PMD crypto_bcmfs +enum cryptodev_api_test_type { + CRYPTODEV_API_TEST = 0, + CRYPTODEV_RAW_API_TEST +}; + +extern enum cryptodev_api_test_type global_api_test_type; + /** * Write (spread) data from buffer to mbuf data * @@ -210,4 +217,9 @@ create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len, return NULL; } +void +process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, + struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth, + uint8_t len_in_bits, uint8_t cipher_iv_len); + #endif /* TEST_CRYPTODEV_H_ */ diff --git a/app/test/test_cryptodev_blockcipher.c b/app/test/test_cryptodev_blockcipher.c index 221262341..0ddc5e358 100644 --- a/app/test/test_cryptodev_blockcipher.c +++ b/app/test/test_cryptodev_blockcipher.c @@ -136,6 +136,14 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t, nb_segs = 3; } + if (global_api_test_type == CRYPTODEV_RAW_API_TEST && + !(feat_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) { + printf("Device doesn't support raw data-path APIs. 
" + "Test Skipped.\n"); + snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "SKIPPED"); + return TEST_SKIPPED; + } + if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) { uint64_t oop_flags = RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | @@ -148,6 +156,13 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t, "SKIPPED"); return TEST_SKIPPED; } + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) { + printf("Raw Data Path APIs do not support OOP, " + "Test Skipped.\n"); + snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "SKIPPED"); + status = TEST_SUCCESS; + goto error_exit; + } } if (tdata->cipher_key.len) @@ -462,25 +477,36 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t, } /* Process crypto operation */ - if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) { - snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, - "line %u FAILED: %s", - __LINE__, "Error sending packet for encryption"); - status = TEST_FAILED; - goto error_exit; - } + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) { + uint8_t is_cipher = 0, is_auth = 0; + if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) + is_cipher = 1; + if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH) + is_auth = 1; - op = NULL; + process_sym_raw_dp_op(dev_id, 0, op, is_cipher, is_auth, 0, + tdata->iv.len); + } else { + if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) { + snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, + "line %u FAILED: %s", + __LINE__, "Error sending packet for encryption"); + status = TEST_FAILED; + goto error_exit; + } - while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0) - rte_pause(); + op = NULL; - if (!op) { - snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, - "line %u FAILED: %s", - __LINE__, "Failed to process sym crypto op"); - status = TEST_FAILED; - goto error_exit; + while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0) + rte_pause(); + + if (!op) { + snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, + "line %u FAILED: %s", + __LINE__, "Failed to process sym crypto op"); + status = TEST_FAILED; + goto error_exit; + } } debug_hexdump(stdout, "m_src(after):",