[v10,1/4] cryptodev: change crypto symmetric vector structure

Message ID 20200924163417.49983-2-roy.fan.zhang@intel.com (mailing list archive)
State Changes Requested, archived
Delegated to: akhil goyal
Series cryptodev: add raw data-path APIs

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Fan Zhang Sept. 24, 2020, 4:34 p.m. UTC
  This patch updates the ``rte_crypto_sym_vec`` structure to add
support for both cpu_crypto synchronous operations and
asynchronous raw data-path APIs. The patch also includes
AESNI-MB and AESNI-GCM PMD changes, unit test changes and
documentation updates.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 app/test/test_cryptodev.c                  | 25 ++++++++------
 doc/guides/prog_guide/cryptodev_lib.rst    |  3 +-
 doc/guides/rel_notes/release_20_11.rst     |  3 ++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c   | 18 +++++-----
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c |  9 +++--
 lib/librte_cryptodev/rte_crypto_sym.h      | 40 ++++++++++++++++------
 lib/librte_ipsec/esp_inb.c                 | 12 +++----
 lib/librte_ipsec/esp_outb.c                | 12 +++----
 lib/librte_ipsec/misc.h                    |  6 ++--
 9 files changed, 79 insertions(+), 49 deletions(-)
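
As a quick orientation before the diff: with this change each IV/AAD/digest
entry is carried as a ``struct rte_crypto_va_iova_ptr`` (virtual address plus
IOVA) rather than a bare ``void *``. Below is a minimal sketch of filling the
reworked vector for a single CPU-crypto AEAD operation, mirroring the
app/test/test_cryptodev.c changes in the patch; ``op``, ``sop = op->sym``, the
prepared ``sgl`` and the test-local ``IV_OFFSET`` are assumed to exist already:

    struct rte_crypto_va_iova_ptr iv_ptr, aad_ptr, digest_ptr;
    struct rte_crypto_sym_vec symvec;
    int32_t st;

    symvec.sgl = &sgl;
    symvec.iv = &iv_ptr;
    symvec.aad = &aad_ptr;
    symvec.digest = &digest_ptr;
    symvec.status = &st;
    symvec.num = 1;

    /* for CPU crypto only the virtual addresses are needed;
     * the IOVA fields can be left unset */
    iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
    aad_ptr.va = (void *)sop->aead.aad.data;
    digest_ptr.va = (void *)sop->aead.digest.data;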
  

Comments

Dybkowski, AdamX Sept. 25, 2020, 8:03 a.m. UTC | #1
> -----Original Message-----
> From: Zhang, Roy Fan <roy.fan.zhang@intel.com>
> Sent: Thursday, 24 September, 2020 18:34
> To: dev@dpdk.org
> Cc: akhil.goyal@nxp.com; Trahe, Fiona <fiona.trahe@intel.com>; Kusztal,
> ArkadiuszX <arkadiuszx.kusztal@intel.com>; Dybkowski, AdamX
> <adamx.dybkowski@intel.com>; anoobj@marvell.com; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Zhang, Roy Fan
> <roy.fan.zhang@intel.com>
> Subject: [dpdk-dev v10 1/4] cryptodev: change crypto symmetric vector
> structure
> 
> This patch updates the ``rte_crypto_sym_vec`` structure to add support for
> both cpu_crypto synchronous operations and asynchronous raw data-path APIs.
> The patch also includes AESNI-MB and AESNI-GCM PMD changes, unit test
> changes and documentation updates.
> 
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>

Acked-by: Adam Dybkowski <adamx.dybkowski@intel.com>
  
Ananyev, Konstantin Sept. 28, 2020, 5:01 p.m. UTC | #2
> This patch updates the ``rte_crypto_sym_vec`` structure to add
> support for both cpu_crypto synchronous operations and
> asynchronous raw data-path APIs. The patch also includes
> AESNI-MB and AESNI-GCM PMD changes, unit test changes and
> documentation updates.
> 
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> ---

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

> 2.20.1
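
Before reading the diff itself, a hedged sketch of what a burst-mode caller of
the existing ``rte_cryptodev_sym_cpu_crypto_process()`` API looks like once the
per-element arrays become ``rte_crypto_va_iova_ptr`` and ``aad`` is folded into
a union shared with the new ``auth_iv`` member; ``MAX_BURST``, ``n``,
``dev_id``, ``sess`` and ``ofs`` are illustrative placeholders, not part of the
patch:

    struct rte_crypto_sgl sgl[MAX_BURST];              /* filled by the caller */
    struct rte_crypto_va_iova_ptr iv[MAX_BURST];
    struct rte_crypto_va_iova_ptr aad[MAX_BURST];
    struct rte_crypto_va_iova_ptr digest[MAX_BURST];
    int32_t status[MAX_BURST];

    struct rte_crypto_sym_vec vec = {
        .num = n,        /* n <= MAX_BURST operations prepared above */
        .sgl = sgl,
        .iv = iv,
        .aad = aad,      /* union member; a cipher+auth chain would set .auth_iv */
        .digest = digest,
        .status = status,
    };

    uint32_t k = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, &vec);
    /* k operations completed successfully; per-element results are reported
     * in status[]: 0 on success, an errno value on error */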
  

Patch

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 70bf6fe2c..99f1eed82 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -151,11 +151,11 @@  static void
 process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
 {
 	int32_t n, st;
-	void *iv;
 	struct rte_crypto_sym_op *sop;
 	union rte_crypto_sym_ofs ofs;
 	struct rte_crypto_sgl sgl;
 	struct rte_crypto_sym_vec symvec;
+	struct rte_crypto_va_iova_ptr iv_ptr, aad_ptr, digest_ptr;
 	struct rte_crypto_vec vec[UINT8_MAX];
 
 	sop = op->sym;
@@ -171,13 +171,17 @@  process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
 	sgl.vec = vec;
 	sgl.num = n;
 	symvec.sgl = &sgl;
-	iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
-	symvec.iv = &iv;
-	symvec.aad = (void **)&sop->aead.aad.data;
-	symvec.digest = (void **)&sop->aead.digest.data;
+	symvec.iv = &iv_ptr;
+	symvec.digest = &digest_ptr;
+	symvec.aad = &aad_ptr;
 	symvec.status = &st;
 	symvec.num = 1;
 
+	/* for CPU crypto the IOVA address is not required */
+	iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+	digest_ptr.va = (void *)sop->aead.digest.data;
+	aad_ptr.va = (void *)sop->aead.aad.data;
+
 	ofs.raw = 0;
 
 	n = rte_cryptodev_sym_cpu_crypto_process(dev_id, sop->session, ofs,
@@ -193,11 +197,11 @@  static void
 process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
 {
 	int32_t n, st;
-	void *iv;
 	struct rte_crypto_sym_op *sop;
 	union rte_crypto_sym_ofs ofs;
 	struct rte_crypto_sgl sgl;
 	struct rte_crypto_sym_vec symvec;
+	struct rte_crypto_va_iova_ptr iv_ptr, digest_ptr;
 	struct rte_crypto_vec vec[UINT8_MAX];
 
 	sop = op->sym;
@@ -213,13 +217,14 @@  process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
 	sgl.vec = vec;
 	sgl.num = n;
 	symvec.sgl = &sgl;
-	iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
-	symvec.iv = &iv;
-	symvec.aad = (void **)&sop->aead.aad.data;
-	symvec.digest = (void **)&sop->auth.digest.data;
+	symvec.iv = &iv_ptr;
+	symvec.digest = &digest_ptr;
 	symvec.status = &st;
 	symvec.num = 1;
 
+	iv_ptr.va = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+	digest_ptr.va = (void *)sop->auth.digest.data;
+
 	ofs.raw = 0;
 	ofs.ofs.cipher.head = sop->cipher.data.offset - sop->auth.data.offset;
 	ofs.ofs.cipher.tail = (sop->auth.data.offset + sop->auth.data.length) -
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index c14f750fa..e7ba35c2d 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -620,7 +620,8 @@  operation descriptor (``struct rte_crypto_sym_vec``) containing:
   descriptors of performed operations (``struct rte_crypto_sgl``). Each instance
   of ``struct rte_crypto_sgl`` consists of a number of segments and a pointer to
   an array of segment descriptors ``struct rte_crypto_vec``;
-- pointers to arrays of size ``num`` containing IV, AAD and digest information,
+- pointers to arrays of size ``num`` containing IV, AAD and digest information
+  in the ``cpu_crypto`` sub-structure,
 - pointer to an array of size ``num`` where status information will be stored
   for each operation.
 
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 73ac08fb0..20ebaef5b 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -135,6 +135,9 @@  API Changes
 
 * bpf: ``RTE_BPF_XTYPE_NUM`` has been dropped from ``rte_bpf_xtype``.
 
+* The structure ``rte_crypto_sym_vec`` is updated to support both cpu_crypto
+  synchronous operation and asynchronous raw data-path APIs.
+
 
 ABI Changes
 -----------
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1d2a0ce00..973b61bd6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -464,9 +464,10 @@  aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i], vec->aad[i]);
+			&vec->sgl[i], vec->iv[i].va,
+			vec->aad[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-			gdata_ctx, vec->digest[i]);
+			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
 	}
 
@@ -482,9 +483,10 @@  aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
 	processed = 0;
 	for (i = 0; i < vec->num; ++i) {
 		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i], vec->aad[i]);
+			&vec->sgl[i], vec->iv[i].va,
+			vec->aad[i].va);
 		 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-			gdata_ctx, vec->digest[i]);
+			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
 	}
 
@@ -505,9 +507,9 @@  aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i]);
+			&vec->sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-			gdata_ctx, vec->digest[i]);
+			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
 	}
 
@@ -528,9 +530,9 @@  aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
 		}
 
 		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i]);
+			&vec->sgl[i], vec->iv[i].va);
 		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-			gdata_ctx, vec->digest[i]);
+			gdata_ctx, vec->digest[i].va);
 		processed += (vec->status[i] == 0);
 	}
 
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 1bddbcf74..01b3bfc29 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1744,7 +1744,7 @@  generate_sync_dgst(struct rte_crypto_sym_vec *vec,
 
 	for (i = 0, k = 0; i != vec->num; i++) {
 		if (vec->status[i] == 0) {
-			memcpy(vec->digest[i], dgst[i], len);
+			memcpy(vec->digest[i].va, dgst[i], len);
 			k++;
 		}
 	}
@@ -1760,7 +1760,7 @@  verify_sync_dgst(struct rte_crypto_sym_vec *vec,
 
 	for (i = 0, k = 0; i != vec->num; i++) {
 		if (vec->status[i] == 0) {
-			if (memcmp(vec->digest[i], dgst[i], len) != 0)
+			if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
 				vec->status[i] = EBADMSG;
 			else
 				k++;
@@ -1823,9 +1823,8 @@  aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
 		}
 
 		/* Submit job for processing */
-		set_cpu_mb_job_params(job, s, sofs, buf, len,
-			vec->iv[i], vec->aad[i], tmp_dgst[i],
-			&vec->status[i]);
+		set_cpu_mb_job_params(job, s, sofs, buf, len, vec->iv[i].va,
+			vec->aad[i].va, tmp_dgst[i], &vec->status[i]);
 		job = submit_sync_job(mb_mgr);
 		j++;
 
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index f29c98051..8201189e0 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -51,26 +51,44 @@  struct rte_crypto_sgl {
 };
 
 /**
- * Synchronous operation descriptor.
- * Supposed to be used with CPU crypto API call.
+ * Crypto virtual and IOVA address descriptor, used to describe cryptographic
+ * data buffer without the length information. The length information is
+ * normally predefined during session creation.
+ */
+struct rte_crypto_va_iova_ptr {
+	void *va;
+	rte_iova_t *iova;
+};
+
+/**
+ * Raw data operation descriptor.
+ * Supposed to be used with synchronous CPU crypto API call or asynchronous
+ * RAW data path API call.
  */
 struct rte_crypto_sym_vec {
+	/** number of operations to perform */
+	uint32_t num;
 	/** array of SGL vectors */
 	struct rte_crypto_sgl *sgl;
-	/** array of pointers to IV */
-	void **iv;
-	/** array of pointers to AAD */
-	void **aad;
+	/** array of pointers to cipher IV */
+	struct rte_crypto_va_iova_ptr *iv;
 	/** array of pointers to digest */
-	void **digest;
+	struct rte_crypto_va_iova_ptr *digest;
+
+	__extension__
+	union {
+		/** array of pointers to auth IV, used for chain operation */
+		struct rte_crypto_va_iova_ptr *auth_iv;
+		/** array of pointers to AAD, used for AEAD operation */
+		struct rte_crypto_va_iova_ptr *aad;
+	};
+
 	/**
 	 * array of statuses for each operation:
-	 *  - 0 on success
-	 *  - errno on error
+	 * - 0 on success
+	 * - errno on error
 	 */
 	int32_t *status;
-	/** number of operations to perform */
-	uint32_t num;
 };
 
 /**
diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
index 96eec0131..2b1df6a03 100644
--- a/lib/librte_ipsec/esp_inb.c
+++ b/lib/librte_ipsec/esp_inb.c
@@ -693,9 +693,9 @@  cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
 	struct rte_ipsec_sa *sa;
 	struct replay_sqn *rsn;
 	union sym_op_data icv;
-	void *iv[num];
-	void *aad[num];
-	void *dgst[num];
+	struct rte_crypto_va_iova_ptr iv[num];
+	struct rte_crypto_va_iova_ptr aad[num];
+	struct rte_crypto_va_iova_ptr dgst[num];
 	uint32_t dr[num];
 	uint32_t l4ofs[num];
 	uint32_t clen[num];
@@ -720,9 +720,9 @@  cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
 				l4ofs + k, rc, ivbuf[k]);
 
 			/* fill iv, digest and aad */
-			iv[k] = ivbuf[k];
-			aad[k] = icv.va + sa->icv_len;
-			dgst[k++] = icv.va;
+			iv[k].va = ivbuf[k];
+			aad[k].va = icv.va + sa->icv_len;
+			dgst[k++].va = icv.va;
 		} else {
 			dr[i - k] = i;
 			rte_errno = -rc;
diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
index fb9d5864c..1e181cf2c 100644
--- a/lib/librte_ipsec/esp_outb.c
+++ b/lib/librte_ipsec/esp_outb.c
@@ -449,9 +449,9 @@  cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
 	uint32_t i, k, n;
 	uint32_t l2, l3;
 	union sym_op_data icv;
-	void *iv[num];
-	void *aad[num];
-	void *dgst[num];
+	struct rte_crypto_va_iova_ptr iv[num];
+	struct rte_crypto_va_iova_ptr aad[num];
+	struct rte_crypto_va_iova_ptr dgst[num];
 	uint32_t dr[num];
 	uint32_t l4ofs[num];
 	uint32_t clen[num];
@@ -488,9 +488,9 @@  cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
 				ivbuf[k]);
 
 			/* fill iv, digest and aad */
-			iv[k] = ivbuf[k];
-			aad[k] = icv.va + sa->icv_len;
-			dgst[k++] = icv.va;
+			iv[k].va = ivbuf[k];
+			aad[k].va = icv.va + sa->icv_len;
+			dgst[k++].va = icv.va;
 		} else {
 			dr[i - k] = i;
 			rte_errno = -rc;
diff --git a/lib/librte_ipsec/misc.h b/lib/librte_ipsec/misc.h
index 1b543ed87..79b9a2076 100644
--- a/lib/librte_ipsec/misc.h
+++ b/lib/librte_ipsec/misc.h
@@ -112,7 +112,9 @@  mbuf_cut_seg_ofs(struct rte_mbuf *mb, struct rte_mbuf *ms, uint32_t ofs,
 static inline void
 cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 	union rte_crypto_sym_ofs ofs, struct rte_mbuf *mb[],
-	void *iv[], void *aad[], void *dgst[], uint32_t l4ofs[],
+	struct rte_crypto_va_iova_ptr iv[],
+	struct rte_crypto_va_iova_ptr aad[],
+	struct rte_crypto_va_iova_ptr dgst[], uint32_t l4ofs[],
 	uint32_t clen[], uint32_t num)
 {
 	uint32_t i, j, n;
@@ -136,8 +138,8 @@  cpu_crypto_bulk(const struct rte_ipsec_session *ss,
 			/* fill the request structure */
 			symvec.sgl = &vecpkt[j];
 			symvec.iv = &iv[j];
-			symvec.aad = &aad[j];
 			symvec.digest = &dgst[j];
+			symvec.aad = &aad[j];
 			symvec.status = &st[j];
 			symvec.num = i - j;