[v5,09/15] crypto/dpaa2_sec: support OOP with raw buffer API

Message ID 20211017161651.9220-10-hemant.agrawal@nxp.com (mailing list archive)
State Accepted, archived
Delegated to: akhil goyal
Headers
Series crypto: add raw vector support in DPAAx |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Hemant Agrawal Oct. 17, 2021, 4:16 p.m. UTC
  From: Gagandeep Singh <g.singh@nxp.com>

add support for out-of-place (OOP) processing, i.e. separate source and
destination buffers, with raw vector APIs.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c | 156 +++++++++++++++-----
 2 files changed, 116 insertions(+), 41 deletions(-)
  

Patch

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index f397b756e8..05bd7c0736 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -179,6 +179,7 @@  typedef int (*dpaa2_sec_build_fd_t)(
 
 typedef int (*dpaa2_sec_build_raw_dp_fd_t)(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 5c29c61f9d..4f78cef9c0 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -24,6 +24,7 @@  struct dpaa2_sec_raw_dp_ctx {
 static int
 build_raw_dp_chain_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -89,17 +90,33 @@  build_raw_dp_chain_fd(uint8_t *drv_ctx,
 			(cipher_len + icv_len) :
 			cipher_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -160,6 +177,7 @@  build_raw_dp_chain_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_aead_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -219,17 +237,33 @@  build_raw_dp_aead_fd(uint8_t *drv_ctx,
 			(aead_len + icv_len) :
 			aead_len;
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
-	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 
 	if (sess->dir == DIR_ENC) {
@@ -294,6 +328,7 @@  build_raw_dp_aead_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_auth_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -303,6 +338,7 @@  build_raw_dp_auth_fd(uint8_t *drv_ctx,
 {
 	RTE_SET_USED(iv);
 	RTE_SET_USED(auth_iv);
+	RTE_SET_USED(dest_sgl);
 
 	dpaa2_sec_session *sess =
 		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
@@ -416,6 +452,7 @@  build_raw_dp_auth_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_proto_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -466,20 +503,39 @@  build_raw_dp_proto_fd(uint8_t *drv_ctx,
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
 
-	/* Configure Output SGE for Encap/Decap */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, 0);
-	sge->length = sgl->vec[0].len;
-	out_len += sge->length;
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+	/* OOP */
+	if (dest_sgl) {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
 		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		sge->length = dest_sgl->vec[0].len;
+		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = dest_sgl->vec[i - 1].tot_len;
+
+	} else {
+		/* Configure Output SGE for Encap/Decap */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, 0);
+		sge->length = sgl->vec[0].len;
 		out_len += sge->length;
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+			out_len += sge->length;
+		}
+		sge->length = sgl->vec[i - 1].tot_len;
 	}
-	sge->length = sgl->vec[i - 1].tot_len;
 	out_len += sge->length;
 
 	DPAA2_SET_FLE_FIN(sge);
@@ -528,6 +584,7 @@  build_raw_dp_proto_fd(uint8_t *drv_ctx,
 static int
 build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 		       struct rte_crypto_sgl *sgl,
+		       struct rte_crypto_sgl *dest_sgl,
 		       struct rte_crypto_va_iova_ptr *iv,
 		       struct rte_crypto_va_iova_ptr *digest,
 		       struct rte_crypto_va_iova_ptr *auth_iv,
@@ -593,17 +650,33 @@  build_raw_dp_cipher_fd(uint8_t *drv_ctx,
 	op_fle->length = data_len;
 	DPAA2_SET_FLE_SG_EXT(op_fle);
 
-	/* o/p 1st seg */
-	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
-	DPAA2_SET_FLE_OFFSET(sge, data_offset);
-	sge->length = sgl->vec[0].len - data_offset;
+	/* OOP */
+	if (dest_sgl) {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = dest_sgl->vec[0].len - data_offset;
 
-	/* o/p segs */
-	for (i = 1; i < sgl->num; i++) {
-		sge++;
-		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
-		DPAA2_SET_FLE_OFFSET(sge, 0);
-		sge->length = sgl->vec[i].len;
+		/* o/p segs */
+		for (i = 1; i < dest_sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = dest_sgl->vec[i].len;
+		}
+	} else {
+		/* o/p 1st seg */
+		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+		DPAA2_SET_FLE_OFFSET(sge, data_offset);
+		sge->length = sgl->vec[0].len - data_offset;
+
+		/* o/p segs */
+		for (i = 1; i < sgl->num; i++) {
+			sge++;
+			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+			DPAA2_SET_FLE_OFFSET(sge, 0);
+			sge->length = sgl->vec[i].len;
+		}
 	}
 	DPAA2_SET_FLE_FIN(sge);
 
@@ -706,6 +779,7 @@  dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
 			ret = sess->build_raw_dp_fd(drv_ctx,
 						    &vec->src_sgl[loop],
+						    &vec->dest_sgl[loop],
 						    &vec->iv[loop],
 						    &vec->digest[loop],
 						    &vec->auth_iv[loop],