[v3,2/2] crypto/nitrox: support cipher only crypto operations

Message ID 20201009055725.47329-3-rnagadheeraj@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Akhil Goyal
Series: Add AES-GCM and cipher only offload support

Checks

Context                      Check    Description
ci/checkpatch                success  coding style OK
ci/iol-broadcom-Performance  success  Performance Testing PASS
ci/iol-broadcom-Functional   success  Functional Testing PASS
ci/iol-intel-Performance     success  Performance Testing PASS
ci/iol-testing               success  Testing PASS
ci/iol-mellanox-Performance  success  Performance Testing PASS
ci/Intel-compilation         fail     apply issues

Commit Message

Nagadheeraj Rottela Oct. 9, 2020, 5:57 a.m. UTC
  Add support for cipher only crypto operations.
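
  For reference, a cipher only operation uses a single cipher xform with
  no chained transform. A minimal sketch (the key, IV offset and
  algorithm below are illustrative placeholders, not taken from this
  patch):

	#include <rte_crypto.h>

	/* Placeholder key and IV offset for illustration only. */
	static const uint8_t aes_key[16];
	#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
			   sizeof(struct rte_crypto_sym_op))

	struct rte_crypto_sym_xform xform = {
		.next = NULL,	/* nothing chained: cipher only */
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = aes_key, .length = sizeof(aes_key) },
			.iv = { .offset = IV_OFFSET, .length = 16 },
		},
	};

  get_crypto_chain_order() classifies such an xform as
  NITROX_CHAIN_CIPHER_ONLY, which the changes below handle.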

Signed-off-by: Nagadheeraj Rottela <rnagadheeraj@marvell.com>
---
 doc/guides/cryptodevs/nitrox.rst          |   2 -
 doc/guides/rel_notes/release_20_11.rst    |   5 +
 drivers/crypto/nitrox/nitrox_sym.c        |   3 +
 drivers/crypto/nitrox/nitrox_sym_reqmgr.c | 189 ++++++++++++++++------
 4 files changed, 148 insertions(+), 51 deletions(-)
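
The cipher only path below builds the input SG list as [IV, payload] and
the output SG list as [ORH, IV, payload, completion word]; create_sgcomp()
then packs the flat sglist entries four at a time into hardware sgcomp
structures. A standalone sketch of that packing arithmetic (hypothetical
demo program, not part of the patch):

	#include <stdio.h>
	#include <rte_common.h>

	int main(void)
	{
		/* create_sgcomp() stores four sglist entries per sgcomp, so
		 * the component count is the entry count rounded up to a
		 * multiple of four, then divided by four. */
		for (int n = 1; n <= 8; n++)
			printf("map_bufs_cnt=%d -> nr_sgcomp=%d\n",
			       n, RTE_ALIGN_MUL_CEIL(n, 4) / 4);
		return 0;
	}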

Patch

diff --git a/doc/guides/cryptodevs/nitrox.rst b/doc/guides/cryptodevs/nitrox.rst
index 91fca905a..095e545c6 100644
--- a/doc/guides/cryptodevs/nitrox.rst
+++ b/doc/guides/cryptodevs/nitrox.rst
@@ -33,8 +33,6 @@  Supported AEAD algorithms:
 Limitations
 -----------
 
-* AES_CBC Cipher Only combination is not supported.
-* 3DES Cipher Only combination is not supported.
 * Session-less APIs are not supported.
 
 Installation
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 73ac08fb0..ddcf90356 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -55,6 +55,11 @@  New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Marvell NITROX symmetric crypto PMD.**
+
+  * Added cipher only offload support.
+  * Added AES-GCM support.
+
 
 Removed Items
 -------------
diff --git a/drivers/crypto/nitrox/nitrox_sym.c b/drivers/crypto/nitrox/nitrox_sym.c
index fe3ee6e23..2768bdd2e 100644
--- a/drivers/crypto/nitrox/nitrox_sym.c
+++ b/drivers/crypto/nitrox/nitrox_sym.c
@@ -550,6 +550,9 @@  nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
 	ctx = mp_obj;
 	ctx->nitrox_chain = get_crypto_chain_order(xform);
 	switch (ctx->nitrox_chain) {
+	case NITROX_CHAIN_CIPHER_ONLY:
+		cipher_xform = &xform->cipher;
+		break;
 	case NITROX_CHAIN_CIPHER_AUTH:
 		cipher_xform = &xform->cipher;
 		auth_xform = &xform->next->auth;
diff --git a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
index 113ce5d11..fe3ca25a0 100644
--- a/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
+++ b/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
@@ -247,38 +247,6 @@  softreq_copy_iv(struct nitrox_softreq *sr, uint8_t salt_size)
 	sr->iv.len = sr->ctx->iv.length - salt_size;
 }
 
-static int
-extract_cipher_auth_digest(struct nitrox_softreq *sr,
-			   struct nitrox_sglist *digest)
-{
-	struct rte_crypto_op *op = sr->op;
-	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
-					op->sym->m_src;
-
-	if (sr->ctx->req_op == NITROX_OP_DECRYPT &&
-	    unlikely(!op->sym->auth.digest.data))
-		return -EINVAL;
-
-	digest->len = sr->ctx->digest_length;
-	if (op->sym->auth.digest.data) {
-		digest->iova = op->sym->auth.digest.phys_addr;
-		digest->virt = op->sym->auth.digest.data;
-		return 0;
-	}
-
-	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
-	       op->sym->auth.data.length + digest->len))
-		return -EINVAL;
-
-	digest->iova = rte_pktmbuf_iova_offset(mdst,
-					op->sym->auth.data.offset +
-					op->sym->auth.data.length);
-	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
-					op->sym->auth.data.offset +
-					op->sym->auth.data.length);
-	return 0;
-}
-
 static void
 fill_sglist(struct nitrox_sgtable *sgtbl, uint16_t len, rte_iova_t iova,
 	    void *virt)
@@ -340,6 +308,143 @@  create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
 	return 0;
 }
 
+static void
+create_sgcomp(struct nitrox_sgtable *sgtbl)
+{
+	int i, j, nr_sgcomp;
+	struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
+	struct nitrox_sglist *sglist = sgtbl->sglist;
+
+	nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
+	sgtbl->nr_sgcomp = nr_sgcomp;
+	for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
+		for (j = 0; j < 4; j++, sglist++) {
+			sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
+			sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
+		}
+	}
+}
+
+static int
+create_cipher_inbuf(struct nitrox_softreq *sr)
+{
+	int err;
+	struct rte_crypto_op *op = sr->op;
+
+	fill_sglist(&sr->in, sr->iv.len, sr->iv.iova, sr->iv.virt);
+	err = create_sglist_from_mbuf(&sr->in, op->sym->m_src,
+				      op->sym->cipher.data.offset,
+				      op->sym->cipher.data.length);
+	if (unlikely(err))
+		return err;
+
+	create_sgcomp(&sr->in);
+	sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
+
+	return 0;
+}
+
+static int
+create_cipher_outbuf(struct nitrox_softreq *sr)
+{
+	struct rte_crypto_op *op = sr->op;
+	int err, cnt = 0;
+	struct rte_mbuf *m_dst = op->sym->m_dst ? op->sym->m_dst :
+		op->sym->m_src;
+
+	sr->resp.orh = PENDING_SIG;
+	sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
+	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
+						       resp.orh);
+	sr->out.sglist[cnt].virt = &sr->resp.orh;
+	cnt++;
+
+	sr->out.map_bufs_cnt = cnt;
+	fill_sglist(&sr->out, sr->iv.len, sr->iv.iova, sr->iv.virt);
+	err = create_sglist_from_mbuf(&sr->out, m_dst,
+				      op->sym->cipher.data.offset,
+				      op->sym->cipher.data.length);
+	if (unlikely(err))
+		return err;
+
+	cnt = sr->out.map_bufs_cnt;
+	sr->resp.completion = PENDING_SIG;
+	sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
+	sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
+						     resp.completion);
+	sr->out.sglist[cnt].virt = &sr->resp.completion;
+	cnt++;
+
+	RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
+	sr->out.map_bufs_cnt = cnt;
+
+	create_sgcomp(&sr->out);
+	sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
+
+	return 0;
+}
+
+static void
+create_cipher_gph(uint32_t cryptlen, uint16_t ivlen, struct gphdr *gph)
+{
+	gph->param0 = rte_cpu_to_be_16(cryptlen);
+	gph->param1 = 0;
+	gph->param2 = rte_cpu_to_be_16(ivlen);
+	gph->param3 = 0;
+}
+
+static int
+process_cipher_data(struct nitrox_softreq *sr)
+{
+	struct rte_crypto_op *op = sr->op;
+	int err;
+
+	softreq_copy_iv(sr, 0);
+	err = create_cipher_inbuf(sr);
+	if (unlikely(err))
+		return err;
+
+	err = create_cipher_outbuf(sr);
+	if (unlikely(err))
+		return err;
+
+	create_cipher_gph(op->sym->cipher.data.length, sr->iv.len, &sr->gph);
+
+	return 0;
+}
+
+static int
+extract_cipher_auth_digest(struct nitrox_softreq *sr,
+			   struct nitrox_sglist *digest)
+{
+	struct rte_crypto_op *op = sr->op;
+	struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
+					op->sym->m_src;
+
+	if (sr->ctx->req_op == NITROX_OP_DECRYPT &&
+	    unlikely(!op->sym->auth.digest.data))
+		return -EINVAL;
+
+	digest->len = sr->ctx->digest_length;
+	if (op->sym->auth.digest.data) {
+		digest->iova = op->sym->auth.digest.phys_addr;
+		digest->virt = op->sym->auth.digest.data;
+		return 0;
+	}
+
+	if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
+	       op->sym->auth.data.length + digest->len))
+		return -EINVAL;
+
+	digest->iova = rte_pktmbuf_iova_offset(mdst,
+					op->sym->auth.data.offset +
+					op->sym->auth.data.length);
+	digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
+					op->sym->auth.data.offset +
+					op->sym->auth.data.length);
+	return 0;
+}
+
 static int
 create_cipher_auth_sglist(struct nitrox_softreq *sr,
 			  struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf)
@@ -408,23 +513,6 @@  create_aead_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
 	return err;
 }
 
-static void
-create_sgcomp(struct nitrox_sgtable *sgtbl)
-{
-	int i, j, nr_sgcomp;
-	struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
-	struct nitrox_sglist *sglist = sgtbl->sglist;
-
-	nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
-	sgtbl->nr_sgcomp = nr_sgcomp;
-	for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
-		for (j = 0; j < 4; j++, sglist++) {
-			sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
-			sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
-		}
-	}
-}
-
 static int
 create_aead_inbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
 {
@@ -661,6 +749,9 @@  process_softreq(struct nitrox_softreq *sr)
 	int err = 0;
 
 	switch (ctx->nitrox_chain) {
+	case NITROX_CHAIN_CIPHER_ONLY:
+		err = process_cipher_data(sr);
+		break;
 	case NITROX_CHAIN_CIPHER_AUTH:
 	case NITROX_CHAIN_AUTH_CIPHER:
 		err = process_cipher_auth_data(sr);