[6/8] crypto/cnxk: add cn9k lookaside IPsec datapath

Message ID: 20210902134254.28373-7-marchana@marvell.com (mailing list archive)
State: Superseded, archived
Delegated to: Akhil Goyal
Series: add cn9k lookaside IPsec support

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Archana Muniganti Sept. 2, 2021, 1:42 p.m. UTC
Adds support for cn9k lookaside IPsec enqueue and dequeue operations.
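
For reference, a minimal sketch of how an application would drive this
datapath through the generic rte_security lookaside API (the helper name
and the busy-poll loop are illustrative assumptions, not part of this
patch):

    #include <rte_cryptodev.h>
    #include <rte_mbuf.h>
    #include <rte_security.h>

    /* Hypothetical helper: submit one packet on an established lookaside
     * IPsec session and busy-poll until the PMD completes it. */
    static int
    submit_ipsec_pkt(uint8_t dev_id, uint16_t qp_id,
                     struct rte_security_session *sess,
                     struct rte_crypto_op *op, struct rte_mbuf *pkt)
    {
            /* In-place only: the PMD rejects m_dst != m_src, and the
             * mbuf must be contiguous (no scatter-gather). */
            op->sym->m_src = pkt;
            rte_security_attach_session(op, sess);

            if (rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) != 1)
                    return -1;

            while (rte_cryptodev_dequeue_burst(dev_id, qp_id, &op, 1) == 0)
                    ;

            return op->status == RTE_CRYPTO_OP_STATUS_SUCCESS ? 0 : -1;
    }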

Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 78 +++++++++++++++++++-
 drivers/crypto/cnxk/cn9k_ipsec_la_ops.h  | 90 ++++++++++++++++++++++++
 2 files changed, 166 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/cnxk/cn9k_ipsec_la_ops.h

Patch

diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 8ade1977e1..40109acc3f 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -5,10 +5,13 @@ 
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
 #include <rte_event_crypto_adapter.h>
+#include <rte_ip.h>
 #include <rte_vect.h>
 
 #include "cn9k_cryptodev.h"
 #include "cn9k_cryptodev_ops.h"
+#include "cn9k_ipsec.h"
+#include "cn9k_ipsec_la_ops.h"
 #include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
@@ -34,6 +37,36 @@  cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
 	return ret;
 }
 
+static __rte_always_inline int __rte_hot
+cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
+		       struct cpt_inflight_req *infl_req,
+		       struct cpt_inst_s *inst)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct cn9k_sec_session *priv;
+	struct cn9k_ipsec_sa *sa;
+
+	if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
+		plt_dp_err("Out of place is not supported");
+		return -ENOTSUP;
+	}
+
+	if (unlikely(!rte_pktmbuf_is_contiguous(sym_op->m_src))) {
+		plt_dp_err("Scatter Gather mode is not supported");
+		return -ENOTSUP;
+	}
+
+	priv = get_sec_session_private_data(op->sym->sec_session);
+	sa = &priv->sa;
+
+	if (sa->dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+		return process_outb_sa(op, sa, inst);
+
+	infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
+
+	return process_inb_sa(op, sa, inst);
+}
+
 static inline struct cnxk_se_sess *
 cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
 {
@@ -80,7 +113,10 @@  cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
 				sym_op->session, cn9k_cryptodev_driver_id);
 			ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
 						     inst);
-		} else {
+			inst->w7.u64 = sess->cpt_inst_w7;
+		} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+			ret = cn9k_cpt_sec_inst_fill(op, infl_req, inst);
+		else {
 			sess = cn9k_cpt_sym_temp_sess_create(qp, op);
 			if (unlikely(sess == NULL)) {
 				plt_dp_err("Could not create temp session");
@@ -94,8 +130,8 @@  cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
 						  op->sym->session);
 				rte_mempool_put(qp->sess_mp, op->sym->session);
 			}
+			inst->w7.u64 = sess->cpt_inst_w7;
 		}
-		inst->w7.u64 = sess->cpt_inst_w7;
 	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
 		struct rte_crypto_asym_op *asym_op;
 		struct cnxk_ae_sess *sess;
@@ -348,6 +384,39 @@  cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
 	return 1;
 }
 
+static inline void
+cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
+			  struct cpt_inflight_req *infl_req)
+{
+	struct rte_crypto_sym_op *sym_op = cop->sym;
+	struct rte_mbuf *m = sym_op->m_src;
+	struct rte_ipv6_hdr *ip6;
+	struct rte_ipv4_hdr *ip;
+	uint16_t m_len = 0;
+	char *data;
+
+	if (infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND) {
+		data = rte_pktmbuf_mtod(m, char *);
+
+		ip = (struct rte_ipv4_hdr *)(data + ROC_IE_ON_INB_RPTR_HDR);
+
+		if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
+		    IPVERSION) {
+			m_len = rte_be_to_cpu_16(ip->total_length);
+		} else {
+			PLT_ASSERT(((ip->version_ihl & 0xf0) >>
+				    RTE_IPV4_IHL_MULTIPLIER) == 6);
+			ip6 = (struct rte_ipv6_hdr *)ip;
+			m_len = rte_be_to_cpu_16(ip6->payload_len) +
+				sizeof(struct rte_ipv6_hdr);
+		}
+
+		m->data_len = m_len;
+		m->pkt_len = m_len;
+		m->data_off += ROC_IE_ON_INB_RPTR_HDR;
+	}
+}
+
 static inline void
 cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
 			      struct cpt_inflight_req *infl_req)
@@ -370,6 +439,11 @@  cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
 
 		cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+			if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+				cn9k_cpt_sec_post_process(cop, infl_req);
+				return;
+			}
+
 			/* Verify authentication data if required */
 			if (unlikely(infl_req->op_flags &
 				     CPT_OP_FLAGS_AUTH_VERIFY)) {
diff --git a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
new file mode 100644
index 0000000000..b7a88e1b35
--- /dev/null
+++ b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
@@ -0,0 +1,90 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef __CN9K_IPSEC_LA_OPS_H__
+#define __CN9K_IPSEC_LA_OPS_H__
+
+#include <rte_crypto_sym.h>
+#include <rte_security.h>
+
+#include "cn9k_ipsec.h"
+
+static __rte_always_inline int32_t
+ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
+{
+	uint32_t enc_payload_len;
+
+	enc_payload_len = RTE_ALIGN_CEIL(plen + sa->rlens.roundup_len,
+					 sa->rlens.roundup_byte);
+
+	return sa->rlens.partial_len + enc_payload_len;
+}
+
+static __rte_always_inline int
+process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
+		struct cpt_inst_s *inst)
+{
+	const unsigned int hdr_len = sizeof(struct roc_ie_on_outb_hdr);
+	struct rte_crypto_sym_op *sym_op = cop->sym;
+	struct rte_mbuf *m_src = sym_op->m_src;
+	uint32_t dlen, rlen, extend_tail;
+	struct roc_ie_on_outb_sa *out_sa;
+	struct roc_ie_on_outb_hdr *hdr;
+
+	out_sa = &sa->out_sa;
+
+	dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
+	rlen = ipsec_po_out_rlen_get(sa, dlen - hdr_len);
+
+	extend_tail = rlen - dlen;
+	if (unlikely(extend_tail > rte_pktmbuf_tailroom(m_src))) {
+		plt_dp_err("Not enough tail room");
+		return -ENOMEM;
+	}
+
+	m_src->data_len += extend_tail;
+	m_src->pkt_len += extend_tail;
+
+	hdr = (struct roc_ie_on_outb_hdr *)rte_pktmbuf_prepend(m_src, hdr_len);
+	if (unlikely(hdr == NULL)) {
+		plt_dp_err("Not enough head room");
+		return -ENOMEM;
+	}
+
+	memcpy(&hdr->iv[0],
+	       rte_crypto_op_ctod_offset(cop, uint8_t *, sa->cipher_iv_off),
+	       sa->cipher_iv_len);
+	hdr->seq = rte_cpu_to_be_32(sa->seq_lo);
+	hdr->ip_id = rte_cpu_to_be_32(sa->ip_id);
+
+	out_sa->common_sa.esn_hi = sa->seq_hi;
+
+	sa->ip_id++;
+	sa->esn++;
+
+	/* Prepare CPT instruction */
+	inst->w4.u64 = sa->inst.w4 | dlen;
+	inst->dptr = rte_pktmbuf_iova(m_src);
+	inst->rptr = inst->dptr;
+	inst->w7.u64 = sa->inst.w7;
+
+	return 0;
+}
+
+static __rte_always_inline int
+process_inb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
+	       struct cpt_inst_s *inst)
+{
+	struct rte_crypto_sym_op *sym_op = cop->sym;
+	struct rte_mbuf *m_src = sym_op->m_src;
+
+	/* Prepare CPT instruction */
+	inst->w4.u64 = sa->inst.w4 | rte_pktmbuf_pkt_len(m_src);
+	inst->dptr = rte_pktmbuf_iova(m_src);
+	inst->rptr = inst->dptr;
+	inst->w7.u64 = sa->inst.w7;
+
+	return 0;
+}
+#endif /* __CN9K_IPSEC_LA_OPS_H__ */
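
A note on the two helpers above. On the outbound side, process_outb_sa()
extends the mbuf tail before submission so the hardware has room for the
ESP padding and trailer, sized by ipsec_po_out_rlen_get(): the expected
output length is the fixed per-SA overhead (partial_len) plus the payload
rounded up to the cipher block boundary. A standalone sketch of that
arithmetic, with purely illustrative SA values (the real roundup_len,
roundup_byte and partial_len are derived from the SA's algorithms
elsewhere in the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Same rounding as RTE_ALIGN_CEIL for positive alignments */
    #define ALIGN_CEIL(v, a) ((((v) + (a) - 1) / (a)) * (a))

    int main(void)
    {
            uint32_t plen = 100;        /* payload length to encrypt */
            uint32_t roundup_len = 2;   /* e.g. ESP pad-len + next-header */
            uint32_t roundup_byte = 16; /* e.g. AES block size */
            uint32_t partial_len = 28;  /* e.g. ESP header + IV + ICV */

            uint32_t rlen = partial_len +
                            ALIGN_CEIL(plen + roundup_len, roundup_byte);

            printf("expected output length: %u\n", rlen); /* 28 + 112 = 140 */
            return 0;
    }

On the inbound side there is no length bookkeeping at enqueue time;
cn9k_cpt_sec_post_process() instead recovers the packet length from the
decrypted inner IP header at dequeue (IPv4 total_length, or IPv6
payload_len plus the fixed header size) and advances data_off past the
ROC_IE_ON_INB_RPTR_HDR bytes that precede the packet in the result buffer.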