[v2,14/15] crypto/octeontx2: add inline tx path changes

Message ID: 1579344553-11428-15-git-send-email-anoobj@marvell.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Akhil Goyal
Series: add OCTEONTX2 inline IPsec support

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Anoob Joseph Jan. 18, 2020, 10:49 a.m. UTC
  From: Ankur Dwivedi <adwivedi@marvell.com>

Adding pre-processing required for inline IPsec outbound packets.

Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
 drivers/crypto/octeontx2/otx2_security.c    |  82 +++++++++++++
 drivers/crypto/octeontx2/otx2_security.h    |  60 ++++++++++
 drivers/crypto/octeontx2/otx2_security_tx.h | 175 ++++++++++++++++++++++++++++
 drivers/event/octeontx2/meson.build         |   3 +-
 drivers/event/octeontx2/otx2_worker.h       |   6 +
 5 files changed, 325 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/octeontx2/otx2_security_tx.h
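
For reference, a minimal sketch (not part of the patch) of how the per-session constants set up in ipsec_sa_const_set() feed the output length computation in otx2_ipsec_fp_out_rlen_get() below. The field names and the AES-GCM values are taken from the patch; the standalone harness and the ALIGN_CEIL helper are illustrative only.

/* Standalone illustration of the outbound length pre-computation. */
#include <stdint.h>
#include <stdio.h>

/* Same behaviour as DPDK's RTE_ALIGN_CEIL for power-of-two alignments */
#define ALIGN_CEIL(v, align) (((v) + (align) - 1) & ~((uint32_t)((align) - 1)))

struct sess_consts {
	uint8_t partial_len;   /* fixed overhead: outer IP + ESP hdr + IV + ICV */
	uint8_t roundup_len;   /* ESP trailer added before padding */
	uint8_t roundup_byte;  /* cipher block alignment of the payload */
};

static uint32_t
out_rlen_get(const struct sess_consts *s, uint32_t plen)
{
	uint32_t enc_payload_len = ALIGN_CEIL(plen + s->roundup_len,
					      s->roundup_byte);

	return s->partial_len + enc_payload_len;
}

int
main(void)
{
	/* AES-GCM tunnel case: 20 (IPv4) + 8 (ESP) + 8 (IV) + 16 (ICV) = 52 */
	struct sess_consts gcm = {
		.partial_len = 52,
		.roundup_len = 2,	/* sizeof(struct rte_esp_tail) */
		.roundup_byte = 4,
	};

	/* 100-byte inner packet -> 52 + roundup(102, 4) = 156 bytes on the wire */
	printf("%u\n", out_rlen_get(&gcm, 100));

	return 0;
}

Pre-computing these constants at session-create time keeps the per-packet TX path down to a single alignment and an addition.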
  

Patch

diff --git a/drivers/crypto/octeontx2/otx2_security.c b/drivers/crypto/octeontx2/otx2_security.c
index ab488a0..9a08849 100644
--- a/drivers/crypto/octeontx2/otx2_security.c
+++ b/drivers/crypto/octeontx2/otx2_security.c
@@ -3,12 +3,15 @@ 
  */
 
 #include <rte_cryptodev.h>
+#include <rte_esp.h>
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
+#include <rte_ip.h>
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_security.h>
 #include <rte_security_driver.h>
+#include <rte_udp.h>
 
 #include "otx2_common.h"
 #include "otx2_cryptodev_qp.h"
@@ -18,6 +21,15 @@ 
 
 #define SEC_ETH_MAX_PKT_LEN	1450
 
+#define AH_HDR_LEN	12
+#define AES_GCM_IV_LEN	8
+#define AES_GCM_MAC_LEN	16
+#define AES_CBC_IV_LEN	16
+#define SHA1_HMAC_LEN	12
+
+#define AES_GCM_ROUNDUP_BYTE_LEN	4
+#define AES_CBC_ROUNDUP_BYTE_LEN	16
+
 struct sec_eth_tag_const {
 	RTE_STD_C11
 	union {
@@ -239,6 +251,60 @@  in_sa_get(uint16_t port, int sa_index)
 }
 
 static int
+ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec,
+		   struct rte_crypto_sym_xform *xform,
+		   struct otx2_sec_session_ipsec_ip *sess)
+{
+	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
+
+	sess->partial_len = sizeof(struct rte_ipv4_hdr);
+
+	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		sess->partial_len += sizeof(struct rte_esp_hdr);
+		sess->roundup_len = sizeof(struct rte_esp_tail);
+	} else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
+		sess->partial_len += AH_HDR_LEN;
+	} else {
+		return -EINVAL;
+	}
+
+	if (ipsec->options.udp_encap)
+		sess->partial_len += sizeof(struct rte_udp_hdr);
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
+			sess->partial_len += AES_GCM_IV_LEN;
+			sess->partial_len += AES_GCM_MAC_LEN;
+			sess->roundup_byte = AES_GCM_ROUNDUP_BYTE_LEN;
+		}
+		return 0;
+	}
+
+	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		cipher_xform = xform;
+		auth_xform = xform->next;
+	} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		auth_xform = xform;
+		cipher_xform = xform->next;
+	} else {
+		return -EINVAL;
+	}
+	if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+		sess->partial_len += AES_CBC_IV_LEN;
+		sess->roundup_byte = AES_CBC_ROUNDUP_BYTE_LEN;
+	} else {
+		return -EINVAL;
+	}
+
+	if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
+		sess->partial_len += SHA1_HMAC_LEN;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
 sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
 			      struct rte_security_ipsec_xform *ipsec,
 			      struct rte_crypto_sym_xform *crypto_xform,
@@ -252,6 +318,7 @@  sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
 	struct otx2_ipsec_fp_sa_ctl *ctl;
 	struct otx2_ipsec_fp_out_sa *sa;
 	struct otx2_sec_session *priv;
+	struct otx2_cpt_inst_s inst;
 	struct otx2_cpt_qp *qp;
 
 	priv = get_sec_session_private_data(sec_sess);
@@ -266,6 +333,12 @@  sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
 
 	memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));
 
+	sess->seq = 1;
+
+	ret = ipsec_sa_const_set(ipsec, crypto_xform, sess);
+	if (ret < 0)
+		return ret;
+
 	memcpy(sa->nonce, &ipsec->salt, 4);
 
 	if (ipsec->options.udp_encap == 1) {
@@ -274,6 +347,9 @@  sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
 	}
 
 	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+		/* Start ip id from 1 */
+		sess->ip_id = 1;
+
 		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
 			memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
 			       sizeof(struct in_addr));
@@ -307,6 +383,12 @@  sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
 	else
 		return -EINVAL;
 
+	/* Determine word 7 of CPT instruction */
+	inst.u64[7] = 0;
+	inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
+	inst.cptr = rte_mempool_virt2iova(sa);
+	sess->inst_w7 = inst.u64[7];
+
 	/* Use OPAD & IPAD */
 	RTE_SET_USED(auth_key);
 	RTE_SET_USED(auth_key_len);
diff --git a/drivers/crypto/octeontx2/otx2_security.h b/drivers/crypto/octeontx2/otx2_security.h
index 6ec321d..fe7c883 100644
--- a/drivers/crypto/octeontx2/otx2_security.h
+++ b/drivers/crypto/octeontx2/otx2_security.h
@@ -14,6 +14,15 @@ 
 #define OTX2_MAX_CPT_QP_PER_PORT 64
 #define OTX2_MAX_INLINE_PORTS 64
 
+#define OTX2_CPT_RES_ALIGN		16
+#define OTX2_NIX_SEND_DESC_ALIGN	16
+#define OTX2_CPT_INST_SIZE		64
+
+#define OTX2_CPT_EGRP_INLINE_IPSEC	1
+
+#define OTX2_CPT_OP_INLINE_IPSEC_OUTB	(0x40 | 0x25)
+#define OTX2_CPT_OP_INLINE_IPSEC_INB	(0x40 | 0x26)
+
 struct otx2_cpt_qp;
 
 struct otx2_sec_eth_cfg {
@@ -45,6 +54,42 @@  struct otx2_cpt_res {
 	};
 };
 
+struct otx2_cpt_inst_s {
+	union {
+		struct {
+			/* W0 */
+			uint64_t nixtxl : 3;
+			uint64_t doneint : 1;
+			uint64_t nixtx_addr : 60;
+			/* W1 */
+			uint64_t res_addr : 64;
+			/* W2 */
+			uint64_t tag : 32;
+			uint64_t tt : 2;
+			uint64_t grp : 10;
+			uint64_t rsvd_175_172 : 4;
+			uint64_t rvu_pf_func : 16;
+			/* W3 */
+			uint64_t qord : 1;
+			uint64_t rsvd_194_193 : 2;
+			uint64_t wqe_ptr : 61;
+			/* W4 */
+			uint64_t dlen : 16;
+			uint64_t param2 : 16;
+			uint64_t param1 : 16;
+			uint64_t opcode : 16;
+			/* W5 */
+			uint64_t dptr : 64;
+			/* W6 */
+			uint64_t rptr : 64;
+			/* W7 */
+			uint64_t cptr : 61;
+			uint64_t egrp : 3;
+		};
+		uint64_t u64[8];
+	};
+};
+
 /*
  * Security session for inline IPsec protocol offload. This is private data of
  * inline capable PMD.
@@ -68,6 +113,21 @@  struct otx2_sec_session_ipsec_ip {
 	/* CPT LF enqueue register address */
 	rte_iova_t cpt_nq_reg;
 
+	/* Pre calculated lengths and data for a session */
+	uint8_t partial_len;
+	uint8_t roundup_len;
+	uint8_t roundup_byte;
+	uint16_t ip_id;
+	union {
+		uint64_t esn;
+		struct {
+			uint32_t seq;
+			uint32_t esn_hi;
+		};
+	};
+
+	uint64_t inst_w7;
+
 	/* CPT QP used by SA */
 	struct otx2_cpt_qp *qp;
 };
diff --git a/drivers/crypto/octeontx2/otx2_security_tx.h b/drivers/crypto/octeontx2/otx2_security_tx.h
new file mode 100644
index 0000000..16b8c66
--- /dev/null
+++ b/drivers/crypto/octeontx2/otx2_security_tx.h
@@ -0,0 +1,175 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_SECURITY_TX_H__
+#define __OTX2_SECURITY_TX_H__
+
+#include <rte_security.h>
+#include <rte_mbuf.h>
+
+#include "otx2_security.h"
+
+struct otx2_ipsec_fp_out_hdr {
+	uint32_t ip_id;
+	uint32_t seq;
+	uint8_t iv[16];
+};
+
+static __rte_always_inline int32_t
+otx2_ipsec_fp_out_rlen_get(struct otx2_sec_session_ipsec_ip *sess,
+			   uint32_t plen)
+{
+	uint32_t enc_payload_len;
+
+	enc_payload_len = RTE_ALIGN_CEIL(plen + sess->roundup_len,
+			sess->roundup_byte);
+
+	return sess->partial_len + enc_payload_len;
+}
+
+static __rte_always_inline void
+otx2_ssogws_head_wait(struct otx2_ssogws *ws);
+
+static __rte_always_inline int
+otx2_sec_event_tx(struct otx2_ssogws *ws, struct rte_event *ev,
+		  struct rte_mbuf *m, const struct otx2_eth_txq *txq,
+		  const uint32_t offload_flags)
+{
+	uint32_t dlen, rlen, desc_headroom, extend_head, extend_tail;
+	struct otx2_sec_session_ipsec_ip *sess;
+	struct otx2_ipsec_fp_out_hdr *hdr;
+	struct otx2_ipsec_fp_out_sa *sa;
+	uint64_t data_addr, desc_addr;
+	struct otx2_sec_session *priv;
+	struct otx2_cpt_inst_s inst;
+	uint64_t lmt_status;
+	char *data;
+
+	struct desc {
+		struct otx2_cpt_res cpt_res __rte_aligned(OTX2_CPT_RES_ALIGN);
+		struct nix_send_hdr_s nix_hdr
+				__rte_aligned(OTX2_NIX_SEND_DESC_ALIGN);
+		union nix_send_sg_s nix_sg;
+		struct nix_iova_s nix_iova;
+	} *sd;
+
+	priv = get_sec_session_private_data((void *)(m->udata64));
+	sess = &priv->ipsec.ip;
+	sa = &sess->out_sa;
+
+	RTE_ASSERT(sess->cpt_lmtline != NULL);
+	RTE_ASSERT(!(offload_flags & (NIX_TX_OFFLOAD_MBUF_NOFF_F |
+				      NIX_TX_OFFLOAD_VLAN_QINQ)));
+
+	dlen = rte_pktmbuf_pkt_len(m) + sizeof(*hdr) - RTE_ETHER_HDR_LEN;
+	rlen = otx2_ipsec_fp_out_rlen_get(sess, dlen - sizeof(*hdr));
+
+	RTE_BUILD_BUG_ON(OTX2_CPT_RES_ALIGN % OTX2_NIX_SEND_DESC_ALIGN);
+	RTE_BUILD_BUG_ON(sizeof(sd->cpt_res) % OTX2_NIX_SEND_DESC_ALIGN);
+
+	extend_head = sizeof(*hdr);
+	extend_tail = rlen - dlen;
+
+	desc_headroom = (OTX2_CPT_RES_ALIGN - 1) + sizeof(*sd);
+
+	if (unlikely(!rte_pktmbuf_is_contiguous(m)) ||
+	    unlikely(rte_pktmbuf_headroom(m) < extend_head + desc_headroom) ||
+	    unlikely(rte_pktmbuf_tailroom(m) < extend_tail)) {
+		goto drop;
+	}
+
+	/*
+	 * Extend mbuf data to point to the expected packet buffer for NIX.
+	 * This includes the Ethernet header followed by the encrypted IPsec
+	 * payload
+	 */
+	rte_pktmbuf_append(m, extend_tail);
+	data = rte_pktmbuf_prepend(m, extend_head);
+	data_addr = rte_pktmbuf_mtophys(m);
+
+	/*
+	 * Move the Ethernet header, to insert otx2_ipsec_fp_out_hdr prior
+	 * to the IP header
+	 */
+	memcpy(data, data + sizeof(*hdr), RTE_ETHER_HDR_LEN);
+
+	hdr = (struct otx2_ipsec_fp_out_hdr *)(data + RTE_ETHER_HDR_LEN);
+
+	memcpy(hdr->iv, &sa->nonce, 4);
+	memset(hdr->iv + 4, 0, 12); //TODO: make it random
+
+	/* Keep CPT result and NIX send descriptors in headroom */
+	sd = (void *)RTE_PTR_ALIGN(data - desc_headroom, OTX2_CPT_RES_ALIGN);
+	desc_addr = data_addr - RTE_PTR_DIFF(data, sd);
+
+	/* Prepare CPT instruction */
+
+	inst.nixtx_addr = (desc_addr + offsetof(struct desc, nix_hdr)) >> 4;
+	inst.doneint = 0;
+	inst.nixtxl = 1;
+	inst.res_addr = desc_addr + offsetof(struct desc, cpt_res);
+	inst.u64[2] = 0;
+	inst.u64[3] = 0;
+	inst.wqe_ptr = desc_addr >> 3;	/* FIXME: Handle errors */
+	inst.qord = 1;
+	inst.opcode = OTX2_CPT_OP_INLINE_IPSEC_OUTB;
+	inst.dlen = dlen;
+	inst.dptr = data_addr + RTE_ETHER_HDR_LEN;
+	inst.u64[7] = sess->inst_w7;
+
+	/* First word contains 8 bit completion code & 8 bit uc comp code */
+	sd->cpt_res.u16[0] = 0;
+
+	/* Prepare NIX send descriptors for output expected from CPT */
+
+	sd->nix_hdr.w0.u = 0;
+	sd->nix_hdr.w1.u = 0;
+	sd->nix_hdr.w0.sq = txq->sq;
+	sd->nix_hdr.w0.sizem1 = 1;
+	sd->nix_hdr.w0.total = rte_pktmbuf_data_len(m);
+	sd->nix_hdr.w0.aura = npa_lf_aura_handle_to_aura(m->pool->pool_id);
+
+	sd->nix_sg.u = 0;
+	sd->nix_sg.subdc = NIX_SUBDC_SG;
+	sd->nix_sg.ld_type = NIX_SENDLDTYPE_LDD;
+	sd->nix_sg.segs = 1;
+	sd->nix_sg.seg1_size = rte_pktmbuf_data_len(m);
+
+	sd->nix_iova.addr = rte_mbuf_data_iova(m);
+
+	/* Mark mempool object as "put" since it is freed by NIX */
+	__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+
+	if (!ev->sched_type)
+		otx2_ssogws_head_wait(ws);
+
+	inst.param1 = sess->esn_hi >> 16;
+	inst.param2 = sess->esn_hi & 0xffff;
+
+	hdr->seq = rte_cpu_to_be_32(sess->seq);
+	hdr->ip_id = rte_cpu_to_be_32(sess->ip_id);
+
+	sess->ip_id++;
+	sess->esn++;
+
+	rte_cio_wmb();
+
+	do {
+		otx2_lmt_mov(sess->cpt_lmtline, &inst, 2);
+		lmt_status = otx2_lmt_submit(sess->cpt_nq_reg);
+	} while (lmt_status == 0);
+
+	return 1;
+
+drop:
+	if (offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+		/* Don't free if reference count > 1 */
+		if (rte_pktmbuf_prefree_seg(m) == NULL)
+			return 0;
+	}
+	rte_pktmbuf_free(m);
+	return 0;
+}
+
+#endif /* __OTX2_SECURITY_TX_H__ */
diff --git a/drivers/event/octeontx2/meson.build b/drivers/event/octeontx2/meson.build
index 56febb8..be4b47a 100644
--- a/drivers/event/octeontx2/meson.build
+++ b/drivers/event/octeontx2/meson.build
@@ -31,6 +31,7 @@  foreach flag: extra_flags
 	endif
 endforeach
 
-deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2', 'pmd_octeontx2']
+deps += ['bus_pci', 'common_octeontx2', 'cryptodev', 'mempool_octeontx2', 'pmd_octeontx2',
+         'security']
 
 includes += include_directories('../../crypto/octeontx2')
diff --git a/drivers/event/octeontx2/otx2_worker.h b/drivers/event/octeontx2/otx2_worker.h
index 7d161c8..c5ea4dd 100644
--- a/drivers/event/octeontx2/otx2_worker.h
+++ b/drivers/event/octeontx2/otx2_worker.h
@@ -10,6 +10,7 @@ 
 
 #include <otx2_common.h>
 #include "otx2_evdev.h"
+#include "otx2_security_tx.h"
 
 /* SSO Operations */
 
@@ -281,6 +282,11 @@  otx2_ssogws_event_tx(struct otx2_ssogws *ws, struct rte_event ev[],
 	const struct otx2_eth_txq *txq = otx2_ssogws_xtract_meta(m);
 
 	rte_prefetch_non_temporal(txq);
+
+	if ((flags & NIX_TX_OFFLOAD_SECURITY_F) &&
+	    (m->ol_flags & PKT_TX_SEC_OFFLOAD))
+		return otx2_sec_event_tx(ws, ev, m, txq, flags);
+
 	/* Perform header writes before barrier for TSO */
 	otx2_nix_xmit_prepare_tso(m, flags);
 	otx2_ssogws_order(ws, !ev->sched_type);
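
As a usage note, a hedged sketch of how an application would steer packets into the otx2_sec_event_tx() branch added above: the inline IPsec session is attached to the mbuf (this PMD recovers it from m->udata64) and PKT_TX_SEC_OFFLOAD is set. The helper below is illustrative; session creation and error handling are omitted.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

static void
mark_for_inline_ipsec(uint16_t port_id, struct rte_security_session *sess,
		      struct rte_mbuf *m)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	/* Attach the session so the PMD can look it up at TX time */
	rte_security_set_pkt_metadata(ctx, sess, m, NULL);

	/* Take the NIX_TX_OFFLOAD_SECURITY_F branch in otx2_ssogws_event_tx() */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
}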