[v3,1/2] crypto/dpaa_sec: support event crypto adapter

Message ID: 20191004112659.32394-2-akhil.goyal@nxp.com (mailing list archive)
State: Accepted, archived
Delegated to: akhil goyal
Series: dpaa_sec: support event crypto adapter

Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  fail      Compilation issues

Commit Message

Akhil Goyal Oct. 4, 2019, 11:26 a.m. UTC
dpaa_sec HW queues can be attached to a DPAA HW event
device, and the application can configure the event
crypto adapter to receive dpaa_sec packets as hardware
events.
This patch defines the APIs used by the dpaa event
device to attach and detach dpaa_sec queues.

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/bus/dpaa/base/qbman/qman.c            |   9 +-
 drivers/bus/dpaa/include/fsl_qman.h           |   2 +-
 drivers/crypto/dpaa_sec/Makefile              |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c            | 202 +++++++++++++++++-
 drivers/crypto/dpaa_sec/dpaa_sec_event.h      |  19 ++
 .../dpaa_sec/rte_pmd_dpaa_sec_version.map     |   8 +
 6 files changed, 233 insertions(+), 8 deletions(-)
 create mode 100644 drivers/crypto/dpaa_sec/dpaa_sec_event.h
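
A minimal application-side sketch of the configuration described in the
commit message above, using the generic rte_event_crypto_adapter API of
this DPDK release. The device IDs, queue numbers, and event field values
are illustrative assumptions, not part of this patch; on DPAA the
queue-pair add is expected to reach dpaa_sec_eventq_attach() through the
dpaa event PMD glue added in patch 2/2 of this series.

#include <stdint.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

static int
setup_crypto_adapter(uint8_t evdev_id, uint8_t cdev_id)
{
	struct rte_event_port_conf port_conf;
	struct rte_event ev = {
		.queue_id = 0,                       /* assumed target event queue */
		.sched_type = RTE_SCHED_TYPE_ATOMIC, /* selects the atomic DQRR path */
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};
	int ret;

	ret = rte_event_port_default_conf_get(evdev_id, 0, &port_conf);
	if (ret)
		return ret;

	/* OP_NEW mode: the crypto PMD injects completions into the eventdev */
	ret = rte_event_crypto_adapter_create(0, evdev_id, &port_conf,
					      RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
	if (ret)
		return ret;

	/* Bind crypto qp 0; the event describes how completions are scheduled */
	ret = rte_event_crypto_adapter_queue_pair_add(0, cdev_id, 0, &ev);
	if (ret)
		return ret;

	return rte_event_crypto_adapter_start(0);
}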
  

Patch

diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index c6f7d7bb3..e43fc65ef 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -2286,7 +2286,7 @@  int qman_enqueue_multi(struct qman_fq *fq,
 
 int
 qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
-		      int frames_to_send)
+		      u32 *flags, int frames_to_send)
 {
 	struct qman_portal *p = get_affine_portal();
 	struct qm_portal *portal = &p->p;
@@ -2294,7 +2294,7 @@  qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
 	register struct qm_eqcr *eqcr = &portal->eqcr;
 	struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
 
-	u8 i, diff, old_ci, sent = 0;
+	u8 i = 0, diff, old_ci, sent = 0;
 
 	/* Update the available entries if no entry is free */
 	if (!eqcr->available) {
@@ -2313,6 +2313,11 @@  qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
 		eq->fd.addr = cpu_to_be40(fd->addr);
 		eq->fd.status = cpu_to_be32(fd->status);
 		eq->fd.opaque = cpu_to_be32(fd->opaque);
+		if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
+			eq->dca = QM_EQCR_DCA_ENABLE |
+				((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
+		}
+		i++;
 
 		eq = (void *)((unsigned long)(eq + 1) &
 			(~(unsigned long)(QM_EQCR_SIZE << 6)));
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index e5cccbbea..29fb2eb9d 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1773,7 +1773,7 @@  int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
  */
 int
 qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
-		      int frames_to_send);
+		      u32 *flags, int frames_to_send);
 
 typedef int (*qman_cb_precommit) (void *arg);
 
diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile
index 1d8b7bec1..353c2549f 100644
--- a/drivers/crypto/dpaa_sec/Makefile
+++ b/drivers/crypto/dpaa_sec/Makefile
@@ -16,6 +16,7 @@  CFLAGS += $(WERROR_FLAGS)
 
 CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
 CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/base/qbman
 CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa_sec/
 #sharing the hw flib headers from dpaa2_sec pmd
 CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index ea0b2054a..38cfdd378 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -37,6 +37,7 @@ 
 
 #include <rte_dpaa_bus.h>
 #include <dpaa_sec.h>
+#include <dpaa_sec_event.h>
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
@@ -61,9 +62,6 @@  dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 	}
-
-	/* report op status to sym->op and then free the ctx memory  */
-	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
 }
 
 static inline struct dpaa_sec_op_ctx *
@@ -1756,7 +1754,7 @@  dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 	struct rte_crypto_op *op;
 	struct dpaa_sec_job *cf;
 	dpaa_sec_session *ses;
-	uint32_t auth_only_len;
+	uint32_t auth_only_len, index, flags[DPAA_SEC_BURST] = {0};
 	struct qman_fq *inq[DPAA_SEC_BURST];
 
 	while (nb_ops) {
@@ -1764,6 +1762,18 @@  dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 				DPAA_SEC_BURST : nb_ops;
 		for (loop = 0; loop < frames_to_send; loop++) {
 			op = *(ops++);
+			if (op->sym->m_src->seqn != 0) {
+				index = op->sym->m_src->seqn - 1;
+				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
+					/* QM_EQCR_DCA_IDXMASK = 0x0f */
+					flags[loop] = ((index & 0x0f) << 8);
+					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
+					DPAA_PER_LCORE_DQRR_SIZE--;
+					DPAA_PER_LCORE_DQRR_HELD &=
+								~(1 << index);
+				}
+			}
+
 			switch (op->sess_type) {
 			case RTE_CRYPTO_OP_WITH_SESSION:
 				ses = (dpaa_sec_session *)
@@ -1882,7 +1892,7 @@  dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 		loop = 0;
 		while (loop < frames_to_send) {
 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
-					frames_to_send - loop);
+					&flags[loop], frames_to_send - loop);
 		}
 		nb_ops -= frames_to_send;
 		num_tx += frames_to_send;
@@ -2679,6 +2689,188 @@  dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
 	}
 }
 
+static enum qman_cb_dqrr_result
+dpaa_sec_process_parallel_event(void *event,
+			struct qman_portal *qm __always_unused,
+			struct qman_fq *outq,
+			const struct qm_dqrr_entry *dqrr,
+			void **bufs)
+{
+	const struct qm_fd *fd;
+	struct dpaa_sec_job *job;
+	struct dpaa_sec_op_ctx *ctx;
+	struct rte_event *ev = (struct rte_event *)event;
+
+	fd = &dqrr->fd;
+
+	/* sg is embedded in an op ctx,
+	 * sg[0] is for output
+	 * sg[1] for input
+	 */
+	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+	ctx->fd_status = fd->status;
+	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		struct qm_sg_entry *sg_out;
+		uint32_t len;
+
+		sg_out = &job->sg[0];
+		hw_sg_to_cpu(sg_out);
+		len = sg_out->length;
+		ctx->op->sym->m_src->pkt_len = len;
+		ctx->op->sym->m_src->data_len = len;
+	}
+	if (!ctx->fd_status) {
+		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	} else {
+		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
+		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+	}
+	ev->event_ptr = (void *)ctx->op;
+
+	ev->flow_id = outq->ev.flow_id;
+	ev->sub_event_type = outq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = outq->ev.sched_type;
+	ev->queue_id = outq->ev.queue_id;
+	ev->priority = outq->ev.priority;
+	*bufs = (void *)ctx->op;
+
+	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+	return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result
+dpaa_sec_process_atomic_event(void *event,
+			struct qman_portal *qm __rte_unused,
+			struct qman_fq *outq,
+			const struct qm_dqrr_entry *dqrr,
+			void **bufs)
+{
+	u8 index;
+	const struct qm_fd *fd;
+	struct dpaa_sec_job *job;
+	struct dpaa_sec_op_ctx *ctx;
+	struct rte_event *ev = (struct rte_event *)event;
+
+	fd = &dqrr->fd;
+
+	/* sg is embedded in an op ctx,
+	 * sg[0] is for output
+	 * sg[1] for input
+	 */
+	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+	ctx->fd_status = fd->status;
+	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		struct qm_sg_entry *sg_out;
+		uint32_t len;
+
+		sg_out = &job->sg[0];
+		hw_sg_to_cpu(sg_out);
+		len = sg_out->length;
+		ctx->op->sym->m_src->pkt_len = len;
+		ctx->op->sym->m_src->data_len = len;
+	}
+	if (!ctx->fd_status) {
+		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	} else {
+		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
+		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+	}
+	ev->event_ptr = (void *)ctx->op;
+	ev->flow_id = outq->ev.flow_id;
+	ev->sub_event_type = outq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = outq->ev.sched_type;
+	ev->queue_id = outq->ev.queue_id;
+	ev->priority = outq->ev.priority;
+
+	/* Save active dqrr entries */
+	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
+	DPAA_PER_LCORE_DQRR_SIZE++;
+	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
+	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
+	ev->impl_opaque = index + 1;
+	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
+	*bufs = (void *)ctx->op;
+
+	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+	return qman_cb_dqrr_defer;
+}
+
+int
+dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
+		int qp_id,
+		uint16_t ch_id,
+		const struct rte_event *event)
+{
+	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
+	struct qm_mcc_initfq opts = {0};
+
+	int ret;
+
+	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+	opts.fqd.dest.channel = ch_id;
+
+	switch (event->sched_type) {
+	case RTE_SCHED_TYPE_ATOMIC:
+		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
+		 * configuration with HOLD_ACTIVE setting
+		 */
+		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
+		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
+		break;
+	case RTE_SCHED_TYPE_ORDERED:
+		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
+		return -1;
+	default:
+		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
+		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
+		break;
+	}
+
+	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
+	if (unlikely(ret)) {
+		DPAA_SEC_ERR("unable to init caam source fq!");
+		return ret;
+	}
+
+	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
+
+	return 0;
+}
+
+int
+dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
+			int qp_id)
+{
+	struct qm_mcc_initfq opts = {0};
+	int ret;
+	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
+
+	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
+	qp->outq.cb.ern  = ern_sec_fq_handler;
+	qman_retire_fq(&qp->outq, NULL);
+	qman_oos_fq(&qp->outq);
+	ret = qman_init_fq(&qp->outq, 0, &opts);
+	if (ret)
+		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
+	qp->outq.cb.dqrr = NULL;
+
+	return ret;
+}
+
 static struct rte_cryptodev_ops crypto_ops = {
 	.dev_configure	      = dpaa_sec_dev_configure,
 	.dev_start	      = dpaa_sec_dev_start,
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_event.h b/drivers/crypto/dpaa_sec/dpaa_sec_event.h
new file mode 100644
index 000000000..8d1a01809
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_event.h
@@ -0,0 +1,19 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 NXP
+ *
+ */
+
+#ifndef _DPAA_SEC_EVENT_H_
+#define _DPAA_SEC_EVENT_H_
+
+int
+dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
+		int qp_id,
+		uint16_t ch_id,
+		const struct rte_event *event);
+
+int
+dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
+		int qp_id);
+
+#endif /* _DPAA_SEC_EVENT_H_ */
diff --git a/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map b/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
index a70bd197b..cc7f2162e 100644
--- a/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
+++ b/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
@@ -2,3 +2,11 @@  DPDK_17.11 {
 
 	local: *;
 };
+
+DPDK_19.11 {
+	global:
+
+	dpaa_sec_eventq_attach;
+	dpaa_sec_eventq_detach;
+
+} DPDK_17.11;
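
To summarize the atomic-scheduling handshake implemented above:
dpaa_sec_process_atomic_event() holds the frame's DQRR ring slot and
records it in the mbuf's seqn as index + 1, and dpaa_sec_enqueue_burst()
later converts that back into a per-frame DCA flag for
qman_enqueue_multi_fq(), so the hardware releases the hold on enqueue. A
minimal arithmetic sketch, using illustrative stand-in constants (the
real QMAN_ENQUEUE_FLAG_DCA and QM_EQCR_DCA_IDXMASK are defined in the
qman headers):

#include <stdint.h>

/* Illustrative stand-ins; real values come from the qman headers. */
#define EX_QM_EQCR_DCA_IDXMASK   0x0f
#define EX_QMAN_ENQUEUE_FLAG_DCA (1u << 31)

/* Dequeue side (atomic callback): remember which DQRR slot holds the
 * frame. seqn == 0 is reserved to mean "no held entry", hence the +1.
 */
static inline uint32_t
ex_dqrr_index_to_seqn(uint8_t dqrr_index)
{
	return (uint32_t)dqrr_index + 1;
}

/* Enqueue side: rebuild the discrete-consumption-acknowledgement flag.
 * The DQRR index is carried in bits 8..11 of the per-frame flag word,
 * matching the ((index & 0x0f) << 8) encoding in the patch.
 */
static inline uint32_t
ex_seqn_to_dca_flag(uint32_t seqn)
{
	uint32_t index = seqn - 1;

	return EX_QMAN_ENQUEUE_FLAG_DCA |
	       ((index & EX_QM_EQCR_DCA_IDXMASK) << 8);
}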