[2/3] crypto/dpaa2_sec: support for atomic queues

Message ID 20180914114810.14960-3-akhil.goyal@nxp.com (mailing list archive)
State Accepted, archived
Delegated to: Akhil Goyal
Series dpaa2: support for crypto adapter

Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  success   Compilation OK

Commit Message

Akhil Goyal Sept. 14, 2018, 11:48 a.m. UTC
  From: Ashish Jain <ashish.jain@nxp.com>

Signed-off-by: Ashish Jain <ashish.jain@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 57 +++++++++++++++++++--
 1 file changed, 52 insertions(+), 5 deletions(-)
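
For atomic scheduling, the patch leaves the DQRR entry that delivered a frame "held": the dequeue callback stores the DQRR slot index plus one in the source mbuf's seqn field and marks the slot in the per-lcore held mask, and the later enqueue of the processed frame carries a QBMAN DCA (discrete consumption acknowledgement) flag so that hardware releases the slot only at that point, preserving per-flow order. Order preservation is also enabled on the DPSECI Rx queue. The standalone sketch below mirrors that bookkeeping with simplified stand-in names and types; it is illustrative only and does not use the real QBMAN or driver definitions.

/* Illustration only: DQRR "held buffer" bookkeeping as used by the patch.
 * All names below are simplified stand-ins for the per-lcore state and
 * QBMAN flags in the dpaa2 drivers.
 */
#include <stdint.h>
#include <stdio.h>

#define DCA_FLAG	(1u << 31)	/* stand-in for QBMAN_ENQUEUE_FLAG_DCA */
#define INVALID_SEQN	0		/* stand-in for DPAA2_INVALID_MBUF_SEQN */

static uint8_t  dqrr_size;	/* number of DQRR entries currently held */
static uint16_t dqrr_held;	/* bitmask of held DQRR slot indexes (0..15) */

/* Atomic dequeue: remember which DQRR slot delivered this frame.
 * seqn stores dqrr_index + 1 so that 0 can mean "not held".
 */
static void
on_atomic_dequeue(uint32_t *seqn, uint8_t dqrr_index)
{
	*seqn = (uint32_t)dqrr_index + 1;
	dqrr_size++;
	dqrr_held |= 1u << dqrr_index;
}

/* Enqueue: if the frame came from a held DQRR slot, piggy-back a DCA
 * (discrete consumption acknowledgement) on the enqueue so hardware
 * releases that slot only once the frame is accepted, keeping order.
 */
static uint32_t
on_enqueue(uint32_t *seqn)
{
	uint32_t flags = 0;

	if (*seqn != INVALID_SEQN) {
		uint8_t dqrr_index = (uint8_t)(*seqn - 1);

		flags = DCA_FLAG | dqrr_index;
		dqrr_size--;
		dqrr_held &= ~(1u << dqrr_index);
		*seqn = INVALID_SEQN;
	}
	return flags;
}

int
main(void)
{
	uint32_t seqn = INVALID_SEQN;

	on_atomic_dequeue(&seqn, 3);
	printf("enqueue flags: 0x%x, held entries: %u\n",
	       on_enqueue(&seqn), (unsigned int)dqrr_size);
	return 0;
}
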
  

Patch

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index ae38f507b..781595b38 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1209,6 +1209,7 @@  dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
 	struct qbman_swp *swp;
 	uint16_t num_tx = 0;
+	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
 	/*todo - need to support multiple buffer pools */
 	uint16_t bpid;
 	struct rte_mempool *mb_pool;
@@ -1240,6 +1241,15 @@  dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			dpaa2_eqcr_size : nb_ops;
 
 		for (loop = 0; loop < frames_to_send; loop++) {
+			if ((*ops)->sym->m_src->seqn) {
+			 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
+
+			 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
+			 DPAA2_PER_LCORE_DQRR_SIZE--;
+			 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+			 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+			}
+
 			/*Clear the unused FD fields before sending*/
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
 			mb_pool = (*ops)->sym->m_src->pool;
@@ -1256,7 +1266,7 @@  dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 		while (loop < frames_to_send) {
 			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
 							&fd_arr[loop],
-							NULL,
+							&flags[loop],
 							frames_to_send - loop);
 		}
 
@@ -1281,6 +1291,9 @@  sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
 
+	diff = len - mbuf->pkt_len;
+	mbuf->pkt_len += diff;
+	mbuf->data_len += diff;
 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
 	op->sym->aead.digest.phys_addr = 0L;
@@ -1291,9 +1304,6 @@  sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
 	else
 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
-	diff = len - mbuf->pkt_len;
-	mbuf->pkt_len += diff;
-	mbuf->data_len += diff;
 
 	return op;
 }
@@ -2879,6 +2889,38 @@  dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
 
 	qbman_swp_dqrr_consume(swp, dq);
 }
+static void
+dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+				 const struct qbman_fd *fd,
+				 const struct qbman_result *dq,
+				 struct dpaa2_queue *rxq,
+				 struct rte_event *ev)
+{
+	uint8_t dqrr_index;
+	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+	/* Prefetching mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+	ev->flow_id = rxq->ev.flow_id;
+	ev->sub_event_type = rxq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = rxq->ev.sched_type;
+	ev->queue_id = rxq->ev.queue_id;
+	ev->priority = rxq->ev.priority;
+
+	ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
+				(rxq->dev))->driver_id);
+	dqrr_index = qbman_get_dqrr_idx(dq);
+	crypto_op->sym->m_src->seqn = dqrr_index + 1;
+	DPAA2_PER_LCORE_DQRR_SIZE++;
+	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
+}
 
 int
 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
@@ -2894,6 +2936,8 @@  dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 
 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
+	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
+		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
 	else
 		return -EINVAL;
 
@@ -2905,7 +2949,10 @@  dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 
 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
 	cfg.user_ctx = (size_t)(qp);
-
+	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
+		cfg.order_preservation_en = 1;
+	}
 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
 				  qp_id, &cfg);
 	if (ret) {
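
On the application side, this path is exercised by attaching the dpaa2_sec queue pairs to the event crypto adapter with an event whose sched_type is RTE_SCHED_TYPE_ATOMIC. A minimal sketch follows; the adapter/device/queue ids and the port configuration are illustrative placeholders, and the calls assume the rte_event_crypto_adapter API as it existed in the DPDK 18.x timeframe of this series.

/* Hypothetical sketch: bind a crypto device's queue pairs to an atomic
 * event queue via the event crypto adapter. Ids and the port
 * configuration are placeholders, not values required by the driver.
 */
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

static int
setup_atomic_crypto_adapter(uint8_t adapter_id, uint8_t evdev_id,
			    uint8_t cdev_id, uint8_t ev_queue_id)
{
	struct rte_event_port_conf port_conf = {
		.new_event_threshold = 4096,
		.dequeue_depth = 16,
		.enqueue_depth = 16,
	};
	struct rte_event ev = {
		.queue_id = ev_queue_id,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,	/* exercises the new path */
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};
	int ret;

	ret = rte_event_crypto_adapter_create(adapter_id, evdev_id, &port_conf,
					      RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
	if (ret)
		return ret;

	/* queue_pair_id of -1 adds all queue pairs of the crypto device */
	return rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
						       -1, &ev);
}
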