[dpdk-dev,v2] crypto/aesni_mb: process crypto op on dequeue

Message ID 20170330133453.1501-1-declan.doherty@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Pablo de Lara Guarch

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Doherty, Declan March 30, 2017, 1:34 p.m. UTC
Move the crypto processing from the enqueue burst to the dequeue burst,
removing the requirement to continually call the
rte_cryptodev_enqueue_burst function to guarantee that all operations
get flushed from the multi-buffer manager's buffers.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
v2 changes:
  - fixed all checkpatch issues
  - rebase to head of dpdk-next-crypto

 drivers/crypto/aesni_mb/aesni_mb_ops.h             |   4 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         | 269 ++++++++++++---------
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |  30 ++-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   8 +-
 4 files changed, 177 insertions(+), 134 deletions(-)
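
For orientation before reading the full diff below: the enqueue path now
only stages operations on a per-queue-pair software ring, and all
multi-buffer processing happens on the dequeue path. A minimal sketch of
the new split, using the names introduced by the patch (simplified control
flow, not the exact driver code):

/*
 * Sketch of the new processing split (simplified; the exact driver code
 * is in the diff below).
 */

/* Enqueue: stage ops on the queue pair's ingress ring; no crypto work. */
static uint16_t
enqueue_sketch(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return rte_ring_enqueue_burst(qp->ingress_queue, (void **)ops,
			nb_ops, NULL);
}

/* Dequeue: pull staged ops, submit them to the multi-buffer manager and
 * collect whatever jobs it completes along the way. */
static uint16_t
dequeue_sketch(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	uint16_t processed = 0;
	struct rte_crypto_op *op;
	JOB_AES_HMAC *job;

	while (processed < nb_ops &&
			rte_ring_dequeue(qp->ingress_queue, (void **)&op) == 0) {
		job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
		if (job == NULL) {
			/* No free job slots: flush completed jobs to make room */
			processed += flush_mb_mgr(qp, &ops[processed],
					nb_ops - processed - 1);
			job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
		}

		if (set_mb_job_params(job, qp, op) != 0)
			set_job_null_op(job);	/* submit a no-op instead */

		job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
		if (job != NULL)
			processed += handle_completed_jobs(qp, job,
					&ops[processed], nb_ops - processed);
	}

	/* Nothing completed yet: flush so the output cannot stall */
	if (processed == 0)
		processed = flush_mb_mgr(qp, ops, nb_ops);

	return processed;
}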
  

Comments

De Lara Guarch, Pablo March 30, 2017, 3:48 p.m. UTC | #1
> -----Original Message-----
> From: Doherty, Declan
> Sent: Thursday, March 30, 2017 2:35 PM
> To: dev@dpdk.org
> Cc: Doherty, Declan; De Lara Guarch, Pablo
> Subject: [PATCH v2] crypto/aesni_mb: process crypto op on dequeue
> 
> Move the crypto processing from the enqueue burst to the dequeue burst,
> removing the requirement to continually call the
> rte_cryptodev_enqueue_burst function to guarantee that all operations
> get flushed from the multi-buffer manager's buffers.
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>

Applied to dpdk-next-crypto.
Thanks,

Pablo
  

Patch

diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 2d41d73..3307c0b 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -67,7 +67,7 @@  typedef void (*aes_xcbc_expand_key_t)
 		(void *key, void *exp_k1, void *k2, void *k3);
 
 /** Multi-buffer library function pointer table */
-struct aesni_mb_ops {
+struct aesni_mb_op_fns {
 	struct {
 		init_mb_mgr_t init_mgr;
 		/**< Initialise scheduler  */
@@ -116,7 +116,7 @@  struct aesni_mb_ops {
 };
 
 
-static const struct aesni_mb_ops job_ops[] = {
+static const struct aesni_mb_op_fns job_ops[] = {
 		[RTE_AESNI_MB_NOT_SUPPORTED] = {
 			.job = {
 				NULL
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 05edb6c..e9daeae 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -111,7 +111,7 @@  aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
 
 /** Set session authentication parameters */
 static int
-aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
@@ -181,7 +181,7 @@  aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
 
 /** Set session cipher parameters */
 static int
-aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
@@ -252,7 +252,7 @@  aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
 
 /** Parse crypto xform chain and set private session parameters */
 int
-aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
@@ -309,16 +309,43 @@  aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
 	return 0;
 }
 
+/**
+ * Burst enqueue: place crypto operations on the ingress queue for processing.
+ *
+ * @param __qp         Queue Pair to process
+ * @param ops          Crypto operations for processing
+ * @param nb_ops       Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct aesni_mb_qp *qp = __qp;
+
+	unsigned int nb_enqueued;
+
+	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	qp->stats.enqueued_count += nb_enqueued;
+
+	return nb_enqueued;
+}
+
 /** Get multi buffer session */
-static struct aesni_mb_session *
+static inline struct aesni_mb_session *
 get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
 {
 	struct aesni_mb_session *sess = NULL;
 
 	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
 		if (unlikely(op->sym->session->dev_type !=
-				RTE_CRYPTODEV_AESNI_MB_PMD))
+				RTE_CRYPTODEV_AESNI_MB_PMD)) {
 			return NULL;
+		}
 
 		sess = (struct aesni_mb_session *)op->sym->session->_private;
 	} else  {
@@ -330,7 +357,7 @@  get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
 		sess = (struct aesni_mb_session *)
 			((struct rte_cryptodev_sym_session *)_sess)->_private;
 
-		if (unlikely(aesni_mb_set_session_parameters(qp->ops,
+		if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
 				sess, op->sym->xform) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
 			sess = NULL;
@@ -353,18 +380,20 @@  get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
  * - Completed JOB_AES_HMAC structure pointer on success
  * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
  */
-static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
-		struct aesni_mb_session *session)
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+		struct rte_crypto_op *op)
 {
-	JOB_AES_HMAC *job;
-
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+	struct aesni_mb_session *session;
 	uint16_t m_offset = 0;
 
-	job = (*qp->ops->job.get_next)(&qp->mb_mgr);
-	if (unlikely(job == NULL))
-		return job;
+	session = get_session(qp, op);
+	if (session == NULL) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		return -1;
+	}
+	op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
 
 	/* Set crypto operation */
 	job->chain_order = session->chain_order;
@@ -399,7 +428,8 @@  process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
 		if (odata == NULL) {
 			MB_LOG_ERR("failed to allocate space in destination "
 					"mbuf for source data");
-			return NULL;
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
 		}
 
 		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
@@ -418,7 +448,8 @@  process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
 		if (job->auth_tag_output == NULL) {
 			MB_LOG_ERR("failed to allocate space in output mbuf "
 					"for temp digest");
-			return NULL;
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
 		}
 
 		memset(job->auth_tag_output, 0,
@@ -453,7 +484,22 @@  process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
 	job->user_data = op;
 	job->user_data2 = m_dst;
 
-	return job;
+	return 0;
+}
+
+static inline void
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
+	struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
+
+	RTE_ASSERT(m_dst != NULL);
+
+	/* Verify digest if required */
+	if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+			job->auth_tag_output_len_in_bytes) != 0)
+		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+	/* trim area used for digest from mbuf */
+	rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
 }
 
 /**
@@ -466,37 +512,31 @@  process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
  * verification of supplied digest in the case of a HASH_CIPHER operation
  * - Returns NULL on invalid job
  */
-static struct rte_crypto_op *
+static inline struct rte_crypto_op *
 post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 {
-	struct rte_crypto_op *op =
-			(struct rte_crypto_op *)job->user_data;
-	struct rte_mbuf *m_dst =
-			(struct rte_mbuf *)job->user_data2;
+	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+
 	struct aesni_mb_session *sess;
 
-	if (op == NULL || m_dst == NULL)
-		return NULL;
+	RTE_ASSERT(op != NULL);
 
-	/* set status as successful by default */
-	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
+		switch (job->status) {
+		case STS_COMPLETED:
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
-	/* check if job has been processed  */
-	if (unlikely(job->status != STS_COMPLETED)) {
-		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		return op;
-	} else if (job->hash_alg != NULL_HASH) {
-		sess = (struct aesni_mb_session *)op->sym->session->_private;
-		if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
-			/* Verify digest if required */
-			if (memcmp(job->auth_tag_output,
-					op->sym->auth.digest.data,
-					job->auth_tag_output_len_in_bytes) != 0)
-				op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
-			/* trim area used for digest from mbuf */
-			rte_pktmbuf_trim(m_dst,
-					get_digest_byte_length(job->hash_alg));
+			if (job->hash_alg != NULL_HASH) {
+				sess = (struct aesni_mb_session *)
+						op->sym->session->_private;
+
+				if (sess->auth.operation ==
+						RTE_CRYPTO_AUTH_OP_VERIFY)
+					verify_digest(job, op);
+			}
+			break;
+		default:
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		}
 	}
 
@@ -520,115 +560,111 @@  post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
  * - Number of processed jobs
  */
 static unsigned
-handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rte_crypto_op *op = NULL;
 	unsigned processed_jobs = 0;
 
-	while (job) {
-		processed_jobs++;
+	while (job != NULL && processed_jobs < nb_ops) {
 		op = post_process_mb_job(qp, job);
-		if (op)
-			rte_ring_enqueue(qp->processed_ops, (void *)op);
-		else
+
+		if (op) {
+			ops[processed_jobs++] = op;
+			qp->stats.dequeued_count++;
+		} else {
 			qp->stats.dequeue_err_count++;
-		job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
+			break;
+		}
+
+		job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
 	}
 
 	return processed_jobs;
 }
 
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	int processed_ops = 0;
+
+	/* Flush the remaining jobs */
+	JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+	if (job)
+		processed_ops += handle_completed_jobs(qp, job,
+				&ops[processed_ops], nb_ops - processed_ops);
+
+	return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job)
+{
+	job->chain_order = HASH_CIPHER;
+	job->cipher_mode = NULL_CIPHER;
+	job->hash_alg = NULL_HASH;
+	job->cipher_direction = DECRYPT;
+
+	return job;
+}
+
 static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	struct aesni_mb_session *sess;
 	struct aesni_mb_qp *qp = queue_pair;
 
-	JOB_AES_HMAC *job = NULL;
+	struct rte_crypto_op *op;
+	JOB_AES_HMAC *job;
 
-	int i, processed_jobs = 0;
+	int retval, processed_jobs = 0;
 
-	for (i = 0; i < nb_ops; i++) {
-#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
-		if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-			MB_LOG_ERR("PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", ops[i]);
-			qp->stats.enqueue_err_count++;
-			goto flush_jobs;
-		}
+	do {
+		/* Get next operation to process from ingress queue */
+		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+		if (retval < 0)
+			break;
 
-		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
-				(ops[i]->sym->m_dst != NULL &&
-				!rte_pktmbuf_is_contiguous(
-						ops[i]->sym->m_dst))) {
-			MB_LOG_ERR("PMD supports only contiguous mbufs, "
-				"op (%p) provides noncontiguous mbuf as "
-				"source/destination buffer.\n", ops[i]);
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->stats.enqueue_err_count++;
-			goto flush_jobs;
-		}
-#endif
+		/* Get next free mb job struct from mb manager */
+		job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+		if (unlikely(job == NULL)) {
+			/* if no free mb job structs we need to flush mb_mgr */
+			processed_jobs += flush_mb_mgr(qp,
+					&ops[processed_jobs],
+					(nb_ops - processed_jobs) - 1);
 
-		sess = get_session(qp, ops[i]);
-		if (unlikely(sess == NULL)) {
-			qp->stats.enqueue_err_count++;
-			goto flush_jobs;
+			job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
 		}
 
-		job = process_crypto_op(qp, ops[i], sess);
-		if (unlikely(job == NULL)) {
-			qp->stats.enqueue_err_count++;
-			goto flush_jobs;
+		retval = set_mb_job_params(job, qp, op);
+		if (unlikely(retval != 0)) {
+			qp->stats.dequeue_err_count++;
+			set_job_null_op(job);
 		}
 
-		/* Submit Job */
-		job = (*qp->ops->job.submit)(&qp->mb_mgr);
+		/* Submit job to multi-buffer for processing */
+		job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
 
 		/*
 		 * If submit returns a processed job then handle it,
 		 * before submitting subsequent jobs
 		 */
 		if (job)
-			processed_jobs += handle_completed_jobs(qp, job);
-	}
-
-	if (processed_jobs == 0)
-		goto flush_jobs;
-	else
-		qp->stats.enqueued_count += processed_jobs;
-	return i;
-
-flush_jobs:
-	/*
-	 * If we haven't processed any jobs in submit loop, then flush jobs
-	 * queue to stop the output stalling
-	 */
-	job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
-	if (job)
-		qp->stats.enqueued_count += handle_completed_jobs(qp, job);
-
-	return i;
-}
-
-static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct aesni_mb_qp *qp = queue_pair;
+			processed_jobs += handle_completed_jobs(qp, job,
+					&ops[processed_jobs],
+					nb_ops - processed_jobs);
 
-	unsigned nb_dequeued;
+	} while (processed_jobs < nb_ops);
 
-	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
-			(void **)ops, nb_ops, NULL);
-	qp->stats.dequeued_count += nb_dequeued;
+	if (processed_jobs < 1)
+		processed_jobs += flush_mb_mgr(qp,
+				&ops[processed_jobs],
+				nb_ops - processed_jobs);
 
-	return nb_dequeued;
+	return processed_jobs;
 }
 
-
 static int cryptodev_aesni_mb_remove(const char *name);
 
 static int
@@ -714,7 +750,6 @@  cryptodev_aesni_mb_create(struct rte_crypto_vdev_init_params *init_params)
 	return -EFAULT;
 }
 
-
 static int
 cryptodev_aesni_mb_probe(const char *name,
 		const char *input_args)
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index d2bae19..ec13c56 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -343,24 +343,32 @@  aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
 /** Create a ring to place processed operations on */
 static struct rte_ring *
 aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
-		unsigned ring_size, int socket_id)
+		const char *str, unsigned int ring_size, int socket_id)
 {
 	struct rte_ring *r;
+	char ring_name[RTE_CRYPTODEV_NAME_LEN];
 
-	r = rte_ring_lookup(qp->name);
+	unsigned int n = snprintf(ring_name, sizeof(ring_name),
+				"%s_%s",
+				qp->name, str);
+
+	if (n >= sizeof(ring_name))
+		return NULL;
+
+	r = rte_ring_lookup(ring_name);
 	if (r) {
 		if (rte_ring_get_size(r) >= ring_size) {
 			MB_LOG_INFO("Reusing existing ring %s for processed ops",
-					 qp->name);
+					ring_name);
 			return r;
 		}
 
 		MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
-				 qp->name);
+				ring_name);
 		return NULL;
 	}
 
-	return rte_ring_create(qp->name, ring_size, socket_id,
+	return rte_ring_create(ring_name, ring_size, socket_id,
 			RING_F_SP_ENQ | RING_F_SC_DEQ);
 }
 
@@ -389,11 +397,12 @@  aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
 		goto qp_setup_cleanup;
 
-	qp->ops = &job_ops[internals->vector_mode];
 
-	qp->processed_ops = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
-			qp_conf->nb_descriptors, socket_id);
-	if (qp->processed_ops == NULL)
+	qp->op_fns = &job_ops[internals->vector_mode];
+
+	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
+			"ingress", qp_conf->nb_descriptors, socket_id);
+	if (qp->ingress_queue == NULL)
 		goto qp_setup_cleanup;
 
 	qp->sess_mp = dev->data->session_pool;
@@ -401,8 +410,7 @@  aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	memset(&qp->stats, 0, sizeof(qp->stats));
 
 	/* Initialise multi-buffer manager */
-	(*qp->ops->job.init_mgr)(&qp->mb_mgr);
-
+	(*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
 	return 0;
 
 qp_setup_cleanup:
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index eb4b2ad..0d82699 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -151,12 +151,12 @@  struct aesni_mb_qp {
 	/**< Queue Pair Identifier */
 	char name[RTE_CRYPTODEV_NAME_LEN];
 	/**< Unique Queue Pair Name */
-	const struct aesni_mb_ops *ops;
+	const struct aesni_mb_op_fns *op_fns;
 	/**< Vector mode dependent pointer table of the multi-buffer APIs */
 	MB_MGR mb_mgr;
 	/**< Multi-buffer instance */
-	struct rte_ring *processed_ops;
-	/**< Ring for placing process operations */
+	struct rte_ring *ingress_queue;
+	/**< Ring for placing operations ready for processing */
 	struct rte_mempool *sess_mp;
 	/**< Session Mempool */
 	struct rte_cryptodev_stats stats;
@@ -227,7 +227,7 @@  struct aesni_mb_session {
  *
  */
 extern int
-aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform);
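
Worth noting for application developers: this change is internal to the
PMD and the public cryptodev API is untouched, so the usual
enqueue/dequeue polling loop keeps working. The practical difference is
that dequeue alone now drives any remaining operations out of the
multi-buffer manager. A minimal caller-side sketch, assuming dev_id,
qp_id and the enq_ops/deq_ops arrays are set up elsewhere:

/* Application polling loop (sketch); the cryptodev API is unchanged. */
uint16_t enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, enq_ops, nb_ops);

/* With this patch there is no need to keep calling enqueue just to
 * flush the multi-buffer manager; dequeueing drives the processing. */
uint16_t deq = 0;
while (deq < enq)
	deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
			&deq_ops[deq], enq - deq);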