[v4,2/8] crypto/aesni_gcm: cpu crypto support

Message ID 20200128031642.15256-3-marcinx.smoczynski@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Akhil Goyal
Series: Introduce CPU crypto mode

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     apply issues

Commit Message

Marcin Smoczynski Jan. 28, 2020, 3:16 a.m. UTC
  Add support for CPU crypto mode by introducing the required handler.
The crypto mode (sync/async) is chosen during symmetric session creation
when the appropriate flag is set in the xform type field.

Authenticated encryption and decryption are supported with tag
generation/verification.

Signed-off-by: Marcin Smoczynski <marcinx.smoczynski@intel.com>
---
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h      |   9 +
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c      | 220 +++++++++++++++++-
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c  |   3 +
 .../crypto/aesni_gcm/aesni_gcm_pmd_private.h  |  11 +-
 4 files changed, 237 insertions(+), 6 deletions(-)
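
For illustration, a minimal sketch of how an application might drive this
synchronous path, assuming the rte_crypto_sym_vec/rte_crypto_sgl layout and
the rte_cryptodev_sym_cpu_crypto_process() entry point introduced earlier in
this series; the helper name and buffer parameters are illustrative, and
error handling is reduced to the per-element status codes:

    #include <rte_cryptodev.h>

    /* one flat buffer, in-place AES-GCM encryption through the CPU path */
    static int
    cpu_gcm_encrypt_one(uint8_t dev_id, struct rte_cryptodev_sym_session *sess,
            void *buf, uint32_t len, void *iv, void *aad, void *tag)
    {
            struct rte_crypto_vec seg = { .base = buf, .len = len };
            struct rte_crypto_sgl sgl = { .vec = &seg, .num = 1 };
            union rte_crypto_sym_ofs ofs = { .raw = 0 }; /* unused by this PMD */
            int32_t status;

            struct rte_crypto_sym_vec vec = {
                    .sgl = &sgl,
                    .iv = &iv,
                    .aad = &aad,
                    .digest = &tag,
                    .status = &status,
                    .num = 1,
            };

            /* returns the number of elements processed successfully */
            if (rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, &vec) != 1)
                    return -status; /* errno-style per-element status */
            return 0;
    }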
  

Comments

De Lara Guarch, Pablo Jan. 28, 2020, 10:49 a.m. UTC | #1
> -----Original Message-----
> From: Smoczynski, MarcinX <marcinx.smoczynski@intel.com>
> Sent: Tuesday, January 28, 2020 3:17 AM
> To: akhil.goyal@nxp.com; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Zhang, Roy Fan <roy.fan.zhang@intel.com>;
> Doherty, Declan <declan.doherty@intel.com>; Nicolau, Radu
> <radu.nicolau@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Cc: dev@dpdk.org; Smoczynski, MarcinX <marcinx.smoczynski@intel.com>
> Subject: [PATCH v4 2/8] crypto/aesni_gcm: cpu crypto support
> 
> Add support for CPU crypto mode by introducing required handler.
> Crypto mode (sync/async) is chosen during sym session create if an appropriate
> flag is set in an xform type number.
> 
> Authenticated encryption and decryption are supported with tag
> generation/verification.
> 
> Signed-off-by: Marcin Smoczynski <marcinx.smoczynski@intel.com>

...

> @@ -331,9 +331,12 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
>  		.queue_pair_release	= aesni_gcm_pmd_qp_release,
>  		.queue_pair_count	= aesni_gcm_pmd_qp_count,
> 
> +		.sym_cpu_process        = aesni_gcm_pmd_cpu_crypto_process,
> +
>  		.sym_session_get_size	= aesni_gcm_pmd_sym_session_get_size,
>  		.sym_session_configure	= aesni_gcm_pmd_sym_session_configure,
>  		.sym_session_clear	= aesni_gcm_pmd_sym_session_clear
>  };
> 
>  struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
> +

Minor thing, but you should remove this blank line.

Apart from that:

Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
  

Patch

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index e272f1067..404c0adff 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -65,4 +65,13 @@  struct aesni_gcm_ops {
 	aesni_gcm_finalize_t finalize_dec;
 };
 
+/** GCM per-session operation handlers */
+struct aesni_gcm_session_ops {
+	aesni_gcm_t cipher;
+	aesni_gcm_pre_t pre;
+	aesni_gcm_init_t init;
+	aesni_gcm_update_t update;
+	aesni_gcm_finalize_t finalize;
+};
+
 #endif /* _AESNI_GCM_OPS_H_ */
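
The per-session handler table above lets the driver bind the key-size-specific
implementation and cipher direction once at session setup; the data path then
calls straight through the pointers with no per-operation branching. A sketch
of the resulting call pattern, using names from this patch:

    /* one operation through the pre-selected session handlers */
    s->ops.init(&s->gdata_key, &gdata_ctx, iv, aad, aad_len);
    s->ops.update(&s->gdata_key, &gdata_ctx, out, in, len);  /* per segment */
    s->ops.finalize(&s->gdata_key, &gdata_ctx, digest, digest_len);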
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1a03be31d..9901c811b 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -15,6 +15,31 @@ 
 
 static uint8_t cryptodev_driver_id;
 
+/* setup session handlers */
+static void
+set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)
+{
+	s->ops.pre = gcm_ops->pre;
+	s->ops.init = gcm_ops->init;
+
+	switch (s->op) {
+	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+		s->ops.cipher = gcm_ops->enc;
+		s->ops.update = gcm_ops->update_enc;
+		s->ops.finalize = gcm_ops->finalize_enc;
+		break;
+	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+		s->ops.cipher = gcm_ops->dec;
+		s->ops.update = gcm_ops->update_dec;
+		s->ops.finalize = gcm_ops->finalize_dec;
+		break;
+	case AESNI_GMAC_OP_GENERATE:
+	case AESNI_GMAC_OP_VERIFY:
+		s->ops.finalize = gcm_ops->finalize_enc;
+		break;
+	}
+}
+
 /** Parse crypto xform chain and set private session parameters */
 int
 aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
@@ -65,6 +90,7 @@  aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 		/* Select Crypto operation */
 		if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
 			sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+		/* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
 		else
 			sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
 
@@ -78,7 +104,6 @@  aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 		return -ENOTSUP;
 	}
 
-
 	/* IV check */
 	if (sess->iv.length != 16 && sess->iv.length != 12 &&
 			sess->iv.length != 0) {
@@ -102,6 +127,10 @@  aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
 		return -EINVAL;
 	}
 
+	/* setup session handlers */
+	set_func_ops(sess, &gcm_ops[sess->key]);
+
+	/* pre-generate key */
 	gcm_ops[sess->key].pre(key, &sess->gdata_key);
 
 	/* Digest check */
@@ -356,6 +385,191 @@  process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 	return 0;
 }
 
+static inline void
+aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
+{
+	uint32_t i;
+
+	for (i = 0; i < vec->num; i++)
+		vec->status[i] = errnum;
+}
+
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
+	struct gcm_context_data *gdata_ctx, uint8_t *digest)
+{
+	if (s->req_digest_length != s->gen_digest_length) {
+		uint8_t tmpdigest[s->gen_digest_length];
+
+		s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+			s->gen_digest_length);
+		memcpy(digest, tmpdigest, s->req_digest_length);
+	} else {
+		s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
+			s->gen_digest_length);
+	}
+
+	return 0;
+}
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
+	struct gcm_context_data *gdata_ctx, uint8_t *digest)
+{
+	uint8_t tmpdigest[s->gen_digest_length];
+
+	s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+		s->gen_digest_length);
+
+	return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
+		EBADMSG;
+}
+
+static inline void
+aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
+	struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
+	void *iv, void *aad)
+{
+	uint32_t i;
+
+	/* init crypto operation */
+	s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
+		(uint64_t)s->aad_length);
+
+	/* update with sgl data */
+	for (i = 0; i < sgl->num; i++) {
+		struct rte_crypto_vec *vec = &sgl->vec[i];
+
+		s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
+			vec->len);
+	}
+}
+
+static inline void
+aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
+	struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
+	void *iv)
+{
+	s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
+		sgl->vec[0].len);
+}
+
+static inline uint32_t
+aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
+	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
+			&vec->sgl[i], vec->iv[i], vec->aad[i]);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
+			gdata_ctx, vec->digest[i]);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
+	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
+			&vec->sgl[i], vec->iv[i], vec->aad[i]);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
+			gdata_ctx, vec->digest[i]);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
+	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		if (vec->sgl[i].num != 1) {
+			vec->status[i] = ENOTSUP;
+			continue;
+		}
+
+		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
+			&vec->sgl[i], vec->iv[i]);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
+			gdata_ctx, vec->digest[i]);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
+	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		if (vec->sgl[i].num != 1) {
+			vec->status[i] = ENOTSUP;
+			continue;
+		}
+
+		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
+			&vec->sgl[i], vec->iv[i]);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
+			gdata_ctx, vec->digest[i]);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+/** Process CPU crypto bulk operations */
+uint32_t
+aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
+	struct rte_cryptodev_sym_session *sess,
+	__rte_unused union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_sym_vec *vec)
+{
+	void *sess_priv;
+	struct aesni_gcm_session *s;
+	struct gcm_context_data gdata_ctx;
+
+	sess_priv = get_sym_session_private_data(sess, dev->driver_id);
+	if (unlikely(sess_priv == NULL)) {
+		aesni_gcm_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+
+	s = sess_priv;
+	switch (s->op) {
+	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+		return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
+	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+		return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
+	case AESNI_GMAC_OP_GENERATE:
+		return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
+	case AESNI_GMAC_OP_VERIFY:
+		return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
+	default:
+		aesni_gcm_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+}
+
 /**
  * Process a completed job and return rte_mbuf which job processed
  *
@@ -527,7 +741,8 @@  aesni_gcm_create(const char *name,
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO;
 
 	/* Check CPU for support for AES instruction set */
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
@@ -672,7 +887,6 @@  RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
 RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
 		cryptodev_driver_id);
 
-
 RTE_INIT(aesni_gcm_init_log)
 {
 	aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm");
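
Note the contract of the bulk handler added above: it returns the number of
elements processed successfully and records an errno-style value per element
in vec->status (EBADMSG for a failed tag verification, ENOTSUP for
multi-segment GMAC input, EINVAL for a missing session). A hedged sketch of a
caller distinguishing partial success (names other than the API are
illustrative):

    #include <stdio.h>
    #include <rte_cryptodev.h>

    /* illustrative: report per-element failures after a bulk run */
    static void
    run_and_check(uint8_t dev_id, struct rte_cryptodev_sym_session *sess,
            union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec)
    {
            uint32_t i, done;

            done = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, vec);
            if (done == vec->num)
                    return; /* everything succeeded */

            for (i = 0; i < vec->num; i++)
                    if (vec->status[i] != 0)
                            printf("elem %u failed: %d\n", i, vec->status[i]);
    }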
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 2f66c7c58..5228d98b1 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -331,9 +331,12 @@  struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
 		.queue_pair_release	= aesni_gcm_pmd_qp_release,
 		.queue_pair_count	= aesni_gcm_pmd_qp_count,
 
+		.sym_cpu_process        = aesni_gcm_pmd_cpu_crypto_process,
+
 		.sym_session_get_size	= aesni_gcm_pmd_sym_session_get_size,
 		.sym_session_configure	= aesni_gcm_pmd_sym_session_configure,
 		.sym_session_clear	= aesni_gcm_pmd_sym_session_clear
 };
 
 struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
+
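
Registering sym_cpu_process here is what connects the PMD to the new library
entry point from earlier in this series; the library-side dispatch is roughly
the following sketch (error checks omitted, see the series for the full form):

    uint32_t
    rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
            struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
            struct rte_crypto_sym_vec *vec)
    {
            struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(dev_id);

            return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
    }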
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 2039adb53..1823a9997 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -92,6 +92,8 @@  struct aesni_gcm_session {
 	/**< GCM key type */
 	struct gcm_key_data gdata_key;
 	/**< GCM parameters */
+	struct aesni_gcm_session_ops ops;
+	/**< Session handlers */
 };
 
 
@@ -109,10 +111,13 @@  aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
 		struct aesni_gcm_session *sess,
 		const struct rte_crypto_sym_xform *xform);
 
-
-/**
- * Device specific operations function pointer structure */
+/* Device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
 
+/** CPU crypto bulk process handler */
+uint32_t
+aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
+	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_sym_vec *vec);
 
 #endif /* _AESNI_GCM_PMD_PRIVATE_H_ */
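
Since the device now advertises RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO, an
application can probe for the synchronous path at runtime before using it; a
minimal check (the enable helper is application-defined and illustrative):

    struct rte_cryptodev_info info;

    rte_cryptodev_info_get(dev_id, &info);
    if ((info.feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO) != 0)
            enable_cpu_crypto_path(); /* sync API usable on this device */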