[v5,4/4] crypto/ipsec_mb: unified IPsec MB interface

Message ID 20240305174227.1785111-4-brian.dooley@intel.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series [v5,1/4] crypto/ipsec_mb: bump minimum IPsec Multi-buffer version

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/intel-Functional success Functional PASS
ci/github-robot: build fail github build: failed
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-compile-amd64-testing success Testing PASS
ci/iol-compile-arm64-testing success Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-sample-apps-testing success Testing PASS
ci/iol-unit-arm64-testing success Testing PASS

Commit Message

Brian Dooley March 5, 2024, 5:42 p.m. UTC
  Currently, the IPsec Multi-buffer (MB) library provides both a JOB API and a
direct API. The AESNI_MB PMD uses the JOB API codepath, while the ZUC, KASUMI,
SNOW3G and CHACHA20_POLY1305 PMDs use the direct API. Switch these PMDs over to
the JOB API codepath, removing all use of the IPsec MB direct API from them
(a sketch contrasting the two codepaths follows the diffstat below).

Signed-off-by: Brian Dooley <brian.dooley@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
Acked-by: Wathsala Vithanage <wathsala.vithanage@arm.com>
---
v5:
- Rebased and added patchset
v4:
- Keep AES GCM PMD and fix extern issue
v3:
- Remove session configure pointer for each PMD
v2:
- Fix compilation failure
---
 doc/guides/rel_notes/release_24_03.rst        |   3 +
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c        |   8 +-
 drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h   |  15 +-
 drivers/crypto/ipsec_mb/pmd_chacha_poly.c     | 338 +----------
 .../crypto/ipsec_mb/pmd_chacha_poly_priv.h    |  28 -
 drivers/crypto/ipsec_mb/pmd_kasumi.c          | 410 +------------
 drivers/crypto/ipsec_mb/pmd_kasumi_priv.h     |  20 -
 drivers/crypto/ipsec_mb/pmd_snow3g.c          | 543 +-----------------
 drivers/crypto/ipsec_mb/pmd_snow3g_priv.h     |  21 -
 drivers/crypto/ipsec_mb/pmd_zuc.c             | 347 +----------
 drivers/crypto/ipsec_mb/pmd_zuc_priv.h        |  20 -
 11 files changed, 44 insertions(+), 1709 deletions(-)
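
The difference between the two codepaths can be seen in a minimal sketch against the
intel-ipsec-mb library itself (illustrative only, not taken from the patch or from the
PMD code): the direct API invokes an algorithm-specific entry point synchronously,
while the JOB API fills in a generic IMB_JOB descriptor and submits it to the
multi-buffer manager, which may batch it with other outstanding jobs. The helper names
below are hypothetical, it assumes intel-ipsec-mb >= 1.4, and the real PMD sets many
more per-session fields and handles errors.

/*
 * Illustrative sketch: direct API vs JOB API for one ZUC-EEA3 encryption.
 */
#include <stdint.h>
#include <intel-ipsec-mb.h>

static void
zuc_encrypt_direct(IMB_MGR *mgr, const uint8_t key[16], const uint8_t iv[16],
		   const void *in, void *out, uint32_t len)
{
	/* Direct API: one synchronous, algorithm-specific call. */
	IMB_ZUC_EEA3_1_BUFFER(mgr, key, iv, in, out, len);
}

static void
zuc_encrypt_job(IMB_MGR *mgr, const uint8_t key[16], const uint8_t iv[16],
		const void *in, void *out, uint32_t len)
{
	/*
	 * JOB API: describe the operation in an IMB_JOB and submit it to
	 * the multi-buffer manager, which may batch it with other jobs.
	 */
	IMB_JOB *job = IMB_GET_NEXT_JOB(mgr);

	job->chain_order = IMB_ORDER_CIPHER_HASH;
	job->cipher_direction = IMB_DIR_ENCRYPT;
	job->cipher_mode = IMB_CIPHER_ZUC_EEA3;
	job->hash_alg = IMB_AUTH_NULL;
	job->enc_keys = key;
	job->key_len_in_bytes = 16;
	job->iv = iv;
	job->iv_len_in_bytes = 16;
	job->src = in;
	job->dst = out;
	job->cipher_start_src_offset_in_bytes = 0;
	job->msg_len_to_cipher_in_bytes = len;

	/*
	 * Completed jobs come back in submission order; NULL means the job
	 * is still held by the manager, so flush until it is returned.
	 */
	job = IMB_SUBMIT_JOB(mgr);
	while (job == NULL)
		job = IMB_FLUSH_JOB(mgr);
	/* job->status == IMB_STATUS_COMPLETED indicates success. */
}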
  

Comments

Patrick Robb March 15, 2024, 6:25 p.m. UTC | #1
Recheck-request: iol-unit-arm64-testing

Even though the ipsec update is postponed to a later release, I'm putting
in rechecks for all series that have failures for the arm crypto tests,
now that we are building from SECLIB-IPSEC-2024.03.12.
  

Patch

diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 8fa8cf1dd6..a4309311d4 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -147,6 +147,9 @@  New Features
 * **Updated ipsec_mb crypto driver.**
 
   * Bump minimum IPSec Multi-buffer version to 1.4 for SW PMDs.
+  * Kasumi, Snow3G, ChaChaPoly and ZUC PMDs now share the job API codepath
+    with AESNI_MB PMD. Depending on the architecture, the performance of ZUC
+    crypto PMD is approximately 10% less for small fixed packet sizes.
 
 * **Updated Marvell cnxk crypto driver.**
 
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index 92703a76f0..35bd7eaa51 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -8,6 +8,8 @@ 
 
 RTE_DEFINE_PER_LCORE(pid_t, pid);
 
+uint8_t pmd_driver_id_aesni_mb;
+
 struct aesni_mb_op_buf_data {
 	struct rte_mbuf *m;
 	uint32_t offset;
@@ -692,7 +694,7 @@  aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
 }
 
 /** Configure a aesni multi-buffer session from a crypto xform chain */
-static int
+int
 aesni_mb_session_configure(IMB_MGR *mb_mgr,
 		void *priv_sess,
 		const struct rte_crypto_sym_xform *xform)
@@ -2039,7 +2041,7 @@  set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
 	return job;
 }
 
-static uint16_t
+uint16_t
 aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
@@ -2227,7 +2229,7 @@  verify_sync_dgst(struct rte_crypto_sym_vec *vec,
 	return k;
 }
 
-static uint32_t
+uint32_t
 aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
 	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
 	struct rte_crypto_sym_vec *vec)
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 51cfd7e2aa..4805627679 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -19,6 +19,19 @@ 
 
 #define MAX_NUM_SEGS 16
 
+int
+aesni_mb_session_configure(IMB_MGR * m __rte_unused, void *priv_sess,
+		const struct rte_crypto_sym_xform *xform);
+
+uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint32_t
+aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
+	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+	struct rte_crypto_sym_vec *vec);
+
 static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
 	{	/* MD5 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -715,8 +728,6 @@  static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-uint8_t pmd_driver_id_aesni_mb;
-
 struct aesni_mb_qp_data {
 	uint8_t temp_digests[IMB_MAX_JOBS][DIGEST_LENGTH_MAX];
 	/* *< Buffers used to store the digest generated
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
index 97e7cef233..7436353fc2 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
@@ -3,334 +3,7 @@ 
  */
 
 #include "pmd_chacha_poly_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
-		void *priv_sess, const struct rte_crypto_sym_xform *xform)
-{
-	struct chacha20_poly1305_session *sess = priv_sess;
-	const struct rte_crypto_sym_xform *auth_xform;
-	const struct rte_crypto_sym_xform *cipher_xform;
-	const struct rte_crypto_sym_xform *aead_xform;
-
-	uint8_t key_length;
-	const uint8_t *key;
-	enum ipsec_mb_operation mode;
-	int ret = 0;
-
-	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
-				&cipher_xform, &aead_xform);
-	if (ret)
-		return ret;
-
-	sess->op = mode;
-
-	switch (sess->op) {
-	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
-	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
-		if (aead_xform->aead.algo !=
-				RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
-			IPSEC_MB_LOG(ERR,
-			"The only combined operation supported is CHACHA20 POLY1305");
-			ret = -ENOTSUP;
-			goto error_exit;
-		}
-		/* Set IV parameters */
-		sess->iv.offset = aead_xform->aead.iv.offset;
-		sess->iv.length = aead_xform->aead.iv.length;
-		key_length = aead_xform->aead.key.length;
-		key = aead_xform->aead.key.data;
-		sess->aad_length = aead_xform->aead.aad_length;
-		sess->req_digest_length = aead_xform->aead.digest_length;
-		break;
-	default:
-		IPSEC_MB_LOG(
-		    ERR, "Wrong xform type, has to be AEAD or authentication");
-		ret = -ENOTSUP;
-		goto error_exit;
-	}
-
-	/* IV check */
-	if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
-		sess->iv.length != 0) {
-		IPSEC_MB_LOG(ERR, "Wrong IV length");
-		ret = -EINVAL;
-		goto error_exit;
-	}
-
-	/* Check key length */
-	if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
-		IPSEC_MB_LOG(ERR, "Invalid key length");
-		ret = -EINVAL;
-		goto error_exit;
-	} else {
-		memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
-	}
-
-	/* Digest check */
-	if (sess->req_digest_length !=  CHACHA20_POLY1305_DIGEST_LENGTH) {
-		IPSEC_MB_LOG(ERR, "Invalid digest length");
-		ret = -EINVAL;
-		goto error_exit;
-	} else {
-		sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
-	}
-
-error_exit:
-	return ret;
-}
-
-/**
- * Process a crypto operation, calling
- * the direct chacha poly API from the multi buffer library.
- *
- * @param	qp		queue pair
- * @param	op		symmetric crypto operation
- * @param	session		chacha poly session
- *
- * @return
- * - Return 0 if success
- */
-static int
-chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
-		struct chacha20_poly1305_session *session)
-{
-	struct chacha20_poly1305_qp_data *qp_data =
-					ipsec_mb_get_qp_private_data(qp);
-	uint8_t *src, *dst;
-	uint8_t *iv_ptr;
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	struct rte_mbuf *m_src = sym_op->m_src;
-	uint32_t offset, data_offset, data_length;
-	uint32_t part_len, data_len;
-	int total_len;
-	uint8_t *tag;
-	unsigned int oop = 0;
-
-	offset = sym_op->aead.data.offset;
-	data_offset = offset;
-	data_length = sym_op->aead.data.length;
-	RTE_ASSERT(m_src != NULL);
-
-	while (offset >= m_src->data_len && data_length != 0) {
-		offset -= m_src->data_len;
-		m_src = m_src->next;
-
-		RTE_ASSERT(m_src != NULL);
-	}
-
-	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
-	data_len = m_src->data_len - offset;
-	part_len = (data_len < data_length) ? data_len :
-			data_length;
-
-	/* In-place */
-	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
-		dst = src;
-	/* Out-of-place */
-	else {
-		oop = 1;
-		/* Segmented destination buffer is not supported
-		 * if operation is Out-of-place
-		 */
-		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
-		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
-					data_offset);
-	}
-
-	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
-				session->iv.offset);
-
-	IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
-				&qp_data->chacha20_poly1305_ctx_data,
-				iv_ptr,	sym_op->aead.aad.data,
-				(uint64_t)session->aad_length);
-
-	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
-		IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
-				session->key,
-				&qp_data->chacha20_poly1305_ctx_data,
-				dst, src, (uint64_t)part_len);
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			if (oop)
-				dst += part_len;
-			else
-				dst = src;
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			if (dst == NULL || src == NULL) {
-				IPSEC_MB_LOG(ERR, "Invalid src or dst input");
-				return -EINVAL;
-			}
-			IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
-					session->key,
-					&qp_data->chacha20_poly1305_ctx_data,
-					dst, src, (uint64_t)part_len);
-			total_len -= part_len;
-			if (total_len < 0) {
-				IPSEC_MB_LOG(ERR, "Invalid part len");
-				return -EINVAL;
-			}
-		}
-
-		tag = sym_op->aead.digest.data;
-		IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
-					&qp_data->chacha20_poly1305_ctx_data,
-					tag, session->gen_digest_length);
-
-	} else {
-		IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
-					session->key,
-					&qp_data->chacha20_poly1305_ctx_data,
-					dst, src, (uint64_t)part_len);
-
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			if (oop)
-				dst += part_len;
-			else
-				dst = src;
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			if (dst == NULL || src == NULL) {
-				IPSEC_MB_LOG(ERR, "Invalid src or dst input");
-				return -EINVAL;
-			}
-			IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
-					session->key,
-					&qp_data->chacha20_poly1305_ctx_data,
-					dst, src, (uint64_t)part_len);
-			total_len -= part_len;
-			if (total_len < 0) {
-				IPSEC_MB_LOG(ERR, "Invalid part len");
-				return -EINVAL;
-			}
-		}
-
-		tag = qp_data->temp_digest;
-		IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
-					&qp_data->chacha20_poly1305_ctx_data,
-					tag, session->gen_digest_length);
-	}
-
-	return 0;
-}
-
-/**
- * Process a completed chacha poly op
- *
- * @param qp		Queue Pair to process
- * @param op		Crypto operation
- * @param sess		Crypto session
- *
- * @return
- * - void
- */
-static void
-post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
-		struct rte_crypto_op *op,
-		struct chacha20_poly1305_session *session)
-{
-	struct chacha20_poly1305_qp_data *qp_data =
-					ipsec_mb_get_qp_private_data(qp);
-
-	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-	/* Verify digest if required */
-	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
-			session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
-		uint8_t *digest = op->sym->aead.digest.data;
-		uint8_t *tag = qp_data->temp_digest;
-
-#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
-		rte_hexdump(stdout, "auth tag (orig):",
-				digest, session->req_digest_length);
-		rte_hexdump(stdout, "auth tag (calc):",
-				tag, session->req_digest_length);
-#endif
-		if (memcmp(tag, digest,	session->req_digest_length) != 0)
-			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
-	}
-
-}
-
-/**
- * Process a completed Chacha20_poly1305 request
- *
- * @param qp		Queue Pair to process
- * @param op		Crypto operation
- * @param sess		Crypto session
- *
- * @return
- * - void
- */
-static void
-handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
-		struct rte_crypto_op *op,
-		struct chacha20_poly1305_session *sess)
-{
-	post_process_chacha20_poly1305_crypto_op(qp, op, sess);
-
-	/* Free session if a session-less crypto op */
-	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		memset(sess, 0, sizeof(struct chacha20_poly1305_session));
-		rte_mempool_put(qp->sess_mp, op->sym->session);
-		op->sym->session = NULL;
-	}
-}
-
-static uint16_t
-chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
-		struct rte_crypto_op **ops, uint16_t nb_ops)
-{
-	struct chacha20_poly1305_session *sess;
-	struct ipsec_mb_qp *qp = queue_pair;
-
-	int retval = 0;
-	unsigned int i = 0, nb_dequeued;
-
-	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
-			(void **)ops, nb_ops, NULL);
-
-	for (i = 0; i < nb_dequeued; i++) {
-
-		sess = ipsec_mb_get_session_private(qp, ops[i]);
-		if (unlikely(sess == NULL)) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->stats.dequeue_err_count++;
-			break;
-		}
-
-		retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
-		if (retval < 0) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->stats.dequeue_err_count++;
-			break;
-		}
-
-		handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
-	}
-
-	qp->stats.dequeued_count += i;
-
-	return i;
-}
+#include "pmd_aesni_mb_priv.h"
 
 struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
 	.dev_configure = ipsec_mb_config,
@@ -384,7 +57,7 @@  RTE_INIT(ipsec_mb_register_chacha20_poly1305)
 		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
 
 	chacha_poly_data->caps = chacha20_poly1305_capabilities;
-	chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
+	chacha_poly_data->dequeue_burst = aesni_mb_dequeue_burst;
 	chacha_poly_data->feature_flags =
 		RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
@@ -395,10 +68,9 @@  RTE_INIT(ipsec_mb_register_chacha20_poly1305)
 		RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
 	chacha_poly_data->internals_priv_size = 0;
 	chacha_poly_data->ops = &chacha20_poly1305_pmd_ops;
-	chacha_poly_data->qp_priv_size =
-			sizeof(struct chacha20_poly1305_qp_data);
+	chacha_poly_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
 	chacha_poly_data->session_configure =
-			chacha20_poly1305_session_configure;
+			aesni_mb_session_configure;
 	chacha_poly_data->session_priv_size =
-			sizeof(struct chacha20_poly1305_session);
+			sizeof(struct aesni_mb_session);
 }
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
index 842f62f5d1..e668bfe07f 100644
--- a/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly_priv.h
@@ -7,9 +7,7 @@ 
 
 #include "ipsec_mb_private.h"
 
-#define CHACHA20_POLY1305_IV_LENGTH 12
 #define CHACHA20_POLY1305_DIGEST_LENGTH 16
-#define CHACHA20_POLY1305_KEY_SIZE  32
 
 static const
 struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
@@ -45,30 +43,4 @@  struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
 
 uint8_t pmd_driver_id_chacha20_poly1305;
 
-/** CHACHA20 POLY1305 private session structure */
-struct chacha20_poly1305_session {
-	struct {
-		uint16_t length;
-		uint16_t offset;
-	} iv;
-	/**< IV parameters */
-	uint16_t aad_length;
-	/**< AAD length */
-	uint16_t req_digest_length;
-	/**< Requested digest length */
-	uint16_t gen_digest_length;
-	/**< Generated digest length */
-	uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
-	enum ipsec_mb_operation op;
-} __rte_cache_aligned;
-
-struct chacha20_poly1305_qp_data {
-	struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
-	uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
-	/**< Buffer used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-};
-
 #endif /* _PMD_CHACHA_POLY_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c b/drivers/crypto/ipsec_mb/pmd_kasumi.c
index 70536ec3dc..c3571ec81b 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi.c
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
@@ -10,406 +10,7 @@ 
 #include <rte_malloc.h>
 
 #include "pmd_kasumi_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
-			  const struct rte_crypto_sym_xform *xform)
-{
-	const struct rte_crypto_sym_xform *auth_xform = NULL;
-	const struct rte_crypto_sym_xform *cipher_xform = NULL;
-	enum ipsec_mb_operation mode;
-	struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
-	/* Select Crypto operation - hash then cipher / cipher then hash */
-	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
-				&cipher_xform, NULL);
-
-	if (ret)
-		return ret;
-
-	if (cipher_xform) {
-		/* Only KASUMI F8 supported */
-		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
-			IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
-			return -ENOTSUP;
-		}
-
-		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-		if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
-			IPSEC_MB_LOG(ERR, "Wrong IV length");
-			return -EINVAL;
-		}
-
-		/* Initialize key */
-		IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
-					      cipher_xform->cipher.key.data,
-					      &sess->pKeySched_cipher);
-	}
-
-	if (auth_xform) {
-		/* Only KASUMI F9 supported */
-		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
-			IPSEC_MB_LOG(ERR, "Unsupported authentication");
-			return -ENOTSUP;
-		}
-
-		if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
-			IPSEC_MB_LOG(ERR, "Wrong digest length");
-			return -EINVAL;
-		}
-
-		sess->auth_op = auth_xform->auth.op;
-
-		/* Initialize key */
-		IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
-					      &sess->pKeySched_hash);
-	}
-
-	sess->op = mode;
-	return ret;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
-			  struct kasumi_session *session, uint8_t num_ops)
-{
-	unsigned int i;
-	uint8_t processed_ops = 0;
-	const void *src[num_ops];
-	void *dst[num_ops];
-	uint8_t *iv_ptr;
-	uint64_t iv[num_ops];
-	uint32_t num_bytes[num_ops];
-
-	for (i = 0; i < num_ops; i++) {
-		src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
-						 uint8_t *,
-						 (ops[i]->sym->cipher.data.offset >> 3));
-		dst[i] = ops[i]->sym->m_dst
-			     ? rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst,
-						       uint8_t *,
-						       (ops[i]->sym->cipher.data.offset >> 3))
-			     : rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
-						       uint8_t *,
-						       (ops[i]->sym->cipher.data.offset >> 3));
-		iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-						    session->cipher_iv_offset);
-		iv[i] = *((uint64_t *)(iv_ptr));
-		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
-		processed_ops++;
-	}
-
-	if (processed_ops != 0)
-		IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
-					iv, src, dst, num_bytes,
-					processed_ops);
-
-	return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
-			      struct kasumi_session *session)
-{
-	uint8_t *src, *dst;
-	uint8_t *iv_ptr;
-	uint64_t iv;
-	uint32_t length_in_bits, offset_in_bits;
-
-	offset_in_bits = op->sym->cipher.data.offset;
-	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
-	if (op->sym->m_dst == NULL)
-		dst = src;
-	else
-		dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
-	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
-					    session->cipher_iv_offset);
-	iv = *((uint64_t *)(iv_ptr));
-	length_in_bits = op->sym->cipher.data.length;
-
-	IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
-				    src, dst, length_in_bits, offset_in_bits);
-
-	return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
-			struct kasumi_session *session, uint8_t num_ops)
-{
-	unsigned int i;
-	uint8_t processed_ops = 0;
-	uint8_t *src, *dst;
-	uint32_t length_in_bits;
-	uint32_t num_bytes;
-	struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
-	for (i = 0; i < num_ops; i++) {
-		/* Data must be byte aligned */
-		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			IPSEC_MB_LOG(ERR, "Invalid Offset");
-			break;
-		}
-
-		length_in_bits = ops[i]->sym->auth.data.length;
-
-		src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
-					      (ops[i]->sym->auth.data.offset >> 3));
-		/* Direction from next bit after end of message */
-		num_bytes = length_in_bits >> 3;
-
-		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
-			dst = qp_data->temp_digest;
-			IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
-						&session->pKeySched_hash, src,
-						num_bytes, dst);
-
-			/* Verify digest. */
-			if (memcmp(dst, ops[i]->sym->auth.digest.data,
-				    KASUMI_DIGEST_LENGTH)
-			    != 0)
-				ops[i]->status
-				    = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else {
-			dst = ops[i]->sym->auth.digest.data;
-
-			IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
-						&session->pKeySched_hash, src,
-						num_bytes, dst);
-		}
-		processed_ops++;
-	}
-
-	return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
-		struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
-	unsigned int i;
-	unsigned int processed_ops;
-
-	switch (session->op) {
-	case IPSEC_MB_OP_ENCRYPT_ONLY:
-	case IPSEC_MB_OP_DECRYPT_ONLY:
-		processed_ops
-		    = process_kasumi_cipher_op(qp, ops, session, num_ops);
-		break;
-	case IPSEC_MB_OP_HASH_GEN_ONLY:
-	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
-		processed_ops
-		    = process_kasumi_hash_op(qp, ops, session, num_ops);
-		break;
-	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
-	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
-		processed_ops
-		    = process_kasumi_cipher_op(qp, ops, session, num_ops);
-		process_kasumi_hash_op(qp, ops, session, processed_ops);
-		break;
-	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
-	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
-		processed_ops
-		    = process_kasumi_hash_op(qp, ops, session, num_ops);
-		process_kasumi_cipher_op(qp, ops, session, processed_ops);
-		break;
-	default:
-		/* Operation not supported. */
-		processed_ops = 0;
-	}
-
-	for (i = 0; i < num_ops; i++) {
-		/*
-		 * If there was no error/authentication failure,
-		 * change status to successful.
-		 */
-		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-		/* Free session if a session-less crypto op. */
-		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-			memset(session, 0, sizeof(struct kasumi_session));
-			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
-			ops[i]->sym->session = NULL;
-		}
-	}
-	return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
-		struct ipsec_mb_qp *qp)
-{
-	unsigned int processed_op;
-
-	switch (session->op) {
-		/* case KASUMI_OP_ONLY_CIPHER: */
-	case IPSEC_MB_OP_ENCRYPT_ONLY:
-	case IPSEC_MB_OP_DECRYPT_ONLY:
-		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
-		break;
-	/* case KASUMI_OP_ONLY_AUTH: */
-	case IPSEC_MB_OP_HASH_GEN_ONLY:
-	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
-		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
-		break;
-	/* case KASUMI_OP_CIPHER_AUTH: */
-	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
-		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
-		if (processed_op == 1)
-			process_kasumi_hash_op(qp, &op, session, 1);
-		break;
-	/* case KASUMI_OP_AUTH_CIPHER: */
-	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
-		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
-		if (processed_op == 1)
-			process_kasumi_cipher_op_bit(qp, op, session);
-		break;
-	default:
-		/* Operation not supported. */
-		processed_op = 0;
-	}
-
-	/*
-	 * If there was no error/authentication failure,
-	 * change status to successful.
-	 */
-	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
-	/* Free session if a session-less crypto op. */
-	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
-			sizeof(struct kasumi_session));
-		rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
-		op->sym->session = NULL;
-	}
-	return processed_op;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
-			  uint16_t nb_ops)
-{
-	struct rte_crypto_op *c_ops[nb_ops];
-	struct rte_crypto_op *curr_c_op = NULL;
-
-	struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
-	struct ipsec_mb_qp *qp = queue_pair;
-	unsigned int i;
-	uint8_t burst_size = 0;
-	uint8_t processed_ops;
-	unsigned int nb_dequeued;
-
-	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
-					      (void **)ops, nb_ops, NULL);
-	for (i = 0; i < nb_dequeued; i++) {
-		curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
-		if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
-		    || (curr_c_op->sym->m_dst != NULL
-			&& !rte_pktmbuf_is_contiguous(
-			    curr_c_op->sym->m_dst))) {
-			IPSEC_MB_LOG(ERR,
-				      "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
-				      curr_c_op);
-			curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			break;
-		}
-#endif
-
-		/* Set status as enqueued (not processed yet) by default. */
-		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
-		curr_sess = (struct kasumi_session *)
-			ipsec_mb_get_session_private(qp, curr_c_op);
-		if (unlikely(curr_sess == NULL
-			      || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
-			curr_c_op->status
-			    = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-			break;
-		}
-
-		/* If length/offset is at bit-level, process this buffer alone.
-		 */
-		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
-		    || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
-			/* Process the ops of the previous session. */
-			if (prev_sess != NULL) {
-				processed_ops = process_ops(c_ops, prev_sess,
-						qp, burst_size);
-				if (processed_ops < burst_size) {
-					burst_size = 0;
-					break;
-				}
-
-				burst_size = 0;
-				prev_sess = NULL;
-			}
-
-			processed_ops = process_op_bit(curr_c_op,
-					curr_sess, qp);
-			if (processed_ops != 1)
-				break;
-
-			continue;
-		}
-
-		/* Batch ops that share the same session. */
-		if (prev_sess == NULL) {
-			prev_sess = curr_sess;
-			c_ops[burst_size++] = curr_c_op;
-		} else if (curr_sess == prev_sess) {
-			c_ops[burst_size++] = curr_c_op;
-			/*
-			 * When there are enough ops to process in a batch,
-			 * process them, and start a new batch.
-			 */
-			if (burst_size == KASUMI_MAX_BURST) {
-				processed_ops = process_ops(c_ops, prev_sess,
-						qp, burst_size);
-				if (processed_ops < burst_size) {
-					burst_size = 0;
-					break;
-				}
-
-				burst_size = 0;
-				prev_sess = NULL;
-			}
-		} else {
-			/*
-			 * Different session, process the ops
-			 * of the previous session.
-			 */
-			processed_ops = process_ops(c_ops, prev_sess, qp,
-					burst_size);
-			if (processed_ops < burst_size) {
-				burst_size = 0;
-				break;
-			}
-
-			burst_size = 0;
-			prev_sess = curr_sess;
-
-			c_ops[burst_size++] = curr_c_op;
-		}
-	}
-
-	if (burst_size != 0) {
-		/* Process the crypto ops of the last session. */
-		processed_ops = process_ops(c_ops, prev_sess, qp, burst_size);
-	}
-
-	qp->stats.dequeued_count += i;
-	return i;
-}
+#include "pmd_aesni_mb_priv.h"
 
 struct rte_cryptodev_ops kasumi_pmd_ops = {
 	.dev_configure = ipsec_mb_config,
@@ -460,7 +61,7 @@  RTE_INIT(ipsec_mb_register_kasumi)
 	    = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
 
 	kasumi_data->caps = kasumi_capabilities;
-	kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+	kasumi_data->dequeue_burst = aesni_mb_dequeue_burst;
 	kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
 				| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
 				| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -469,7 +70,8 @@  RTE_INIT(ipsec_mb_register_kasumi)
 				| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
 	kasumi_data->internals_priv_size = 0;
 	kasumi_data->ops = &kasumi_pmd_ops;
-	kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
-	kasumi_data->session_configure = kasumi_session_configure;
-	kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+	kasumi_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+	kasumi_data->session_configure = aesni_mb_session_configure;
+	kasumi_data->session_priv_size =
+			sizeof(struct aesni_mb_session);
 }
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
index 8db1d1cc5b..3223cf1a14 100644
--- a/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi_priv.h
@@ -9,8 +9,6 @@ 
 
 #define KASUMI_KEY_LENGTH 16
 #define KASUMI_IV_LENGTH 8
-#define KASUMI_MAX_BURST 4
-#define BYTE_LEN 8
 #define KASUMI_DIGEST_LENGTH 4
 
 uint8_t pmd_driver_id_kasumi;
@@ -60,22 +58,4 @@  static const struct rte_cryptodev_capabilities kasumi_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-/** KASUMI private session structure */
-struct kasumi_session {
-	/* Keys have to be 16-byte aligned */
-	kasumi_key_sched_t pKeySched_cipher;
-	kasumi_key_sched_t pKeySched_hash;
-	enum ipsec_mb_operation op;
-	enum rte_crypto_auth_operation auth_op;
-	uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
-struct kasumi_qp_data {
-	uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
-	/* *< Buffers used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-};
-
 #endif /* _PMD_KASUMI_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g.c b/drivers/crypto/ipsec_mb/pmd_snow3g.c
index a96779f059..957f6aade8 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g.c
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
@@ -3,539 +3,7 @@ 
  */
 
 #include "pmd_snow3g_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
-		const struct rte_crypto_sym_xform *xform)
-{
-	struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
-	const struct rte_crypto_sym_xform *auth_xform = NULL;
-	const struct rte_crypto_sym_xform *cipher_xform = NULL;
-	enum ipsec_mb_operation mode;
-
-	/* Select Crypto operation - hash then cipher / cipher then hash */
-	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
-				&cipher_xform, NULL);
-	if (ret)
-		return ret;
-
-	if (cipher_xform) {
-		/* Only SNOW 3G UEA2 supported */
-		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
-			return -ENOTSUP;
-
-		if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
-			IPSEC_MB_LOG(ERR, "Wrong IV length");
-			return -EINVAL;
-		}
-		if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
-			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
-			return -ENOMEM;
-		}
-
-		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
-		/* Initialize key */
-		IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
-					&sess->pKeySched_cipher);
-	}
-
-	if (auth_xform) {
-		/* Only SNOW 3G UIA2 supported */
-		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
-			return -ENOTSUP;
-
-		if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
-			IPSEC_MB_LOG(ERR, "Wrong digest length");
-			return -EINVAL;
-		}
-		if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
-			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
-			return -ENOMEM;
-		}
-
-		sess->auth_op = auth_xform->auth.op;
-
-		if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
-			IPSEC_MB_LOG(ERR, "Wrong IV length");
-			return -EINVAL;
-		}
-		sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
-		/* Initialize key */
-		IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
-					&sess->pKeySched_hash);
-	}
-
-	sess->op = mode;
-
-	return 0;
-}
-
-/** Check if conditions are met for digest-appended operations */
-static uint8_t *
-snow3g_digest_appended_in_src(struct rte_crypto_op *op)
-{
-	unsigned int auth_size, cipher_size;
-
-	auth_size = (op->sym->auth.data.offset >> 3) +
-		(op->sym->auth.data.length >> 3);
-	cipher_size = (op->sym->cipher.data.offset >> 3) +
-		(op->sym->cipher.data.length >> 3);
-
-	if (auth_size < cipher_size)
-		return rte_pktmbuf_mtod_offset(op->sym->m_src,
-				uint8_t *, auth_size);
-
-	return NULL;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
-		struct snow3g_session *session,
-		uint8_t num_ops)
-{
-	uint32_t i;
-	uint8_t processed_ops = 0;
-	const void *src[SNOW3G_MAX_BURST] = {NULL};
-	void *dst[SNOW3G_MAX_BURST] = {NULL};
-	uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
-	const void *iv[SNOW3G_MAX_BURST] = {NULL};
-	uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
-	uint32_t cipher_off, cipher_len;
-	int unencrypted_bytes = 0;
-
-	for (i = 0; i < num_ops; i++) {
-
-		cipher_off = ops[i]->sym->cipher.data.offset >> 3;
-		cipher_len = ops[i]->sym->cipher.data.length >> 3;
-		src[i] = rte_pktmbuf_mtod_offset(
-			ops[i]->sym->m_src,	uint8_t *, cipher_off);
-
-		/* If out-of-place operation */
-		if (ops[i]->sym->m_dst &&
-			ops[i]->sym->m_src != ops[i]->sym->m_dst) {
-			dst[i] = rte_pktmbuf_mtod_offset(
-				ops[i]->sym->m_dst, uint8_t *, cipher_off);
-
-			/* In case of out-of-place, auth-cipher operation
-			 * with partial encryption of the digest, copy
-			 * the remaining, unencrypted part.
-			 */
-			if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
-			    || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
-				unencrypted_bytes =
-					(ops[i]->sym->auth.data.offset >> 3) +
-					(ops[i]->sym->auth.data.length >> 3) +
-					(SNOW3G_DIGEST_LENGTH) -
-					cipher_off - cipher_len;
-			if (unencrypted_bytes > 0)
-				rte_memcpy(
-					rte_pktmbuf_mtod_offset(
-						ops[i]->sym->m_dst, uint8_t *,
-						cipher_off + cipher_len),
-					rte_pktmbuf_mtod_offset(
-						ops[i]->sym->m_src, uint8_t *,
-						cipher_off + cipher_len),
-					unencrypted_bytes);
-		} else
-			dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
-						uint8_t *, cipher_off);
-
-		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-				session->cipher_iv_offset);
-		num_bytes[i] = cipher_len;
-		processed_ops++;
-	}
-
-	IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
-			src, dst, num_bytes, processed_ops);
-
-	/* Take care of the raw digest data in src buffer */
-	for (i = 0; i < num_ops; i++) {
-		if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
-			session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
-				ops[i]->sym->m_dst != NULL) {
-			digest_appended[i] =
-				snow3g_digest_appended_in_src(ops[i]);
-			/* Clear unencrypted digest from
-			 * the src buffer
-			 */
-			if (digest_appended[i] != NULL)
-				memset(digest_appended[i],
-					0, SNOW3G_DIGEST_LENGTH);
-		}
-	}
-	return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
-		struct rte_crypto_op *op,
-		struct snow3g_session *session)
-{
-	uint8_t *src, *dst;
-	uint8_t *iv;
-	uint32_t length_in_bits, offset_in_bits;
-	int unencrypted_bytes = 0;
-
-	offset_in_bits = op->sym->cipher.data.offset;
-	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
-	if (op->sym->m_dst == NULL) {
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-		IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
-		return 0;
-	}
-	length_in_bits = op->sym->cipher.data.length;
-	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
-	/* In case of out-of-place, auth-cipher operation
-	 * with partial encryption of the digest, copy
-	 * the remaining, unencrypted part.
-	 */
-	if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
-		session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
-		unencrypted_bytes =
-			(op->sym->auth.data.offset >> 3) +
-			(op->sym->auth.data.length >> 3) +
-			(SNOW3G_DIGEST_LENGTH) -
-			(offset_in_bits >> 3) -
-			(length_in_bits >> 3);
-	if (unencrypted_bytes > 0)
-		rte_memcpy(
-			rte_pktmbuf_mtod_offset(
-				op->sym->m_dst, uint8_t *,
-				(length_in_bits >> 3)),
-			rte_pktmbuf_mtod_offset(
-				op->sym->m_src, uint8_t *,
-				(length_in_bits >> 3)),
-				unencrypted_bytes);
-
-	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-				session->cipher_iv_offset);
-
-	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
-			src, dst, length_in_bits, offset_in_bits);
-
-	return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
-		struct snow3g_session *session,
-		uint8_t num_ops)
-{
-	uint32_t i;
-	uint8_t processed_ops = 0;
-	uint8_t *src, *dst;
-	uint32_t length_in_bits;
-	uint8_t *iv;
-	uint8_t digest_appended = 0;
-	struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
-	for (i = 0; i < num_ops; i++) {
-		/* Data must be byte aligned */
-		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			IPSEC_MB_LOG(ERR, "Offset");
-			break;
-		}
-
-		dst = NULL;
-
-		length_in_bits = ops[i]->sym->auth.data.length;
-
-		src = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
-					      (ops[i]->sym->auth.data.offset >> 3));
-		iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-				session->auth_iv_offset);
-
-		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
-			dst = qp_data->temp_digest;
-			 /* Handle auth cipher verify oop case*/
-			if ((session->op ==
-				IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
-				session->op ==
-				IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
-				ops[i]->sym->m_dst != NULL)
-				src = rte_pktmbuf_mtod_offset(
-					ops[i]->sym->m_dst, uint8_t *,
-					ops[i]->sym->auth.data.offset >> 3);
-
-			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
-					&session->pKeySched_hash,
-					iv, src, length_in_bits, dst);
-			/* Verify digest. */
-			if (memcmp(dst, ops[i]->sym->auth.digest.data,
-					SNOW3G_DIGEST_LENGTH) != 0)
-				ops[i]->status =
-					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else {
-			if (session->op ==
-				IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
-				session->op ==
-				IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
-				dst = snow3g_digest_appended_in_src(ops[i]);
-
-			if (dst != NULL)
-				digest_appended = 1;
-			else
-				dst = ops[i]->sym->auth.digest.data;
-
-			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
-					&session->pKeySched_hash,
-					iv, src, length_in_bits, dst);
-
-			/* Copy back digest from src to auth.digest.data */
-			if (digest_appended)
-				rte_memcpy(ops[i]->sym->auth.digest.data,
-					dst, SNOW3G_DIGEST_LENGTH);
-		}
-		processed_ops++;
-	}
-
-	return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
-		struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
-	uint32_t i;
-	uint32_t processed_ops;
-
-#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
-	for (i = 0; i < num_ops; i++) {
-		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
-				(ops[i]->sym->m_dst != NULL &&
-				!rte_pktmbuf_is_contiguous(
-						ops[i]->sym->m_dst))) {
-			IPSEC_MB_LOG(ERR,
-				"PMD supports only contiguous mbufs, "
-				"op (%p) provides noncontiguous mbuf as "
-				"source/destination buffer.\n", ops[i]);
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			return 0;
-		}
-	}
-#endif
-
-	switch (session->op) {
-	case IPSEC_MB_OP_ENCRYPT_ONLY:
-	case IPSEC_MB_OP_DECRYPT_ONLY:
-		processed_ops = process_snow3g_cipher_op(qp, ops,
-				session, num_ops);
-		break;
-	case IPSEC_MB_OP_HASH_GEN_ONLY:
-	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
-		processed_ops = process_snow3g_hash_op(qp, ops, session,
-				num_ops);
-		break;
-	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
-	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
-		processed_ops = process_snow3g_cipher_op(qp, ops, session,
-				num_ops);
-		process_snow3g_hash_op(qp, ops, session, processed_ops);
-		break;
-	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
-	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
-		processed_ops = process_snow3g_hash_op(qp, ops, session,
-				num_ops);
-		process_snow3g_cipher_op(qp, ops, session, processed_ops);
-		break;
-	default:
-		/* Operation not supported. */
-		processed_ops = 0;
-	}
-
-	for (i = 0; i < num_ops; i++) {
-		/*
-		 * If there was no error/authentication failure,
-		 * change status to successful.
-		 */
-		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-		/* Free session if a session-less crypto op. */
-		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-			memset(session, 0, sizeof(struct snow3g_session));
-			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
-			ops[i]->sym->session = NULL;
-		}
-	}
-	return processed_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
-		struct ipsec_mb_qp *qp)
-{
-	unsigned int processed_op;
-	int ret;
-
-	switch (session->op) {
-	case IPSEC_MB_OP_ENCRYPT_ONLY:
-	case IPSEC_MB_OP_DECRYPT_ONLY:
-
-		processed_op = process_snow3g_cipher_op_bit(qp, op,
-				session);
-		break;
-	case IPSEC_MB_OP_HASH_GEN_ONLY:
-	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
-		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
-		break;
-	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
-	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
-		processed_op = process_snow3g_cipher_op_bit(qp, op, session);
-		if (processed_op == 1)
-			process_snow3g_hash_op(qp, &op, session, 1);
-		break;
-	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
-	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
-		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
-		if (processed_op == 1)
-			process_snow3g_cipher_op_bit(qp, op, session);
-		break;
-	default:
-		/* Operation not supported. */
-		processed_op = 0;
-	}
-
-	/*
-	 * If there was no error/authentication failure,
-	 * change status to successful.
-	 */
-	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
-	/* Free session if a session-less crypto op. */
-	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		memset(CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session), 0,
-			sizeof(struct snow3g_session));
-		rte_mempool_put(qp->sess_mp, (void *)op->sym->session);
-		op->sym->session = NULL;
-	}
-
-	if (unlikely(processed_op != 1))
-		return 0;
-
-	ret = rte_ring_enqueue(qp->ingress_queue, op);
-	if (ret != 0)
-		return ret;
-
-	return 1;
-}
-
-static uint16_t
-snow3g_pmd_dequeue_burst(void *queue_pair,
-		struct rte_crypto_op **ops, uint16_t nb_ops)
-{
-	struct ipsec_mb_qp *qp = queue_pair;
-	struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
-	struct rte_crypto_op *curr_c_op;
-
-	struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
-	uint32_t i;
-	uint8_t burst_size = 0;
-	uint8_t processed_ops;
-	uint32_t nb_dequeued;
-
-	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
-			(void **)ops, nb_ops, NULL);
-
-	for (i = 0; i < nb_dequeued; i++) {
-		curr_c_op = ops[i];
-
-		/* Set status as enqueued (not processed yet) by default. */
-		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
-		curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
-		if (unlikely(curr_sess == NULL ||
-				curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
-			curr_c_op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-			break;
-		}
-
-		/* If length/offset is at bit-level,
-		 * process this buffer alone.
-		 */
-		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
-				|| ((curr_c_op->sym->cipher.data.offset
-					% BYTE_LEN) != 0)) {
-			/* Process the ops of the previous session. */
-			if (prev_sess != NULL) {
-				processed_ops = process_ops(c_ops, prev_sess,
-						qp, burst_size);
-				if (processed_ops < burst_size) {
-					burst_size = 0;
-					break;
-				}
-
-				burst_size = 0;
-				prev_sess = NULL;
-			}
-
-			processed_ops = process_op_bit(curr_c_op, curr_sess, qp);
-			if (processed_ops != 1)
-				break;
-
-			continue;
-		}
-
-		/* Batch ops that share the same session. */
-		if (prev_sess == NULL) {
-			prev_sess = curr_sess;
-			c_ops[burst_size++] = curr_c_op;
-		} else if (curr_sess == prev_sess) {
-			c_ops[burst_size++] = curr_c_op;
-			/*
-			 * When there are enough ops to process in a batch,
-			 * process them, and start a new batch.
-			 */
-			if (burst_size == SNOW3G_MAX_BURST) {
-				processed_ops = process_ops(c_ops, prev_sess,
-						qp, burst_size);
-				if (processed_ops < burst_size) {
-					burst_size = 0;
-					break;
-				}
-
-				burst_size = 0;
-				prev_sess = NULL;
-			}
-		} else {
-			/*
-			 * Different session, process the ops
-			 * of the previous session.
-			 */
-			processed_ops = process_ops(c_ops, prev_sess,
-					qp, burst_size);
-			if (processed_ops < burst_size) {
-				burst_size = 0;
-				break;
-			}
-
-			burst_size = 0;
-			prev_sess = curr_sess;
-
-			c_ops[burst_size++] = curr_c_op;
-		}
-	}
-
-	if (burst_size != 0) {
-		/* Process the crypto ops of the last session. */
-		processed_ops = process_ops(c_ops, prev_sess,
-				qp, burst_size);
-	}
-
-	qp->stats.dequeued_count += i;
-	return i;
-}
+#include "pmd_aesni_mb_priv.h"
 
 struct rte_cryptodev_ops snow3g_pmd_ops = {
 	.dev_configure = ipsec_mb_config,
@@ -586,7 +54,7 @@  RTE_INIT(ipsec_mb_register_snow3g)
 		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];
 
 	snow3g_data->caps = snow3g_capabilities;
-	snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
+	snow3g_data->dequeue_burst = aesni_mb_dequeue_burst;
 	snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
@@ -595,7 +63,8 @@  RTE_INIT(ipsec_mb_register_snow3g)
 			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
 	snow3g_data->internals_priv_size = 0;
 	snow3g_data->ops = &snow3g_pmd_ops;
-	snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
-	snow3g_data->session_configure = snow3g_session_configure;
-	snow3g_data->session_priv_size = sizeof(struct snow3g_session);
+	snow3g_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+	snow3g_data->session_configure = aesni_mb_session_configure;
+	snow3g_data->session_priv_size =
+			sizeof(struct aesni_mb_session);
 }
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
index ca1ce7f9d6..3ceb33b602 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g_priv.h
@@ -8,10 +8,7 @@ 
 #include "ipsec_mb_private.h"
 
 #define SNOW3G_IV_LENGTH 16
-#define SNOW3G_MAX_BURST 8
-#define BYTE_LEN 8
 #define SNOW3G_DIGEST_LENGTH 4
-#define SNOW3G_MAX_KEY_SIZE  128
 
 uint8_t pmd_driver_id_snow3g;
 
@@ -64,22 +61,4 @@  static const struct rte_cryptodev_capabilities snow3g_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-/** SNOW 3G private session structure */
-struct snow3g_session {
-	enum ipsec_mb_operation op;
-	enum rte_crypto_auth_operation auth_op;
-	snow3g_key_schedule_t pKeySched_cipher;
-	snow3g_key_schedule_t pKeySched_hash;
-	uint16_t cipher_iv_offset;
-	uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-struct snow3g_qp_data {
-	uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
-	/**< Buffer used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-};
-
 #endif /* _PMD_SNOW3G_PRIV_H_ */
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc.c b/drivers/crypto/ipsec_mb/pmd_zuc.c
index 44781be1d1..b72191c7a7 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc.c
+++ b/drivers/crypto/ipsec_mb/pmd_zuc.c
@@ -3,343 +3,7 @@ 
  */
 
 #include "pmd_zuc_priv.h"
-
-/** Parse crypto xform chain and set private session parameters. */
-static int
-zuc_session_configure(__rte_unused IMB_MGR * mgr, void *zuc_sess,
-		const struct rte_crypto_sym_xform *xform)
-{
-	struct zuc_session *sess = (struct zuc_session *) zuc_sess;
-	const struct rte_crypto_sym_xform *auth_xform = NULL;
-	const struct rte_crypto_sym_xform *cipher_xform = NULL;
-	enum ipsec_mb_operation mode;
-	/* Select Crypto operation - hash then cipher / cipher then hash */
-	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
-				&cipher_xform, NULL);
-
-	if (ret)
-		return ret;
-
-	if (cipher_xform) {
-		/* Only ZUC EEA3 supported */
-		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
-			return -ENOTSUP;
-
-		if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
-			IPSEC_MB_LOG(ERR, "Wrong IV length");
-			return -EINVAL;
-		}
-		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-
-		/* Copy the key */
-		memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
-				ZUC_IV_KEY_LENGTH);
-	}
-
-	if (auth_xform) {
-		/* Only ZUC EIA3 supported */
-		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
-			return -ENOTSUP;
-
-		if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
-			IPSEC_MB_LOG(ERR, "Wrong digest length");
-			return -EINVAL;
-		}
-
-		sess->auth_op = auth_xform->auth.op;
-
-		if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
-			IPSEC_MB_LOG(ERR, "Wrong IV length");
-			return -EINVAL;
-		}
-		sess->auth_iv_offset = auth_xform->auth.iv.offset;
-
-		/* Copy the key */
-		memcpy(sess->pKey_hash, auth_xform->auth.key.data,
-				ZUC_IV_KEY_LENGTH);
-	}
-
-	sess->op = mode;
-	return 0;
-}
-
-/** Encrypt/decrypt mbufs. */
-static uint8_t
-process_zuc_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
-		struct zuc_session **sessions,
-		uint8_t num_ops)
-{
-	unsigned int i;
-	uint8_t processed_ops = 0;
-	const void *src[ZUC_MAX_BURST];
-	void *dst[ZUC_MAX_BURST];
-	const void *iv[ZUC_MAX_BURST];
-	uint32_t num_bytes[ZUC_MAX_BURST];
-	const void *cipher_keys[ZUC_MAX_BURST];
-	struct zuc_session *sess;
-
-	for (i = 0; i < num_ops; i++) {
-		if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
-				|| ((ops[i]->sym->cipher.data.offset
-					% BYTE_LEN) != 0)) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			IPSEC_MB_LOG(ERR, "Data Length or offset");
-			break;
-		}
-
-		sess = sessions[i];
-
-#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
-		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
-				(ops[i]->sym->m_dst != NULL &&
-				!rte_pktmbuf_is_contiguous(
-						ops[i]->sym->m_dst))) {
-			IPSEC_MB_LOG(ERR, "PMD supports only "
-				" contiguous mbufs, op (%p) "
-				"provides noncontiguous mbuf "
-				"as source/destination buffer.\n",
-				"PMD supports only contiguous mbufs, "
-				"op (%p) provides noncontiguous mbuf "
-				"as source/destination buffer.\n",
-				ops[i]);
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			break;
-		}
-#endif
-
-		src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
-						 uint8_t *,
-						 (ops[i]->sym->cipher.data.offset >> 3));
-		dst[i] = ops[i]->sym->m_dst ?
-			rte_pktmbuf_mtod_offset(ops[i]->sym->m_dst, uint8_t *,
-						(ops[i]->sym->cipher.data.offset >> 3)) :
-			rte_pktmbuf_mtod_offset(ops[i]->sym->m_src, uint8_t *,
-						(ops[i]->sym->cipher.data.offset >> 3));
-		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-				sess->cipher_iv_offset);
-		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
-		cipher_keys[i] = sess->pKey_cipher;
-
-		processed_ops++;
-	}
-
-	IMB_ZUC_EEA3_N_BUFFER(qp->mb_mgr, (const void **)cipher_keys,
-			(const void **)iv, (const void **)src, (void **)dst,
-			num_bytes, processed_ops);
-
-	return processed_ops;
-}
-
-/** Generate/verify hash from mbufs. */
-static int
-process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
-		struct zuc_session **sessions,
-		uint8_t num_ops)
-{
-	unsigned int i;
-	uint8_t processed_ops = 0;
-	uint8_t *src[ZUC_MAX_BURST] = { 0 };
-	uint32_t *dst[ZUC_MAX_BURST];
-	uint32_t length_in_bits[ZUC_MAX_BURST] = { 0 };
-	uint8_t *iv[ZUC_MAX_BURST] = { 0 };
-	const void *hash_keys[ZUC_MAX_BURST] = { 0 };
-	struct zuc_session *sess;
-	struct zuc_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
-
-
-	for (i = 0; i < num_ops; i++) {
-		/* Data must be byte aligned */
-		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			IPSEC_MB_LOG(ERR, "Offset");
-			break;
-		}
-
-		sess = sessions[i];
-
-		length_in_bits[i] = ops[i]->sym->auth.data.length;
-
-		src[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
-						 uint8_t *,
-						 (ops[i]->sym->auth.data.offset >> 3));
-		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-				sess->auth_iv_offset);
-
-		hash_keys[i] = sess->pKey_hash;
-		if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
-			dst[i] = (uint32_t *)qp_data->temp_digest[i];
-		else
-			dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;
-
-		processed_ops++;
-	}
-
-	IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
-			(const void * const *)iv, (const void * const *)src,
-			length_in_bits, dst, processed_ops);
-
-	/*
-	 * If tag needs to be verified, compare generated tag
-	 * with attached tag
-	 */
-	for (i = 0; i < processed_ops; i++)
-		if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
-			if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
-					ZUC_DIGEST_LENGTH) != 0)
-				ops[i]->status =
-					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
-	return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same operation type. */
-static int
-process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
-		struct zuc_session **sessions,
-		struct ipsec_mb_qp *qp, uint8_t num_ops)
-{
-	unsigned int i;
-	unsigned int processed_ops = 0;
-
-	switch (op_type) {
-	case IPSEC_MB_OP_ENCRYPT_ONLY:
-	case IPSEC_MB_OP_DECRYPT_ONLY:
-		processed_ops = process_zuc_cipher_op(qp, ops,
-				sessions, num_ops);
-		break;
-	case IPSEC_MB_OP_HASH_GEN_ONLY:
-	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
-		processed_ops = process_zuc_hash_op(qp, ops, sessions,
-				num_ops);
-		break;
-	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
-	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
-		processed_ops = process_zuc_cipher_op(qp, ops, sessions,
-				num_ops);
-		process_zuc_hash_op(qp, ops, sessions, processed_ops);
-		break;
-	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
-	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
-		processed_ops = process_zuc_hash_op(qp, ops, sessions,
-				num_ops);
-		process_zuc_cipher_op(qp, ops, sessions, processed_ops);
-		break;
-	default:
-		/* Operation not supported. */
-		for (i = 0; i < num_ops; i++)
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-	}
-
-	for (i = 0; i < num_ops; i++) {
-		/*
-		 * If there was no error/authentication failure,
-		 * change status to successful.
-		 */
-		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-		/* Free session if a session-less crypto op. */
-		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-			memset(sessions[i], 0, sizeof(struct zuc_session));
-			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
-			ops[i]->sym->session = NULL;
-		}
-	}
-	return processed_ops;
-}
-
-static uint16_t
-zuc_pmd_dequeue_burst(void *queue_pair,
-		struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
-
-	struct rte_crypto_op *curr_c_op;
-
-	struct zuc_session *curr_sess;
-	struct zuc_session *sessions[ZUC_MAX_BURST];
-	struct rte_crypto_op *int_c_ops[ZUC_MAX_BURST];
-	enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
-	enum ipsec_mb_operation curr_zuc_op;
-	struct ipsec_mb_qp *qp = queue_pair;
-	unsigned int nb_dequeued;
-	unsigned int i;
-	uint8_t burst_size = 0;
-	uint8_t processed_ops;
-
-	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
-			(void **)c_ops, nb_ops, NULL);
-
-
-	for (i = 0; i < nb_dequeued; i++) {
-		curr_c_op = c_ops[i];
-
-		curr_sess = (struct zuc_session *)
-			ipsec_mb_get_session_private(qp, curr_c_op);
-		if (unlikely(curr_sess == NULL)) {
-			curr_c_op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-			break;
-		}
-
-		curr_zuc_op = curr_sess->op;
-
-		/*
-		 * Batch ops that share the same operation type
-		 * (cipher only, auth only...).
-		 */
-		if (burst_size == 0) {
-			prev_zuc_op = curr_zuc_op;
-			int_c_ops[0] = curr_c_op;
-			sessions[0] = curr_sess;
-			burst_size++;
-		} else if (curr_zuc_op == prev_zuc_op) {
-			int_c_ops[burst_size] = curr_c_op;
-			sessions[burst_size] = curr_sess;
-			burst_size++;
-			/*
-			 * When there are enough ops to process in a batch,
-			 * process them, and start a new batch.
-			 */
-			if (burst_size == ZUC_MAX_BURST) {
-				processed_ops = process_ops(int_c_ops, curr_zuc_op,
-						sessions, qp, burst_size);
-				if (processed_ops < burst_size) {
-					burst_size = 0;
-					break;
-				}
-
-				burst_size = 0;
-			}
-		} else {
-			/*
-			 * Different operation type, process the ops
-			 * of the previous type.
-			 */
-			processed_ops = process_ops(int_c_ops, prev_zuc_op,
-					sessions, qp, burst_size);
-			if (processed_ops < burst_size) {
-				burst_size = 0;
-				break;
-			}
-
-			burst_size = 0;
-			prev_zuc_op = curr_zuc_op;
-
-			int_c_ops[0] = curr_c_op;
-			sessions[0] = curr_sess;
-			burst_size++;
-		}
-	}
-
-	if (burst_size != 0) {
-		/* Process the crypto ops of the last operation type. */
-		processed_ops = process_ops(int_c_ops, prev_zuc_op,
-				sessions, qp, burst_size);
-	}
-
-	qp->stats.dequeued_count += i;
-	return i;
-}
+#include "pmd_aesni_mb_priv.h"
 
 struct rte_cryptodev_ops zuc_pmd_ops = {
 	.dev_configure = ipsec_mb_config,
@@ -390,7 +54,7 @@  RTE_INIT(ipsec_mb_register_zuc)
 	    = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_ZUC];
 
 	zuc_data->caps = zuc_capabilities;
-	zuc_data->dequeue_burst = zuc_pmd_dequeue_burst;
+	zuc_data->dequeue_burst = aesni_mb_dequeue_burst;
 	zuc_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
 			| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
 			| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
@@ -399,7 +63,8 @@  RTE_INIT(ipsec_mb_register_zuc)
 			| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
 	zuc_data->internals_priv_size = 0;
 	zuc_data->ops = &zuc_pmd_ops;
-	zuc_data->qp_priv_size = sizeof(struct zuc_qp_data);
-	zuc_data->session_configure = zuc_session_configure;
-	zuc_data->session_priv_size = sizeof(struct zuc_session);
+	zuc_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+	zuc_data->session_configure = aesni_mb_session_configure;
+	zuc_data->session_priv_size =
+			sizeof(struct aesni_mb_session);
 }
diff --git a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
index 76fd6758c2..a1e8e3aade 100644
--- a/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_zuc_priv.h
@@ -10,7 +10,6 @@ 
 #define ZUC_IV_KEY_LENGTH 16
 #define ZUC_DIGEST_LENGTH 4
 #define ZUC_MAX_BURST 16
-#define BYTE_LEN 8
 
 uint8_t pmd_driver_id_zuc;
 
@@ -63,23 +62,4 @@  static const struct rte_cryptodev_capabilities zuc_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-/** ZUC private session structure */
-struct zuc_session {
-	enum ipsec_mb_operation op;
-	enum rte_crypto_auth_operation auth_op;
-	uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
-	uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
-	uint16_t cipher_iv_offset;
-	uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-struct zuc_qp_data {
-
-	uint8_t temp_digest[ZUC_MAX_BURST][ZUC_DIGEST_LENGTH];
-	/* *< Buffers used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-};
-
 #endif /* _PMD_ZUC_PRIV_H_ */