[RFC,4/5] crypto/aesni_mb: support IPSec MB library v0.53

Message ID: 20200305153454.724874-5-pablo.de.lara.guarch@intel.com
State: Not Applicable, archived
Series: Support Intel IPSec MB v0.53 in DPDK 18.11

Checks

ci/checkpatch: warning (coding style issues)

Commit Message

De Lara Guarch, Pablo March 5, 2020, 3:34 p.m. UTC
  Add support for the underlying IPSec Multi-buffer library v0.53.
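
  The bulk of the change replaces the per-vector-mode job_ops function-pointer
  table with direct calls into the library: the manager is allocated with
  alloc_mb_mgr(), initialised with init_mb_mgr_sse/avx/avx2/avx512(), and jobs
  are driven through the IMB_GET_NEXT_JOB/IMB_SUBMIT_JOB/IMB_FLUSH_JOB macros.
  As a rough illustration only (not part of the patch), the v0.53 job lifecycle
  the PMD now relies on looks along these lines; job parameter setup is elided:

    /*
     * Minimal sketch of the intel-ipsec-mb v0.53 job API lifecycle.
     * Parameter setup (keys, offsets, lengths) is omitted; see
     * set_mb_job_params() in the patch for the real thing.
     */
    #include <stddef.h>
    #include <intel-ipsec-mb.h>

    static int
    mb_mgr_demo(void)
    {
            MB_MGR *mgr = alloc_mb_mgr(0);  /* one manager per queue pair */
            JOB_AES_HMAC *job;

            if (mgr == NULL)
                    return -1;

            init_mb_mgr_sse(mgr);           /* or init_mb_mgr_avx/avx2/avx512 */

            job = IMB_GET_NEXT_JOB(mgr);    /* grab a free job slot */
            /* ... fill in cipher/hash parameters here ... */
            job = IMB_SUBMIT_JOB(mgr);      /* may return an earlier completed job */
            while (job != NULL) {
                    /* check job->status == STS_COMPLETED, then post-process */
                    job = IMB_GET_COMPLETED_JOB(mgr);
            }

            while ((job = IMB_FLUSH_JOB(mgr)) != NULL)
                    ;                       /* drain jobs still in flight */

            free_mb_mgr(mgr);
            return 0;
    }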

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c    | 476 ++++++++++++------
 .../crypto/aesni_mb/rte_aesni_mb_pmd_ops.c    | 205 +++++---
 .../aesni_mb/rte_aesni_mb_pmd_private.h       |  30 +-
 3 files changed, 489 insertions(+), 222 deletions(-)
  

Patch

diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 83250e32c..9dfa89f71 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -35,7 +35,7 @@  typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_
 static void
 calculate_auth_precomputes(hash_one_block_t one_block_hash,
 		uint8_t *ipad, uint8_t *opad,
-		uint8_t *hkey, uint16_t hkey_len,
+		const uint8_t *hkey, uint16_t hkey_len,
 		uint16_t blocksize)
 {
 	unsigned i, length;
@@ -85,6 +85,25 @@  aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
 			return AESNI_MB_OP_HASH_CIPHER;
 	}
 
+#if IMB_VERSION_NUM > IMB_VERSION(0, 52, 0)
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+			/*
+			 * CCM requires hashing first and ciphering later
+			 * when encrypting
+			 */
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
+				return AESNI_MB_OP_AEAD_HASH_CIPHER;
+			else
+				return AESNI_MB_OP_AEAD_CIPHER_HASH;
+		} else {
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
+				return AESNI_MB_OP_AEAD_CIPHER_HASH;
+			else
+				return AESNI_MB_OP_AEAD_HASH_CIPHER;
+		}
+	}
+#else
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
 		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
 				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
@@ -94,19 +113,21 @@  aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
 				return AESNI_MB_OP_AEAD_HASH_CIPHER;
 		}
 	}
+#endif
 
 	return AESNI_MB_OP_NOT_SUPPORTED;
 }
 
 /** Set session authentication parameters */
 static int
-aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
-	hash_one_block_t hash_oneblock_fn;
+	hash_one_block_t hash_oneblock_fn = NULL;
 	unsigned int key_larger_block_size = 0;
 	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
+	uint32_t auth_precompute = 1;
 
 	if (xform == NULL) {
 		sess->auth.algo = NULL_HASH;
@@ -135,13 +156,16 @@  aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 			return -EINVAL;
 		}
 		sess->auth.gen_digest_len = sess->auth.req_digest_len;
-		(*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
+
+		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
 				sess->auth.xcbc.k1_expanded,
 				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
 		return 0;
 	}
 
 	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+		uint32_t dust[4*15];
+
 		sess->auth.algo = AES_CMAC;
 
 		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
@@ -150,102 +174,144 @@  aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 			AESNI_MB_LOG(ERR, "Invalid digest size\n");
 			return -EINVAL;
 		}
+		if (sess->auth.req_digest_len < 4)
+			sess->auth.gen_digest_len = cmac_digest_len;
+		else
+			sess->auth.gen_digest_len = sess->auth.req_digest_len;
+		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
+				sess->auth.cmac.expkey, dust);
+		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
+				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+		return 0;
+	}
+
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+			sess->cipher.direction = ENCRYPT;
+			sess->chain_order = CIPHER_HASH;
+		} else
+			sess->cipher.direction = DECRYPT;
+
+		sess->auth.algo = AES_GMAC;
 		/*
-		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
-		 * in version 0.50 and sizes of 12 and 16 bytes,
-		 * in version 0.49.
+		 * Multi-buffer lib supports 8, 12 and 16 bytes of digest.
 		 * If size requested is different, generate the full digest
 		 * (16 bytes) in a temporary location and then memcpy
 		 * the requested number of bytes.
 		 */
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-		if (sess->auth.req_digest_len < 4)
-#else
-		uint16_t cmac_trunc_digest_len =
-				get_truncated_digest_byte_length(AES_CMAC);
-		if (sess->auth.req_digest_len != cmac_digest_len &&
-				sess->auth.req_digest_len != cmac_trunc_digest_len)
-#endif
-			sess->auth.gen_digest_len = cmac_digest_len;
-		else
+		if (sess->auth.req_digest_len != 16 &&
+				sess->auth.req_digest_len != 12 &&
+				sess->auth.req_digest_len != 8) {
+			sess->auth.gen_digest_len = 16;
+		} else {
 			sess->auth.gen_digest_len = sess->auth.req_digest_len;
-		(*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
-				sess->auth.cmac.expkey);
+		}
+		sess->iv.length = xform->auth.iv.length;
+		sess->iv.offset = xform->auth.iv.offset;
+
+		switch (xform->auth.key.length) {
+		case AES_128_BYTES:
+			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
+				&sess->cipher.gcm_key);
+			sess->cipher.key_length_in_bytes = AES_128_BYTES;
+			break;
+		case AES_192_BYTES:
+			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
+				&sess->cipher.gcm_key);
+			sess->cipher.key_length_in_bytes = AES_192_BYTES;
+			break;
+		case AES_256_BYTES:
+			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
+				&sess->cipher.gcm_key);
+			sess->cipher.key_length_in_bytes = AES_256_BYTES;
+			break;
+		default:
+			AESNI_MB_LOG(ERR, "Invalid authentication key length");
+			return -EINVAL;
+		}
 
-		(*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey,
-				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
 		return 0;
 	}
 
 	switch (xform->auth.algo) {
 	case RTE_CRYPTO_AUTH_MD5_HMAC:
 		sess->auth.algo = MD5;
-		hash_oneblock_fn = mb_ops->aux.one_block.md5;
+		hash_oneblock_fn = mb_mgr->md5_one_block;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		sess->auth.algo = SHA1;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha1;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha1_one_block;
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
-			mb_ops->aux.multi_block.sha1(
+			IMB_SHA1(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
 			key_larger_block_size = 1;
 		}
-#endif
+		break;
+	case RTE_CRYPTO_AUTH_SHA1:
+		sess->auth.algo = PLAIN_SHA1;
+		auth_precompute = 0;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 		sess->auth.algo = SHA_224;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha224;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha224_one_block;
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
-			mb_ops->aux.multi_block.sha224(
+			IMB_SHA224(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
 			key_larger_block_size = 1;
 		}
-#endif
+		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		sess->auth.algo = PLAIN_SHA_224;
+		auth_precompute = 0;
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
 		sess->auth.algo = SHA_256;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha256;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha256_one_block;
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
-			mb_ops->aux.multi_block.sha256(
+			IMB_SHA256(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
 			key_larger_block_size = 1;
 		}
-#endif
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		sess->auth.algo = PLAIN_SHA_256;
+		auth_precompute = 0;
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
 		sess->auth.algo = SHA_384;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha384;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha384_one_block;
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
-			mb_ops->aux.multi_block.sha384(
+			IMB_SHA384(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
 			key_larger_block_size = 1;
 		}
-#endif
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		sess->auth.algo = PLAIN_SHA_384;
+		auth_precompute = 0;
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
 		sess->auth.algo = SHA_512;
-		hash_oneblock_fn = mb_ops->aux.one_block.sha512;
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+		hash_oneblock_fn = mb_mgr->sha512_one_block;
 		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
-			mb_ops->aux.multi_block.sha512(
+			IMB_SHA512(mb_mgr,
 				xform->auth.key.data,
 				xform->auth.key.length,
 				hashed_key);
 			key_larger_block_size = 1;
 		}
-#endif
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		sess->auth.algo = PLAIN_SHA_512;
+		auth_precompute = 0;
 		break;
 	default:
 		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
@@ -256,12 +322,8 @@  aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 	uint16_t full_digest_size =
 			get_digest_byte_length(sess->auth.algo);
 
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 	if (sess->auth.req_digest_len > full_digest_size ||
 			sess->auth.req_digest_len == 0) {
-#else
-	if (sess->auth.req_digest_len != trunc_digest_size) {
-#endif
 		AESNI_MB_LOG(ERR, "Invalid digest size\n");
 		return -EINVAL;
 	}
@@ -272,6 +334,10 @@  aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 	else
 		sess->auth.gen_digest_len = sess->auth.req_digest_len;
 
+	/* Plain SHA does not require precompute key */
+	if (auth_precompute == 0)
+		return 0;
+
 	/* Calculate Authentication precomputes */
 	if (key_larger_block_size) {
 		calculate_auth_precomputes(hash_oneblock_fn,
@@ -292,13 +358,12 @@  aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
 
 /** Set session cipher parameters */
 static int
-aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
 	uint8_t is_aes = 0;
 	uint8_t is_3DES = 0;
-	aes_keyexp_t aes_keyexp_fn;
 
 	if (xform == NULL) {
 		sess->cipher.mode = NULL_CIPHER;
@@ -361,26 +426,26 @@  aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 		switch (xform->cipher.key.length) {
 		case AES_128_BYTES:
 			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
 			break;
 		case AES_192_BYTES:
 			sess->cipher.key_length_in_bytes = AES_192_BYTES;
-			aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
 			break;
 		case AES_256_BYTES:
 			sess->cipher.key_length_in_bytes = AES_256_BYTES;
-			aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
 			break;
 		default:
 			AESNI_MB_LOG(ERR, "Invalid cipher key length");
 			return -EINVAL;
 		}
-
-		/* Expanded cipher keys */
-		(*aes_keyexp_fn)(xform->cipher.key.data,
-				sess->cipher.expanded_aes_keys.encode,
-				sess->cipher.expanded_aes_keys.decode);
-
 	} else if (is_3DES) {
 		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
 				sess->cipher.exp_3des_keys.key[1],
@@ -388,9 +453,12 @@  aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 
 		switch (xform->cipher.key.length) {
 		case  24:
-			des_key_schedule(keys[0], xform->cipher.key.data);
-			des_key_schedule(keys[1], xform->cipher.key.data+8);
-			des_key_schedule(keys[2], xform->cipher.key.data+16);
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			IMB_DES_KEYSCHED(mb_mgr, keys[2],
+					xform->cipher.key.data + 16);
 
 			/* Initialize keys - 24 bytes: [K1-K2-K3] */
 			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
@@ -398,8 +466,10 @@  aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
 			break;
 		case 16:
-			des_key_schedule(keys[0], xform->cipher.key.data);
-			des_key_schedule(keys[1], xform->cipher.key.data+8);
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
 
 			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
 			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
@@ -407,7 +477,8 @@  aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
 			break;
 		case 8:
-			des_key_schedule(keys[0], xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
 
 			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
 			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
@@ -419,11 +490,7 @@  aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 			return -EINVAL;
 		}
 
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 		sess->cipher.key_length_in_bytes = 24;
-#else
-		sess->cipher.key_length_in_bytes = 8;
-#endif
 	} else {
 		if (xform->cipher.key.length != 8) {
 			AESNI_MB_LOG(ERR, "Invalid cipher key length");
@@ -431,9 +498,11 @@  aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 		}
 		sess->cipher.key_length_in_bytes = 8;
 
-		des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.encode,
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
 				xform->cipher.key.data);
-		des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.decode,
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
 				xform->cipher.key.data);
 	}
 
@@ -441,15 +510,10 @@  aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
 }
 
 static int
-aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
-	union {
-		aes_keyexp_t aes_keyexp_fn;
-		aes_gcm_keyexp_t aes_gcm_keyexp_fn;
-	} keyexp;
-
 	switch (xform->aead.op) {
 	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
 		sess->cipher.direction = ENCRYPT;
@@ -473,17 +537,15 @@  aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
 		switch (xform->aead.key.length) {
 		case AES_128_BYTES:
 			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			keyexp.aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
 			break;
 		default:
 			AESNI_MB_LOG(ERR, "Invalid cipher key length");
 			return -EINVAL;
 		}
 
-		/* Expanded cipher keys */
-		(*keyexp.aes_keyexp_fn)(xform->aead.key.data,
-				sess->cipher.expanded_aes_keys.encode,
-				sess->cipher.expanded_aes_keys.decode);
 		break;
 
 	case RTE_CRYPTO_AEAD_AES_GCM:
@@ -493,26 +555,24 @@  aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
 		switch (xform->aead.key.length) {
 		case AES_128_BYTES:
 			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			keyexp.aes_gcm_keyexp_fn =
-					mb_ops->aux.keyexp.aes_gcm_128;
+			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
 			break;
 		case AES_192_BYTES:
 			sess->cipher.key_length_in_bytes = AES_192_BYTES;
-			keyexp.aes_gcm_keyexp_fn =
-					mb_ops->aux.keyexp.aes_gcm_192;
+			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
 			break;
 		case AES_256_BYTES:
 			sess->cipher.key_length_in_bytes = AES_256_BYTES;
-			keyexp.aes_gcm_keyexp_fn =
-					mb_ops->aux.keyexp.aes_gcm_256;
+			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
 			break;
 		default:
 			AESNI_MB_LOG(ERR, "Invalid cipher key length");
 			return -EINVAL;
 		}
 
-		(keyexp.aes_gcm_keyexp_fn)(xform->aead.key.data,
-				&sess->cipher.gcm_key);
 		break;
 
 	default:
@@ -539,7 +599,7 @@  aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
 
 /** Parse crypto xform chain and set private session parameters */
 int
-aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
@@ -598,13 +658,13 @@  aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 	/* Default IV length = 0 */
 	sess->iv.length = 0;
 
-	ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
+	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
 	if (ret != 0) {
 		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
 		return ret;
 	}
 
-	ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
 			cipher_xform);
 	if (ret != 0) {
 		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
@@ -612,7 +672,7 @@  aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
 	}
 
 	if (aead_xform) {
-		ret = aesni_mb_set_session_aead_parameters(mb_ops, sess,
+		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
 				aead_xform);
 		if (ret != 0) {
 			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
@@ -673,7 +733,7 @@  get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
 
 		sess = (struct aesni_mb_session *)_sess_private_data;
 
-		if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
+		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
 				sess, op->sym->xform) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
 			rte_mempool_put(qp->sess_mp, _sess_private_data);
@@ -690,6 +750,56 @@  get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
 	return sess;
 }
 
+static inline uint64_t
+auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
+		uint32_t oop)
+{
+	struct rte_mbuf *m_src, *m_dst;
+	uint8_t *p_src, *p_dst;
+	uintptr_t u_src, u_dst;
+	uint32_t cipher_end, auth_end;
+
+	/* Only cipher then hash needs special calculation. */
+	if (!oop || session->chain_order != CIPHER_HASH)
+		return op->sym->auth.data.offset;
+
+	m_src = op->sym->m_src;
+	m_dst = op->sym->m_dst;
+
+	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
+	u_src = (uintptr_t)p_src;
+	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
+
+	/**
+	 * Copy the content between cipher offset and auth offset to generate
+	 * a correct digest.
+	 */
+	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
+		memcpy(p_dst + op->sym->auth.data.offset,
+				p_src + op->sym->auth.data.offset,
+				op->sym->cipher.data.offset -
+				op->sym->auth.data.offset);
+
+	/**
+	 * Copy the content between (cipher offset + length) and (auth offset +
+	 * length) to generate a correct digest.
+	 */
+	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
+	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
+	if (cipher_end < auth_end)
+		memcpy(p_dst + cipher_end, p_src + cipher_end,
+				auth_end - cipher_end);
+
+	/**
+	 * Since intel-ipsec-mb only supports positive values,
+	 * we need to deduce the correct offset between src and dst.
+	 */
+
+	return u_src < u_dst ? (u_dst - u_src) :
+			(UINT64_MAX - u_src + u_dst + 1);
+}
+
 /**
  * Process a crypto operation and complete a JOB_AES_HMAC job structure for
  * submission to the multi buffer library for processing.
@@ -708,7 +818,7 @@  set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 {
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
 	struct aesni_mb_session *session;
-	uint16_t m_offset = 0;
+	uint32_t m_offset, oop;
 
 	session = get_session(qp, op);
 	if (session == NULL) {
@@ -760,8 +870,16 @@  set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 		break;
 
 	case AES_GMAC:
-		job->u.GCM.aad = op->sym->aead.aad.data;
-		job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+		if (session->cipher.mode == GCM) {
+			job->u.GCM.aad = op->sym->aead.aad.data;
+			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+		} else {
+			/* For GMAC */
+			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
+					uint8_t *, op->sym->auth.data.offset);
+			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
+			job->cipher_mode = GCM;
+		}
 		job->aes_enc_key_expanded = &session->cipher.gcm_key;
 		job->aes_dec_key_expanded = &session->cipher.gcm_key;
 		break;
@@ -783,37 +901,34 @@  set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 		}
 	}
 
-	/* Mutable crypto operation parameters */
-	if (op->sym->m_dst) {
-		m_src = m_dst = op->sym->m_dst;
-
-		/* append space for output data to mbuf */
-		char *odata = rte_pktmbuf_append(m_dst,
-				rte_pktmbuf_data_len(op->sym->m_src));
-		if (odata == NULL) {
-			AESNI_MB_LOG(ERR, "failed to allocate space in destination "
-					"mbuf for source data");
-			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			return -1;
-		}
-
-		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
-				rte_pktmbuf_data_len(op->sym->m_src));
-	} else {
+	if (!op->sym->m_dst) {
+		/* in-place operation */
 		m_dst = m_src;
-		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
-			m_offset = op->sym->aead.data.offset;
-		else
-			m_offset = op->sym->cipher.data.offset;
+		oop = 0;
+	} else if (op->sym->m_dst == op->sym->m_src) {
+		/* in-place operation */
+		m_dst = m_src;
+		oop = 0;
+	} else {
+		/* out-of-place operation */
+		m_dst = op->sym->m_dst;
+		oop = 1;
 	}
 
+	if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
+			session->cipher.mode == GCM))
+		m_offset = op->sym->aead.data.offset;
+	else
+		m_offset = op->sym->cipher.data.offset;
+
 	/* Set digest output location */
 	if (job->hash_alg != NULL_HASH &&
 			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
 		job->auth_tag_output = qp->temp_digests[*digest_idx];
 		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
 	} else {
-		if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
+		if (job->hash_alg == AES_CCM || (job->hash_alg == AES_GMAC &&
+				session->cipher.mode == GCM))
 			job->auth_tag_output = op->sym->aead.digest.data;
 		else
 			job->auth_tag_output = op->sym->auth.digest.data;
@@ -834,7 +949,7 @@  set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 	/* Set IV parameters */
 	job->iv_len_in_bytes = session->iv.length;
 
-	/* Data  Parameter */
+	/* Data Parameters */
 	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
 	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
 
@@ -851,11 +966,24 @@  set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 		break;
 
 	case AES_GMAC:
-		job->cipher_start_src_offset_in_bytes =
-				op->sym->aead.data.offset;
-		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
-		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
-		job->msg_len_to_hash_in_bytes = job->msg_len_to_cipher_in_bytes;
+		if (session->cipher.mode == GCM) {
+			job->cipher_start_src_offset_in_bytes =
+					op->sym->aead.data.offset;
+			job->hash_start_src_offset_in_bytes =
+					op->sym->aead.data.offset;
+			job->msg_len_to_cipher_in_bytes =
+					op->sym->aead.data.length;
+			job->msg_len_to_hash_in_bytes =
+					op->sym->aead.data.length;
+		} else {
+			job->cipher_start_src_offset_in_bytes =
+					op->sym->auth.data.offset;
+			job->hash_start_src_offset_in_bytes =
+					op->sym->auth.data.offset;
+			job->msg_len_to_cipher_in_bytes = 0;
+			job->msg_len_to_hash_in_bytes = 0;
+		}
+
 		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
 				session->iv.offset);
 		break;
@@ -865,7 +993,8 @@  set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 				op->sym->cipher.data.offset;
 		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
 
-		job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
+				session, oop);
 		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
 
 		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -879,26 +1008,18 @@  set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
 }
 
 static inline void
-verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
-		struct aesni_mb_session *sess)
+verify_digest(JOB_AES_HMAC *job, void *digest, uint16_t len, uint8_t *status)
 {
 	/* Verify digest if required */
-	if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC) {
-		if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
-				sess->auth.req_digest_len) != 0)
-			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	} else {
-		if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
-				sess->auth.req_digest_len) != 0)
-			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	}
+	if (memcmp(job->auth_tag_output, digest, len) != 0)
+		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 }
 
 static inline void
 generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
 		struct aesni_mb_session *sess)
 {
-	/* No extra copy neeed */
+	/* No extra copy needed */
 	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
 		return;
 
@@ -933,13 +1054,24 @@  post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 		case STS_COMPLETED:
 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
-			if (job->hash_alg != NULL_HASH) {
-				if (sess->auth.operation ==
-						RTE_CRYPTO_AUTH_OP_VERIFY)
-					verify_digest(job, op, sess);
+			if (job->hash_alg == NULL_HASH)
+				break;
+
+			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+				if (job->hash_alg == AES_CCM ||
+					(job->hash_alg == AES_GMAC &&
+						sess->cipher.mode == GCM))
+					verify_digest(job,
+						op->sym->aead.digest.data,
+						sess->auth.req_digest_len,
+						&op->status);
 				else
-					generate_digest(job, op, sess);
-			}
+					verify_digest(job,
+						op->sym->auth.digest.data,
+						sess->auth.req_digest_len,
+						&op->status);
+			} else
+				generate_digest(job, op, sess);
 			break;
 		default:
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -989,7 +1121,7 @@  handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
 		if (processed_jobs == nb_ops)
 			break;
 
-		job = (*qp->op_fns->job.get_completed_job)(qp->mb_mgr);
+		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
 	}
 
 	return processed_jobs;
@@ -1002,7 +1134,7 @@  flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
 	int processed_ops = 0;
 
 	/* Flush the remaining jobs */
-	JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(qp->mb_mgr);
+	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
 
 	if (job)
 		processed_ops += handle_completed_jobs(qp, job,
@@ -1042,7 +1174,7 @@  aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	uint8_t digest_idx = qp->digest_idx;
 	do {
 		/* Get next free mb job struct from mb manager */
-		job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
+		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
 		if (unlikely(job == NULL)) {
 			/* if no free mb job structs we need to flush mb_mgr */
 			processed_jobs += flush_mb_mgr(qp,
@@ -1052,7 +1184,7 @@  aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 			if (nb_ops == processed_jobs)
 				break;
 
-			job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
+			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
 		}
 
 		/*
@@ -1072,8 +1204,11 @@  aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		}
 
 		/* Submit job to multi-buffer for processing */
-		job = (*qp->op_fns->job.submit)(qp->mb_mgr);
-
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		job = IMB_SUBMIT_JOB(qp->mb_mgr);
+#else
+		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
+#endif
 		/*
 		 * If submit returns a processed job then handle it,
 		 * before submitting subsequent jobs
@@ -1105,12 +1240,7 @@  cryptodev_aesni_mb_create(const char *name,
 	struct rte_cryptodev *dev;
 	struct aesni_mb_private *internals;
 	enum aesni_mb_vector_mode vector_mode;
-
-	/* Check CPU for support for AES instruction set */
-	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
-		AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
-		return -EFAULT;
-	}
+	MB_MGR *mb_mgr;
 
 	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
 	if (dev == NULL) {
@@ -1137,23 +1267,38 @@  cryptodev_aesni_mb_create(const char *name,
 
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_CPU_AESNI;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+	/* Check CPU for support for AES instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
+	else
+		AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");
+
+	mb_mgr = alloc_mb_mgr(0);
+	if (mb_mgr == NULL)
+		return -ENOMEM;
 
 	switch (vector_mode) {
 	case RTE_AESNI_MB_SSE:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(mb_mgr);
 		break;
 	case RTE_AESNI_MB_AVX:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(mb_mgr);
 		break;
 	case RTE_AESNI_MB_AVX2:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(mb_mgr);
 		break;
 	case RTE_AESNI_MB_AVX512:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(mb_mgr);
 		break;
 	default:
-		break;
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+		goto error_exit;
 	}
 
 	/* Set vector instructions mode supported */
@@ -1161,15 +1306,19 @@  cryptodev_aesni_mb_create(const char *name,
 
 	internals->vector_mode = vector_mode;
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+	internals->mb_mgr = mb_mgr;
 
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
 			imb_get_version_str());
-#else
-	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
-#endif
 
 	return 0;
+error_exit:
+	if (mb_mgr)
+		free_mb_mgr(mb_mgr);
+
+	rte_cryptodev_pmd_destroy(dev);
+
+	return -1;
 }
 
 static int
@@ -1204,6 +1353,7 @@  static int
 cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
 {
 	struct rte_cryptodev *cryptodev;
+	struct aesni_mb_private *internals;
 	const char *name;
 
 	name = rte_vdev_device_name(vdev);
@@ -1214,6 +1364,10 @@  cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
 	if (cryptodev == NULL)
 		return -ENODEV;
 
+	internals = cryptodev->data->dev_private;
+
+	free_mb_mgr(internals->mb_mgr);
+
 	return rte_cryptodev_pmd_destroy(cryptodev);
 }
 
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index f3eff2685..e58dfa33b 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -25,15 +25,9 @@  static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.min = 1,
 					.max = 16,
 					.increment = 1
-#else
-					.min = 12,
-					.max = 12,
-					.increment = 0
-#endif
 				},
 				.iv_size = { 0 }
 			}, }
@@ -48,23 +42,34 @@  static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.block_size = 64,
 				.key_size = {
 					.min = 1,
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.max = 65535,
-#else
-					.max = 64,
-#endif
 					.increment = 1
 				},
 				.digest_size = {
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.min = 1,
 					.max = 20,
 					.increment = 1
-#else
-					.min = 12,
-					.max = 12,
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1,
+				.block_size = 64,
+				.key_size = {
+					.min = 0,
+					.max = 0,
 					.increment = 0
-#endif
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 20,
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -79,23 +84,34 @@  static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.block_size = 64,
 				.key_size = {
 					.min = 1,
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.max = 65535,
-#else
-					.max = 64,
-#endif
 					.increment = 1
 				},
 				.digest_size = {
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.min = 1,
 					.max = 28,
 					.increment = 1
-#else
-					.min = 14,
-					.max = 14,
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224,
+				.block_size = 64,
+				.key_size = {
+					.min = 0,
+					.max = 0,
 					.increment = 0
-#endif
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 28,
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -110,23 +126,34 @@  static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.block_size = 64,
 				.key_size = {
 					.min = 1,
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.max = 65535,
-#else
-					.max = 64,
-#endif
 					.increment = 1
 				},
 				.digest_size = {
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.min = 1,
 					.max = 32,
 					.increment = 1
-#else
-					.min = 16,
-					.max = 16,
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256,
+				.block_size = 64,
+				.key_size = {
+					.min = 0,
+					.max = 0,
 					.increment = 0
-#endif
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 32,
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -141,23 +168,34 @@  static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.block_size = 128,
 				.key_size = {
 					.min = 1,
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.max = 65535,
-#else
-					.max = 128,
-#endif
 					.increment = 1
 				},
 				.digest_size = {
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.min = 1,
 					.max = 48,
 					.increment = 1
-#else
-					.min = 24,
-					.max = 24,
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384,
+				.block_size = 128,
+				.key_size = {
+					.min = 0,
+					.max = 0,
 					.increment = 0
-#endif
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 48,
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -172,23 +210,34 @@  static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.block_size = 128,
 				.key_size = {
 					.min = 1,
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.max = 65535,
-#else
-					.max = 128,
-#endif
 					.increment = 1
 				},
 				.digest_size = {
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
 					.min = 1,
 					.max = 64,
 					.increment = 1
-#else
-					.min = 32,
-					.max = 32,
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512,
+				.block_size = 128,
+				.key_size = {
+					.min = 0,
+					.max = 0,
 					.increment = 0
-#endif
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -416,6 +465,31 @@  static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
@@ -595,7 +669,28 @@  aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		goto qp_setup_cleanup;
 	}
 
-	qp->op_fns = &job_ops[internals->vector_mode];
+	switch (internals->vector_mode) {
+	case RTE_AESNI_MB_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		init_mb_mgr_sse(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		init_mb_mgr_avx(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		init_mb_mgr_avx2(qp->mb_mgr);
+		break;
+	case RTE_AESNI_MB_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		init_mb_mgr_avx512(qp->mb_mgr);
+		break;
+	default:
+		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
+				internals->vector_mode);
+		goto qp_setup_cleanup;
+	}
 
 	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
 			qp_conf->nb_descriptors, socket_id);
@@ -613,13 +708,11 @@  aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
 				"digest_mp_%u_%u", dev->data->dev_id, qp_id);
 
-	/* Initialise multi-buffer manager */
-	(*qp->op_fns->job.init_mgr)(qp->mb_mgr);
 	return 0;
 
 qp_setup_cleanup:
 	if (qp) {
-		if (qp->mb_mgr == NULL)
+		if (qp->mb_mgr)
 			free_mb_mgr(qp->mb_mgr);
 		rte_free(qp);
 	}
@@ -663,7 +756,7 @@  aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
 		return -ENOMEM;
 	}
 
-	ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
+	ret = aesni_mb_set_session_parameters(internals->mb_mgr,
 			sess_private_data, xform);
 	if (ret != 0) {
 		AESNI_MB_LOG(ERR, "failed configure session parameters");
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index d8021cdaa..6a5df942c 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -25,6 +25,7 @@  int aesni_mb_logtype_driver;
 /* Maximum length for digest */
 #define DIGEST_LENGTH_MAX 64
 static const unsigned auth_blocksize[] = {
+		[NULL_HASH]	= 0,
 		[MD5]		= 64,
 		[SHA1]		= 64,
 		[SHA_224]	= 64,
@@ -33,6 +34,13 @@  static const unsigned auth_blocksize[] = {
 		[SHA_512]	= 128,
 		[AES_XCBC]	= 16,
 		[AES_CCM]	= 16,
+		[AES_CMAC]	= 16,
+		[AES_GMAC]	= 16,
+		[PLAIN_SHA1]	= 64,
+		[PLAIN_SHA_224]	= 64,
+		[PLAIN_SHA_256]	= 64,
+		[PLAIN_SHA_384]	= 128,
+		[PLAIN_SHA_512]	= 128
 };
 
 /**
@@ -57,7 +65,13 @@  static const unsigned auth_truncated_digest_byte_lengths[] = {
 		[AES_XCBC]	= 12,
 		[AES_CMAC]	= 12,
 		[AES_CCM]	= 8,
-		[NULL_HASH]	= 0
+		[NULL_HASH]	= 0,
+		[AES_GMAC]	= 16,
+		[PLAIN_SHA1]	= 20,
+		[PLAIN_SHA_224]	= 28,
+		[PLAIN_SHA_256]	= 32,
+		[PLAIN_SHA_384]	= 48,
+		[PLAIN_SHA_512]	= 64
 };
 
 /**
@@ -82,8 +96,14 @@  static const unsigned auth_digest_byte_lengths[] = {
 		[SHA_512]	= 64,
 		[AES_XCBC]	= 16,
 		[AES_CMAC]	= 16,
+		[AES_CCM]	= 16,
 		[AES_GMAC]	= 12,
-		[NULL_HASH]		= 0
+		[NULL_HASH]	= 0,
+		[PLAIN_SHA1]	= 20,
+		[PLAIN_SHA_224]	= 28,
+		[PLAIN_SHA_256]	= 32,
+		[PLAIN_SHA_384]	= 48,
+		[PLAIN_SHA_512]	= 64
 };
 
 /**
@@ -115,6 +135,8 @@  struct aesni_mb_private {
 	/**< CPU vector instruction set mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+	MB_MGR *mb_mgr;
+	/**< Multi-buffer instance */
 };
 
 /** AESNI Multi buffer queue pair */
@@ -123,8 +145,6 @@  struct aesni_mb_qp {
 	/**< Queue Pair Identifier */
 	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	/**< Unique Queue Pair Name */
-	const struct aesni_mb_op_fns *op_fns;
-	/**< Vector mode dependent pointer table of the multi-buffer APIs */
 	MB_MGR *mb_mgr;
 	/**< Multi-buffer instance */
 	struct rte_ring *ingress_queue;
@@ -238,7 +258,7 @@  struct aesni_mb_session {
  *
  */
 extern int
-aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
 		struct aesni_mb_session *sess,
 		const struct rte_crypto_sym_xform *xform);
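
Beyond the API migration, the capability table gains plain SHA-1/224/256/384/512
and AES-GMAC as authentication-only algorithms. As a hedged sketch (field values
are illustrative assumptions, not dictated by the patch), an application could
request GMAC-only authentication with an auth transform along these lines:

  #include <string.h>
  #include <rte_crypto.h>

  /*
   * Illustrative only: an authentication-only AES-GMAC transform, which this
   * patch adds to the aesni_mb capability table. The IV offset below assumes
   * the IV is carried in the crypto op private data; that placement is an
   * application choice, not a requirement of the patch.
   */
  #define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

  static uint8_t gmac_key[16];              /* 16, 24 or 32 byte keys supported */
  static struct rte_crypto_sym_xform gmac_xform;

  static void
  setup_gmac_auth_xform(void)
  {
          memset(&gmac_xform, 0, sizeof(gmac_xform));
          gmac_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
          gmac_xform.next = NULL;
          gmac_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
          gmac_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
          gmac_xform.auth.key.data = gmac_key;
          gmac_xform.auth.key.length = sizeof(gmac_key);
          gmac_xform.auth.digest_length = 16; /* 8, 12 or 16 per the capabilities */
          gmac_xform.auth.iv.offset = IV_OFFSET;
          gmac_xform.auth.iv.length = 12;     /* 12-byte IV per the capabilities */
  }

The transform would then be attached to a session in the usual way (for
example via rte_cryptodev_sym_session_init() against the aesni_mb device).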