[v3] crypto/qat: use intel-ipsec-mb for partial hash & aes

Message ID 20220525165218.10926-1-kai.ji@intel.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series [v3] crypto/qat: use intel-ipsec-mb for partial hash & aes

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail Compilation issues
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/github-robot: build success github build: passed
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-abi-testing success Testing PASS

Commit Message

Ji, Kai May 25, 2022, 4:52 p.m. UTC
  OpenSSL 3.0 deprecates the low-level APIs that QAT requires to
perform partial hash and AES operations when creating a session. This
patch adds a qat_ipsec_mb_lib driver parameter that allows the QAT PMD
to switch between the OpenSSL and intel-ipsec-mb library APIs.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
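A usage example, taken from the qat.rst update below (the PCI address is only
illustrative):

  -a 03:01.1,qat_ipsec_mb_lib=1
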
 doc/guides/cryptodevs/qat.rst        |  13 +
 drivers/common/qat/meson.build       |   7 +
 drivers/common/qat/qat_device.c      |   1 +
 drivers/common/qat/qat_device.h      |   1 +
 drivers/crypto/qat/qat_sym.c         |   3 +
 drivers/crypto/qat/qat_sym_session.c | 507 ++++++++++++++++++++++++---
 6 files changed, 489 insertions(+), 43 deletions(-)
  

Comments

Fan Zhang May 26, 2022, 8:42 a.m. UTC | #1
Hi Kai,

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Wednesday, May 25, 2022 5:52 PM
> To: dev@dpdk.org
> Cc: Zhang, Roy Fan <roy.fan.zhang@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v3] crypto/qat: use intel-ipsec-mb for partial hash & aes
> 
> OpenSSL 3.0 deprecates the low-level APIs that QAT requires to
> perform partial hash and AES operations when creating a session. This
> patch adds a qat_ipsec_mb_lib driver parameter that allows the QAT PMD
> to switch between the OpenSSL and intel-ipsec-mb library APIs.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> ---

<snip>

> +static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
> +				const uint8_t *auth_key,
> +				uint16_t auth_keylen,
> +				uint8_t *p_state_buf,
> +				uint16_t *p_state_len,
> +				uint8_t aes_cmac)
> +{
> +	int block_size;
> +	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
> +	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
> +	int i;
> +
> +	IMB_MGR *m;
> +	m = alloc_mb_mgr(0);
> +	if (m == NULL)
> +		return -ENOMEM;
> +
> +	init_mb_mgr_auto(m, NULL);
> +
> +	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
> +
> +		/* CMAC */
> +		if (aes_cmac) {
> +			uint8_t *in = NULL;
> +			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
> +			uint8_t *k1, *k2;
> +
> +			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
> +
> +			in = rte_zmalloc("AES CMAC K1",
> +					 ICP_QAT_HW_AES_128_KEY_SZ, 16);
> +
> +			if (in == NULL) {
> +				QAT_LOG(ERR, "Failed to alloc memory");
> +				return -ENOMEM;
> +			}
> +
> +			rte_memcpy(in, AES_CMAC_SEED,
> +				   ICP_QAT_HW_AES_128_KEY_SZ);
> +			rte_memcpy(p_state_buf, auth_key, auth_keylen);
> +
> +			DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
> +			DECLARE_ALIGNED(uint32_t dust[4*15], 16);
> +			IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
> +			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
> +			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
> +
> +			IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
> +			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
> +			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> +			rte_free(in);
> +			free_mb_mgr(m);
> +			return 0;
> +		}
> +
> +		static uint8_t qat_aes_xcbc_key_seed[
> +				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
> +			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> +			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> +			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> +			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> +			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> +			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> +		};
> +
> +		uint8_t *in = NULL;
> +		uint8_t *out = p_state_buf;
> +		int x;
> +
> +		in = rte_zmalloc("working mem for key",
> +				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
> +		if (in == NULL) {
> +			QAT_LOG(ERR, "Failed to alloc memory");
> +			return -ENOMEM;
> +		}
> +
> +		rte_memcpy(in, qat_aes_xcbc_key_seed,
> +				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> +		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
> +			if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen)) {
> +				rte_free(in -
> +				  (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
> +				memset(out -
> +				   (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
> +				  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> +				return -EFAULT;
> +			}
> +
> +			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> +			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> +		}
> +		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> +		rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
> +		free_mb_mgr(m);
> +		return 0;
> +
> +	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
> +		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
> +		uint8_t *in = NULL;
> +		uint8_t *out = p_state_buf;
> +
> +		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
> +				ICP_QAT_HW_GALOIS_LEN_A_SZ +
> +				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
> +		in = rte_zmalloc("working mem for key",
> +				ICP_QAT_HW_GALOIS_H_SZ, 16);
> +		if (in == NULL) {
> +			QAT_LOG(ERR, "Failed to alloc memory");
> +			return -ENOMEM;
> +		}
> +
> +		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
> +		if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen))
> +			return -EFAULT;
> +
> +		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
> +				ICP_QAT_HW_GALOIS_LEN_A_SZ +
> +				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
> +		rte_free(in);
> +		free_mb_mgr(m);
> +		return 0;
> +	}
> +

From this point on, mb_mgr is not freed, whether an error happens or the function exits
normally.
 
> +	block_size = qat_hash_get_block_size(hash_alg);
> +	if (block_size < 0)
> +		return block_size;
> +	/* init ipad and opad from key and xor with fixed values */
> +	memset(ipad, 0, block_size);
> +	memset(opad, 0, block_size);
> +
> +	if (auth_keylen > (unsigned int)block_size) {
> +		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
> +		return -EFAULT;
> +	}
> +	rte_memcpy(ipad, auth_key, auth_keylen);
> +	rte_memcpy(opad, auth_key, auth_keylen);
> +
> +	for (i = 0; i < block_size; i++) {
> +		uint8_t *ipad_ptr = ipad + i;
> +		uint8_t *opad_ptr = opad + i;
> +		*ipad_ptr ^= HMAC_IPAD_VALUE;
> +		*opad_ptr ^= HMAC_OPAD_VALUE;
> +	}
> +
> +	/* do partial hash of ipad and copy to state1 */
> +	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf)) {
> +		memset(ipad, 0, block_size);
> +		memset(opad, 0, block_size);
> +		QAT_LOG(ERR, "ipad precompute failed");
> +		return -EFAULT;
> +	}
> +
> +	/*
> +	 * State len is a multiple of 8, so may be larger than the digest.
> +	 * Put the partial hash of opad state_len bytes after state1
> +	 */
> +	*p_state_len = qat_hash_get_state1_size(hash_alg);
> +	if (partial_hash_compute_ipsec_mb(hash_alg, opad, p_state_buf + *p_state_len)) {
> +		memset(ipad, 0, block_size);
> +		memset(opad, 0, block_size);
> +		QAT_LOG(ERR, "opad precompute failed");
> +		return -EFAULT;
> +	}
> +
> +	/*  don't leave data lying around */
> +	memset(ipad, 0, block_size);
> +	memset(opad, 0, block_size);
> +	return 0;
> +}
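
Maybe the tail of the function could funnel through a single exit label so the
manager is always released. A rough, untested sketch on top of the existing locals,
assuming an extra "int ret = 0;" is added to the declarations:

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size < 0) {
		free_mb_mgr(m);
		return block_size;
	}
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
		ret = -EFAULT;
		goto out;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf)) {
		QAT_LOG(ERR, "ipad precompute failed");
		ret = -EFAULT;
		goto out;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute_ipsec_mb(hash_alg, opad, p_state_buf + *p_state_len)) {
		QAT_LOG(ERR, "opad precompute failed");
		ret = -EFAULT;
	}

out:
	/* don't leave data lying around and always release the manager */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	free_mb_mgr(m);
	return ret;
}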
  

Patch

diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 785e041324..d92409b77e 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -287,6 +287,19 @@  by comma. When the same parameter is used more than once first occurrence of the
 is used.
 Maximum threshold that can be set is 32.
 
+Running QAT PMD with Intel IPSEC MB library for symmetric precomputes function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default the QAT PMD uses the OpenSSL library for partial hash calculation in the symmetric
+precomputes function. The following parameter allows the QAT PMD to switch over to the
+multi-buffer job API if the Intel IPSEC MB library is installed on the system.
+
+- qat_ipsec_mb_lib
+
+To use this feature the user must set the parameter at process start as an additional device parameter::
+
+  -a 03:01.1,qat_ipsec_mb_lib=1
+
 
 Device and driver naming
 ~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index b7027f3164..bba75b5194 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -35,6 +35,13 @@  if qat_crypto and not libcrypto.found()
             'missing dependency, libcrypto')
 endif
 
+IMB_required_ver = '1.0.0'
+libipsecmb = cc.find_library('IPSec_MB', required: false)
+if libipsecmb.found()
+    ext_deps += libipsecmb
+    dpdk_conf.set('RTE_QAT_LIBIPSECMB', true)
+endif
+
 # The driver should not build if both compression and crypto are disabled
 #FIXME common code depends on compression files so check only compress!
 if not qat_compress # and not qat_crypto
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 6824d97050..db4b087d2b 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -364,6 +364,7 @@  static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	struct qat_pci_device *qat_pci_dev;
 	struct qat_dev_hw_spec_funcs *ops_hw;
 	struct qat_dev_cmd_param qat_dev_cmd_param[] = {
+			{ QAT_IPSEC_MB_LIB, 0 },
 			{ SYM_ENQ_THRESHOLD_NAME, 0 },
 			{ ASYM_ENQ_THRESHOLD_NAME, 0 },
 			{ COMP_ENQ_THRESHOLD_NAME, 0 },
diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h
index 85fae7b7c7..e1a32a7e87 100644
--- a/drivers/common/qat/qat_device.h
+++ b/drivers/common/qat/qat_device.h
@@ -16,6 +16,7 @@ 
 
 #define QAT_DEV_NAME_MAX_LEN	64
 
+#define QAT_IPSEC_MB_LIB "qat_ipsec_mb_lib"
 #define SYM_ENQ_THRESHOLD_NAME "qat_sym_enq_threshold"
 #define ASYM_ENQ_THRESHOLD_NAME "qat_asym_enq_threshold"
 #define COMP_ENQ_THRESHOLD_NAME "qat_comp_enq_threshold"
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index ca8c9a8124..3477cd89ad 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -15,6 +15,7 @@ 
 #include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
+int qat_ipsec_mb_lib;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
@@ -307,6 +308,8 @@  qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
 			internals->min_enq_burst_threshold =
 					qat_dev_cmd_param[i].val;
+		if (!strcmp(qat_dev_cmd_param[i].name, QAT_IPSEC_MB_LIB))
+			qat_ipsec_mb_lib = qat_dev_cmd_param[i].val;
 		i++;
 	}
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9d6a19c0be..fd3f38167d 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -7,6 +7,10 @@ 
 #include <openssl/md5.h>	/* Needed to calculate pre-compute values */
 #include <openssl/evp.h>	/* Needed for bpi runt block processing */
 
+#ifdef RTE_QAT_LIBIPSECMB
+#include <intel-ipsec-mb.h>
+#endif
+
 #include <rte_memcpy.h>
 #include <rte_common.h>
 #include <rte_spinlock.h>
@@ -22,6 +26,12 @@ 
 #include "qat_sym_session.h"
 #include "qat_sym.h"
 
+#if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
+#include <openssl/provider.h>
+#endif
+
+extern int qat_ipsec_mb_lib;
+
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
 	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
@@ -470,6 +480,21 @@  qat_sym_session_configure(struct rte_cryptodev *dev,
 		return -ENOMEM;
 	}
 
+#if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
+	OSSL_PROVIDER * legacy;
+	OSSL_PROVIDER *deflt;
+
+	/* Load Multiple providers into the default (NULL) library context */
+	legacy = OSSL_PROVIDER_load(NULL, "legacy");
+	if (legacy == NULL)
+		return -EINVAL;
+
+	deflt = OSSL_PROVIDER_load(NULL, "default");
+	if (deflt == NULL) {
+		OSSL_PROVIDER_unload(legacy);
+		return  -EINVAL;
+	}
+#endif
 	ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
 	if (ret != 0) {
 		QAT_LOG(ERR,
@@ -483,6 +508,10 @@  qat_sym_session_configure(struct rte_cryptodev *dev,
 	set_sym_session_private_data(sess, dev->driver_id,
 		sess_private_data);
 
+# if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
+	OSSL_PROVIDER_unload(legacy);
+	OSSL_PROVIDER_unload(deflt);
+# endif
 	return 0;
 }
 
@@ -1057,6 +1086,297 @@  static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	return -EFAULT;
 }
 
+#define HMAC_IPAD_VALUE	0x36
+#define HMAC_OPAD_VALUE	0x5c
+#define HASH_XCBC_PRECOMP_KEY_NUM 3
+
+static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
+
+#ifdef RTE_QAT_LIBIPSECMB
+static int aes_ipsecmb_job(uint8_t *in, uint8_t *out, IMB_MGR *m,
+		const uint8_t *key, uint16_t auth_keylen)
+{
+	int err;
+	struct IMB_JOB *job;
+	DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
+	DECLARE_ALIGNED(uint32_t dust[4*15], 16);
+
+	if (auth_keylen == ICP_QAT_HW_AES_128_KEY_SZ)
+		IMB_AES_KEYEXP_128(m, key, expkey, dust);
+	else if (auth_keylen == ICP_QAT_HW_AES_192_KEY_SZ)
+		IMB_AES_KEYEXP_192(m, key, expkey, dust);
+	else if (auth_keylen == ICP_QAT_HW_AES_256_KEY_SZ)
+		IMB_AES_KEYEXP_256(m, key, expkey, dust);
+	else
+		return -EFAULT;
+
+	job = IMB_GET_NEXT_JOB(m);
+
+	job->src = in;
+	job->dst = out;
+	job->enc_keys = expkey;
+	job->key_len_in_bytes = auth_keylen;
+	job->msg_len_to_cipher_in_bytes = 16;
+	job->iv_len_in_bytes = 0;
+	job->cipher_direction = IMB_DIR_ENCRYPT;
+	job->cipher_mode = IMB_CIPHER_ECB;
+	job->hash_alg = IMB_AUTH_NULL;
+
+	while (IMB_FLUSH_JOB(m) != NULL)
+		;
+
+	job = IMB_SUBMIT_JOB(m);
+	if (job) {
+		if (job->status == IMB_STATUS_COMPLETED)
+			return 0;
+	}
+
+	err = imb_get_errno(m);
+	if (err)
+		QAT_LOG(ERR, "Error: %s!\n", imb_get_strerror(err));
+
+	return -EFAULT;
+}
+
+static int
+partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
+{
+	int digest_size;
+	uint8_t digest[qat_hash_get_digest_size(
+			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+	uint32_t *hash_state_out_be32;
+	uint64_t *hash_state_out_be64;
+	int i;
+	IMB_MGR *m;
+
+	m = alloc_mb_mgr(0);
+	if (m == NULL)
+		return -ENOMEM;
+	init_mb_mgr_auto(m, NULL);
+
+	/* Initialize to avoid gcc warning */
+	memset(digest, 0, sizeof(digest));
+
+	digest_size = qat_hash_get_digest_size(hash_alg);
+	if (digest_size <= 0)
+		return -EFAULT;
+
+	hash_state_out_be32 = (uint32_t *)data_out;
+	hash_state_out_be64 = (uint64_t *)data_out;
+
+	switch (hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		IMB_SHA1_ONE_BLOCK(m, data_in, digest);
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+			*hash_state_out_be32 =
+				rte_bswap32(*(((uint32_t *)digest)+i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA224:
+		IMB_SHA224_ONE_BLOCK(m, data_in, digest);
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+			*hash_state_out_be32 =
+				rte_bswap32(*(((uint32_t *)digest)+i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		IMB_SHA256_ONE_BLOCK(m, data_in, digest);
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+			*hash_state_out_be32 =
+				rte_bswap32(*(((uint32_t *)digest)+i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA384:
+		IMB_SHA384_ONE_BLOCK(m, data_in, digest);
+		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+			*hash_state_out_be64 =
+				rte_bswap64(*(((uint64_t *)digest)+i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		IMB_SHA512_ONE_BLOCK(m, data_in, digest);
+		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+			*hash_state_out_be64 =
+				rte_bswap64(*(((uint64_t *)digest)+i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_MD5:
+		IMB_MD5_ONE_BLOCK(m, data_in, data_out);
+		break;
+	default:
+		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
+		return -EFAULT;
+	}
+	free_mb_mgr(m);
+	return 0;
+}
+
+static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
+				const uint8_t *auth_key,
+				uint16_t auth_keylen,
+				uint8_t *p_state_buf,
+				uint16_t *p_state_len,
+				uint8_t aes_cmac)
+{
+	int block_size;
+	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+	int i;
+
+	IMB_MGR *m;
+	m = alloc_mb_mgr(0);
+	if (m == NULL)
+		return -ENOMEM;
+
+	init_mb_mgr_auto(m, NULL);
+
+	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
+
+		/* CMAC */
+		if (aes_cmac) {
+			uint8_t *in = NULL;
+			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
+			uint8_t *k1, *k2;
+
+			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
+
+			in = rte_zmalloc("AES CMAC K1",
+					 ICP_QAT_HW_AES_128_KEY_SZ, 16);
+
+			if (in == NULL) {
+				QAT_LOG(ERR, "Failed to alloc memory");
+				return -ENOMEM;
+			}
+
+			rte_memcpy(in, AES_CMAC_SEED,
+				   ICP_QAT_HW_AES_128_KEY_SZ);
+			rte_memcpy(p_state_buf, auth_key, auth_keylen);
+
+			DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
+			DECLARE_ALIGNED(uint32_t dust[4*15], 16);
+			IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
+			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+
+			IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
+			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
+			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+			rte_free(in);
+			free_mb_mgr(m);
+			return 0;
+		}
+
+		static uint8_t qat_aes_xcbc_key_seed[
+				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+		};
+
+		uint8_t *in = NULL;
+		uint8_t *out = p_state_buf;
+		int x;
+
+		in = rte_zmalloc("working mem for key",
+				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+		if (in == NULL) {
+			QAT_LOG(ERR, "Failed to alloc memory");
+			return -ENOMEM;
+		}
+
+		rte_memcpy(in, qat_aes_xcbc_key_seed,
+				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+			if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen)) {
+				rte_free(in -
+				  (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+				memset(out -
+				   (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+				  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+				return -EFAULT;
+			}
+
+			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+		}
+		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+		rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+		free_mb_mgr(m);
+		return 0;
+
+	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+		uint8_t *in = NULL;
+		uint8_t *out = p_state_buf;
+
+		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
+				ICP_QAT_HW_GALOIS_LEN_A_SZ +
+				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
+		in = rte_zmalloc("working mem for key",
+				ICP_QAT_HW_GALOIS_H_SZ, 16);
+		if (in == NULL) {
+			QAT_LOG(ERR, "Failed to alloc memory");
+			return -ENOMEM;
+		}
+
+		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
+		if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen))
+			return -EFAULT;
+
+		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
+				ICP_QAT_HW_GALOIS_LEN_A_SZ +
+				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
+		rte_free(in);
+		free_mb_mgr(m);
+		return 0;
+	}
+
+	block_size = qat_hash_get_block_size(hash_alg);
+	if (block_size < 0)
+		return block_size;
+	/* init ipad and opad from key and xor with fixed values */
+	memset(ipad, 0, block_size);
+	memset(opad, 0, block_size);
+
+	if (auth_keylen > (unsigned int)block_size) {
+		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
+		return -EFAULT;
+	}
+	rte_memcpy(ipad, auth_key, auth_keylen);
+	rte_memcpy(opad, auth_key, auth_keylen);
+
+	for (i = 0; i < block_size; i++) {
+		uint8_t *ipad_ptr = ipad + i;
+		uint8_t *opad_ptr = opad + i;
+		*ipad_ptr ^= HMAC_IPAD_VALUE;
+		*opad_ptr ^= HMAC_OPAD_VALUE;
+	}
+
+	/* do partial hash of ipad and copy to state1 */
+	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf)) {
+		memset(ipad, 0, block_size);
+		memset(opad, 0, block_size);
+		QAT_LOG(ERR, "ipad precompute failed");
+		return -EFAULT;
+	}
+
+	/*
+	 * State len is a multiple of 8, so may be larger than the digest.
+	 * Put the partial hash of opad state_len bytes after state1
+	 */
+	*p_state_len = qat_hash_get_state1_size(hash_alg);
+	if (partial_hash_compute_ipsec_mb(hash_alg, opad, p_state_buf + *p_state_len)) {
+		memset(ipad, 0, block_size);
+		memset(opad, 0, block_size);
+		QAT_LOG(ERR, "opad precompute failed");
+		return -EFAULT;
+	}
+
+	/*  don't leave data lying around */
+	memset(ipad, 0, block_size);
+	memset(opad, 0, block_size);
+	return 0;
+}
+#endif
 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
 {
 	SHA_CTX ctx;
@@ -1124,6 +1444,20 @@  static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
+{
+	int i;
+
+	derived[0] = base[0] << 1;
+	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
+		derived[i] = base[i] << 1;
+		derived[i - 1] |= base[i] >> 7;
+	}
+
+	if (base[0] & 0x80)
+		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
+}
+
 static int
 partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
 		uint8_t *data_in, uint8_t *data_out)
@@ -1192,25 +1526,6 @@  partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
 
 	return 0;
 }
-#define HMAC_IPAD_VALUE	0x36
-#define HMAC_OPAD_VALUE	0x5c
-#define HASH_XCBC_PRECOMP_KEY_NUM 3
-
-static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
-
-static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
-{
-	int i;
-
-	derived[0] = base[0] << 1;
-	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
-		derived[i] = base[i] << 1;
-		derived[i - 1] |= base[i] >> 7;
-	}
-
-	if (base[0] & 0x80)
-		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
-}
 
 static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 				const uint8_t *auth_key,
@@ -1695,6 +2010,7 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 	uint32_t *aad_len = NULL;
 	uint32_t wordIndex  = 0;
 	uint32_t *pTempKey;
+	int ret = 0;
 
 	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
 		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
@@ -1766,9 +2082,22 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-1 HMAC */
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
-			authkeylen, cdesc->cd_cur_ptr, &state1_size,
-			cdesc->aes_cmac)) {
+		if (qat_ipsec_mb_lib) {
+#ifdef RTE_QAT_LIBIPSECMB
+			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA1,
+				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+#else
+			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
+			return -EFAULT;
+#endif
+		} else {
+			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
+				authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+		}
+
+		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
@@ -1784,9 +2113,22 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-224 HMAC */
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
-			authkeylen, cdesc->cd_cur_ptr, &state1_size,
-			cdesc->aes_cmac)) {
+		if (qat_ipsec_mb_lib) {
+#ifdef RTE_QAT_LIBIPSECMB
+			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA224,
+				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+#else
+			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
+			return -EFAULT;
+#endif
+		} else {
+			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
+				authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+		}
+
+		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
@@ -1802,9 +2144,22 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-256 HMAC */
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
-			authkeylen, cdesc->cd_cur_ptr,	&state1_size,
-			cdesc->aes_cmac)) {
+		if (qat_ipsec_mb_lib) {
+#ifdef RTE_QAT_LIBIPSECMB
+			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA256,
+				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+#else
+			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
+			return -EFAULT;
+#endif
+		} else {
+			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
+				authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+		}
+
+		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
@@ -1820,9 +2175,22 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-384 HMAC */
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
-			authkeylen, cdesc->cd_cur_ptr, &state1_size,
-			cdesc->aes_cmac)) {
+		if (qat_ipsec_mb_lib) {
+#ifdef RTE_QAT_LIBIPSECMB
+			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA384,
+				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+#else
+			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
+			return -EFAULT;
+#endif
+		} else {
+			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
+				authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+		}
+
+		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
@@ -1838,9 +2206,22 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-512 HMAC */
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
-			authkeylen, cdesc->cd_cur_ptr,	&state1_size,
-			cdesc->aes_cmac)) {
+		if (qat_ipsec_mb_lib) {
+#ifdef RTE_QAT_LIBIPSECMB
+			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA512,
+				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+#else
+			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
+			return -EFAULT;
+#endif
+		} else {
+			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
+				authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+		}
+
+		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
 		}
@@ -1851,9 +2232,23 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 
 		if (cdesc->aes_cmac)
 			memset(cdesc->cd_cur_ptr, 0, state1_size);
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
-			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
-			&state2_size, cdesc->aes_cmac)) {
+		if (qat_ipsec_mb_lib) {
+#ifdef RTE_QAT_LIBIPSECMB
+			ret = qat_sym_do_precomputes_ipsec_mb(
+				ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+				authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+				&state2_size, cdesc->aes_cmac);
+#else
+			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
+			return -EFAULT;
+#endif
+		} else {
+			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+				authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+				&state2_size, cdesc->aes_cmac);
+		}
+
+		if (ret) {
 			cdesc->aes_cmac ? QAT_LOG(ERR,
 						  "(CMAC)precompute failed")
 					: QAT_LOG(ERR,
@@ -1865,9 +2260,22 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
 		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
-		if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
-			authkeylen, cdesc->cd_cur_ptr + state1_size,
-			&state2_size, cdesc->aes_cmac)) {
+		if (qat_ipsec_mb_lib) {
+#ifdef RTE_QAT_LIBIPSECMB
+			ret = qat_sym_do_precomputes_ipsec_mb(cdesc->qat_hash_alg, authkey,
+				authkeylen, cdesc->cd_cur_ptr + state1_size,
+				&state2_size, cdesc->aes_cmac);
+#else
+			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
+			return -EFAULT;
+#endif
+		} else {
+			ret = qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
+				authkeylen, cdesc->cd_cur_ptr + state1_size,
+				&state2_size, cdesc->aes_cmac);
+		}
+
+		if (ret) {
 			QAT_LOG(ERR, "(GCM)precompute failed");
 			return -EFAULT;
 		}
@@ -1923,9 +2331,22 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
-		if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
-			authkeylen, cdesc->cd_cur_ptr, &state1_size,
-			cdesc->aes_cmac)) {
+		if (qat_ipsec_mb_lib) {
+#ifdef RTE_QAT_LIBIPSECMB
+			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_MD5,
+				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+#else
+			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
+			return -EFAULT;
+#endif
+		} else {
+			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
+				authkeylen, cdesc->cd_cur_ptr, &state1_size,
+				cdesc->aes_cmac);
+		}
+
+		if (ret) {
 			QAT_LOG(ERR, "(MD5)precompute failed");
 			return -EFAULT;
 		}