[dpdk-dev,v6,1/2] cryptodev: API tidy and changes to support future extensions

Message ID 1457116189-10226-2-git-send-email-fiona.trahe@intel.com (mailing list archive)
State Superseded, archived

Commit Message

Fiona Trahe March 4, 2016, 6:29 p.m. UTC
This patch splits symmetric-specific definitions and functions away from
 the common crypto API to facilitate future extension of the cryptodev
 framework: it allows asymmetric crypto operations to be introduced at a
 later date and cleans up the logical structure of the public includes.
 The patch also introduces a _sym prefix on symmetric-specific structures
 and functions to improve clarity in the API.
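
For reference, a minimal sketch of the renamed symmetric API as it is used
after this change, mirroring the unit-test updates below. The dev_id
variable and the AES-CBC parameter values are illustrative placeholders,
not values taken from this patch:

	#include <rte_cryptodev.h>	/* pulls in rte_crypto_sym.h */

	/* was struct rte_crypto_xform / RTE_CRYPTO_XFORM_CIPHER */
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = NULL,
		.cipher = {
			.op   = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		},
	};

	/* was rte_cryptodev_session_create()/_free() */
	struct rte_cryptodev_sym_session *sess =
		rte_cryptodev_sym_session_create(dev_id, &cipher_xform);

	if (sess != NULL)
		rte_cryptodev_sym_session_free(dev_id, sess);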

Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
 app/test/test_cryptodev.c                          | 164 +++---
 app/test/test_cryptodev_perf.c                     |  79 +--
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         |  44 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |   6 +-
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   4 +-
 drivers/crypto/qat/qat_crypto.c                    |  51 +-
 drivers/crypto/qat/qat_crypto.h                    |  10 +-
 drivers/crypto/qat/rte_qat_cryptodev.c             |   8 +-
 examples/l2fwd-crypto/main.c                       |  33 +-
 lib/librte_cryptodev/Makefile                      |   1 +
 lib/librte_cryptodev/rte_crypto.h                  | 563 +------------------
 lib/librte_cryptodev/rte_crypto_sym.h              | 613 +++++++++++++++++++++
 lib/librte_cryptodev/rte_cryptodev.c               |  39 +-
 lib/librte_cryptodev/rte_cryptodev.h               |  80 ++-
 lib/librte_cryptodev/rte_cryptodev_pmd.h           |  32 +-
 lib/librte_cryptodev/rte_cryptodev_version.map     |   6 +-
 lib/librte_mbuf_offload/rte_mbuf_offload.h         |  22 +-
 17 files changed, 915 insertions(+), 840 deletions(-)
 create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
  

Patch

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 62f8fb0..951b443 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -57,13 +57,13 @@  struct crypto_testsuite_params {
 };
 
 struct crypto_unittest_params {
-	struct rte_crypto_xform cipher_xform;
-	struct rte_crypto_xform auth_xform;
+	struct rte_crypto_sym_xform cipher_xform;
+	struct rte_crypto_sym_xform auth_xform;
 
-	struct rte_cryptodev_session *sess;
+	struct rte_cryptodev_sym_session *sess;
 
 	struct rte_mbuf_offload *ol;
-	struct rte_crypto_op *op;
+	struct rte_crypto_sym_op *op;
 
 	struct rte_mbuf *obuf, *ibuf;
 
@@ -78,7 +78,7 @@  test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
 		struct crypto_unittest_params *ut_params);
 
 static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
 		struct crypto_unittest_params *ut_params,
 		struct crypto_testsuite_params *ts_param);
 
@@ -165,7 +165,8 @@  testsuite_setup(void)
 	ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
 			"MBUF_OFFLOAD_POOL",
 			NUM_MBUFS, MBUF_CACHE_SIZE,
-			DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+			DEFAULT_NUM_XFORMS *
+					sizeof(struct rte_crypto_sym_xform),
 			rte_socket_id());
 	if (ts_params->mbuf_ol_pool == NULL) {
 		RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -220,7 +221,7 @@  testsuite_setup(void)
 
 		ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
 		ts_params->conf.socket_id = SOCKET_ID_ANY;
-		ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+		ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
 
 		TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
 				&ts_params->conf),
@@ -275,7 +276,7 @@  ut_setup(void)
 	ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
 	ts_params->conf.socket_id = SOCKET_ID_ANY;
 	ts_params->conf.session_mp.nb_objs =
-			(gbl_cryptodev_type == RTE_CRYPTODEV_QAT_PMD) ?
+			(gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
 					DEFAULT_NUM_OPS_INFLIGHT :
 					DEFAULT_NUM_OPS_INFLIGHT;
 
@@ -319,7 +320,7 @@  ut_teardown(void)
 
 	/* free crypto session structure */
 	if (ut_params->sess) {
-		rte_cryptodev_session_free(ts_params->valid_devs[0],
+		rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
 				ut_params->sess);
 		ut_params->sess = NULL;
 	}
@@ -464,7 +465,7 @@  test_queue_pair_descriptor_setup(void)
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
 
-	ts_params->conf.session_mp.nb_objs = dev_info.max_nb_sessions;
+	ts_params->conf.session_mp.nb_objs = dev_info.sym.max_nb_sessions;
 
 	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
 			&ts_params->conf), "Failed to configure cryptodev %u",
@@ -766,7 +767,7 @@  test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
 	TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = &ut_params->auth_xform;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -776,7 +777,7 @@  test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
 
 	/* Setup HMAC Parameters */
 
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = NULL;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -786,20 +787,21 @@  test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
-			&ut_params->cipher_xform);
+	ut_params->sess =
+		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+						&ut_params->cipher_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
 	ut_params->op = &ut_params->ol->op.crypto;
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
 
 	ut_params->op->digest.data = ut_params->digest;
 	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -862,18 +864,18 @@  test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
 	ut_params->op = &ut_params->ol->op.crypto;
 
-	TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
+	TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(
 			ut_params->ol, 2),
 			"failed to allocate space for crypto transforms");
 
 	/* Set crypto operation data parameters */
-	ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 
 	/* cipher parameters */
 	ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
@@ -882,7 +884,7 @@  test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
 	ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
 
 	/* hash parameters */
-	ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
 
 	ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
 	ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
@@ -958,7 +960,7 @@  test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
 			DIGEST_BYTE_LENGTH_SHA1);
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = NULL;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -967,7 +969,7 @@  test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
 	ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
 
 	/* Setup HMAC Parameters */
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = &ut_params->cipher_xform;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -977,13 +979,14 @@  test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
-			&ut_params->auth_xform);
+	ut_params->sess =
+		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+						&ut_params->auth_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
@@ -991,7 +994,7 @@  test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
 
 	ut_params->op->digest.data = ut_params->digest;
 	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1066,7 +1069,7 @@  test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
 	TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = &ut_params->auth_xform;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1075,7 +1078,7 @@  test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
 	ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
 
 	/* Setup HMAC Parameters */
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = NULL;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1085,13 +1088,14 @@  test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
-			&ut_params->cipher_xform);
+	ut_params->sess =
+		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+						&ut_params->cipher_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
@@ -1099,7 +1103,7 @@  test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
 
 	ut_params->op->digest.data = ut_params->digest;
 	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1168,7 +1172,7 @@  test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
 			DIGEST_BYTE_LENGTH_SHA256);
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = NULL;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1177,7 +1181,7 @@  test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
 	ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
 
 	/* Setup HMAC Parameters */
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = &ut_params->cipher_xform;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1187,13 +1191,14 @@  test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
-			&ut_params->auth_xform);
+	ut_params->sess =
+		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+						&ut_params->auth_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
@@ -1201,7 +1206,7 @@  test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
 
 	ut_params->op->digest.data = ut_params->digest;
 	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1282,7 +1287,7 @@  test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
 	TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = &ut_params->auth_xform;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1291,7 +1296,7 @@  test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
 	ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
 
 	/* Setup HMAC Parameters */
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = NULL;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1301,15 +1306,16 @@  test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
-			&ut_params->cipher_xform);
+	ut_params->sess =
+		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+						&ut_params->cipher_xform);
 
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
@@ -1317,7 +1323,7 @@  test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
 
 	ut_params->op->digest.data = ut_params->digest;
 	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1371,7 +1377,7 @@  test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
 		struct crypto_unittest_params *ut_params);
 
 static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
 		struct crypto_unittest_params *ut_params,
 		struct crypto_testsuite_params *ts_params);
 
@@ -1386,8 +1392,9 @@  test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
 			"Failed to create session params");
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
-			&ut_params->auth_xform);
+	ut_params->sess =
+		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+						&ut_params->auth_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
@@ -1400,7 +1407,7 @@  test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
 {
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = NULL;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1409,7 +1416,7 @@  test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
 	ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
 
 	/* Setup HMAC Parameters */
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = &ut_params->cipher_xform;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1422,7 +1429,7 @@  test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
 
 
 static int
-test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
+test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
 		struct crypto_unittest_params *ut_params,
 		struct crypto_testsuite_params *ts_params)
 {
@@ -1442,7 +1449,7 @@  test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
@@ -1450,7 +1457,7 @@  test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(ut_params->op, sess);
+	rte_crypto_sym_op_attach_session(ut_params->op, sess);
 
 	ut_params->op->digest.data = ut_params->digest;
 	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1522,7 +1529,7 @@  test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
 	TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = &ut_params->auth_xform;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1531,7 +1538,7 @@  test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
 	ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
 
 	/* Setup HMAC Parameters */
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = NULL;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1541,13 +1548,14 @@  test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
-			&ut_params->cipher_xform);
+	ut_params->sess =
+		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+						&ut_params->cipher_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
@@ -1555,7 +1563,7 @@  test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
 
 	ut_params->op->iv.data = (uint8_t *)
 		rte_pktmbuf_prepend(ut_params->ibuf,
@@ -1615,7 +1623,7 @@  test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
 			DIGEST_BYTE_LENGTH_AES_XCBC);
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = NULL;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1624,7 +1632,7 @@  test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
 	ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
 
 	/* Setup HMAC Parameters */
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = &ut_params->cipher_xform;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1634,13 +1642,14 @@  test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
-			&ut_params->auth_xform);
+	ut_params->sess =
+		rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
+						&ut_params->auth_xform);
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
@@ -1648,7 +1657,7 @@  test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
 
 	ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
 			CIPHER_IV_LENGTH_AES_CBC);
@@ -1752,7 +1761,7 @@  test_multi_session(void)
 	struct crypto_unittest_params *ut_params = &unittest_params;
 
 	struct rte_cryptodev_info dev_info;
-	struct rte_cryptodev_session **sessions;
+	struct rte_cryptodev_sym_session **sessions;
 
 	uint16_t i;
 
@@ -1761,12 +1770,13 @@  test_multi_session(void)
 
 	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
 
-	sessions = rte_malloc(NULL, (sizeof(struct rte_cryptodev_session *) *
-			dev_info.max_nb_sessions) + 1, 0);
+	sessions = rte_malloc(NULL,
+			(sizeof(struct rte_cryptodev_sym_session *) *
+			dev_info.sym.max_nb_sessions) + 1, 0);
 
 	/* Create multiple crypto sessions*/
-	for (i = 0; i < dev_info.max_nb_sessions; i++) {
-		sessions[i] = rte_cryptodev_session_create(
+	for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+		sessions[i] = rte_cryptodev_sym_session_create(
 				ts_params->valid_devs[0],
 			&ut_params->auth_xform);
 		TEST_ASSERT_NOT_NULL(sessions[i],
@@ -1781,13 +1791,13 @@  test_multi_session(void)
 	}
 
 	/* Next session create should fail */
-	sessions[i] = rte_cryptodev_session_create(ts_params->valid_devs[0],
+	sessions[i] = rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
 			&ut_params->auth_xform);
 	TEST_ASSERT_NULL(sessions[i],
 			"Session creation succeeded unexpectedly!");
 
-	for (i = 0; i < dev_info.max_nb_sessions; i++)
-		rte_cryptodev_session_free(ts_params->valid_devs[0],
+	for (i = 0; i < dev_info.sym.max_nb_sessions; i++)
+		rte_cryptodev_sym_session_free(ts_params->valid_devs[0],
 				sessions[i]);
 
 	rte_free(sessions);
@@ -1806,7 +1816,7 @@  test_not_in_place_crypto(void)
 
 	/* Create multiple crypto sessions*/
 
-	ut_params->sess = rte_cryptodev_session_create(
+	ut_params->sess = rte_cryptodev_sym_session_create(
 			ts_params->valid_devs[0], &ut_params->auth_xform);
 
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1828,7 +1838,7 @@  test_not_in_place_crypto(void)
 
 	/* Generate Crypto op data structure */
 	ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
-				RTE_PKTMBUF_OL_CRYPTO);
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 	TEST_ASSERT_NOT_NULL(ut_params->ol,
 			"Failed to allocate pktmbuf offload");
 
@@ -1836,7 +1846,7 @@  test_not_in_place_crypto(void)
 
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+	rte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);
 
 	ut_params->op->digest.data = ut_params->digest;
 	ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1962,7 +1972,7 @@  static struct unit_test_suite cryptodev_aesni_mb_testsuite  = {
 static int
 test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
 {
-	gbl_cryptodev_type = RTE_CRYPTODEV_QAT_PMD;
+	gbl_cryptodev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
 	return unit_test_suite_runner(&cryptodev_qat_testsuite);
 }
 static struct test_command cryptodev_qat_cmd = {
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 728bcf0..42dd9bc 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -63,12 +63,12 @@  struct crypto_testsuite_params {
 #define MAX_NUM_OF_OPS_PER_UT	(128)
 
 struct crypto_unittest_params {
-	struct rte_crypto_xform cipher_xform;
-	struct rte_crypto_xform auth_xform;
+	struct rte_crypto_sym_xform cipher_xform;
+	struct rte_crypto_sym_xform auth_xform;
 
-	struct rte_cryptodev_session *sess;
+	struct rte_cryptodev_sym_session *sess;
 
-	struct rte_crypto_op *op;
+	struct rte_crypto_sym_op *op;
 	struct rte_mbuf_offload *ol;
 
 	struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
@@ -127,7 +127,7 @@  testsuite_setup(void)
 	ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
 				NUM_MBUFS, MBUF_CACHE_SIZE,
 				DEFAULT_NUM_XFORMS *
-				sizeof(struct rte_crypto_xform),
+				sizeof(struct rte_crypto_sym_xform),
 				rte_socket_id());
 		if (ts_params->mbuf_ol_pool == NULL) {
 			RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
@@ -179,7 +179,7 @@  testsuite_setup(void)
 
 	ts_params->conf.nb_queue_pairs = DEFAULT_NUM_QPS_PER_QAT_DEVICE;
 	ts_params->conf.socket_id = SOCKET_ID_ANY;
-	ts_params->conf.session_mp.nb_objs = info.max_nb_sessions;
+	ts_params->conf.session_mp.nb_objs = info.sym.max_nb_sessions;
 
 	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->dev_id,
 			&ts_params->conf),
@@ -252,7 +252,7 @@  ut_teardown(void)
 
 	/* free crypto session structure */
 	if (ut_params->sess)
-		rte_cryptodev_session_free(ts_params->dev_id,
+		rte_cryptodev_sym_session_free(ts_params->dev_id,
 				ut_params->sess);
 
 	/* free crypto operation structure */
@@ -1713,7 +1713,7 @@  test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 	}
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = &ut_params->auth_xform;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1723,7 +1723,7 @@  test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 
 
 	/* Setup HMAC Parameters */
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = NULL;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
@@ -1733,7 +1733,7 @@  test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+	ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
 		&ut_params->cipher_xform);
 
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1741,7 +1741,7 @@  test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 	/* Generate Crypto op data structure(s) */
 	for (b = 0; b < num_to_submit ; b++) {
 		tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
-				(const char *)data_params[0].expected.ciphertext,
+			(const char *)data_params[0].expected.ciphertext,
 				data_params[0].length, 0);
 		TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
 
@@ -1753,12 +1753,12 @@  test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 			DIGEST_BYTE_LENGTH_SHA256);
 
 		struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
-				ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
+			ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
 		TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
 
-		struct rte_crypto_op *cop = &ol->op.crypto;
+		struct rte_crypto_sym_op *cop = &ol->op.crypto;
 
-		rte_crypto_op_attach_session(cop, ut_params->sess);
+		rte_crypto_sym_op_attach_session(cop, ut_params->sess);
 
 		cop->digest.data = ut_params->digest;
 		cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
@@ -1814,7 +1814,8 @@  test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 			rte_delay_ms(1);
 
 			start_cycles = rte_rdtsc_precise();
-			burst_received = rte_cryptodev_dequeue_burst(dev_num,
+			burst_received =
+				rte_cryptodev_dequeue_burst(dev_num,
 						0, rx_mbufs, burst_size);
 			if (burst_received == 0)
 				failed_polls++;
@@ -1824,10 +1825,13 @@  test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
 			total_cycles += end_cycles - start_cycles;
 		}
 		while (num_received != num_to_submit) {
-			if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
-				rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+			if (gbl_cryptodev_preftest_devtype ==
+						RTE_CRYPTODEV_AESNI_MB_PMD)
+				rte_cryptodev_enqueue_burst(dev_num,
+								0, NULL, 0);
 
-			burst_received = rte_cryptodev_dequeue_burst(dev_num,
+			burst_received =
+				rte_cryptodev_dequeue_burst(dev_num,
 						0, rx_mbufs, burst_size);
 			if (burst_received == 0)
 				failed_polls++;
@@ -1876,7 +1880,7 @@  test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 	}
 
 	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
+	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 	ut_params->cipher_xform.next = &ut_params->auth_xform;
 
 	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
@@ -1885,7 +1889,7 @@  test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 	ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
 
 	/* Setup HMAC Parameters */
-	ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
+	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = NULL;
 
 	ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
@@ -1895,7 +1899,7 @@  test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 	ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
 
 	/* Create Crypto session*/
-	ut_params->sess = rte_cryptodev_session_create(ts_params->dev_id,
+	ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
 			&ut_params->cipher_xform);
 
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
@@ -1928,12 +1932,12 @@  test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 
 			struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
 						ts_params->mbuf_ol_pool,
-						RTE_PKTMBUF_OL_CRYPTO);
+						RTE_PKTMBUF_OL_CRYPTO_SYM);
 			TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
 
-			struct rte_crypto_op *cop = &ol->op.crypto;
+			struct rte_crypto_sym_op *cop = &ol->op.crypto;
 
-			rte_crypto_op_attach_session(cop, ut_params->sess);
+			rte_crypto_sym_op_attach_session(cop, ut_params->sess);
 
 			cop->digest.data = ut_params->digest;
 			cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
@@ -1957,15 +1961,19 @@  test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 		}
 		start_cycles = rte_rdtsc_precise();
 		while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
-			burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
-				((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
-				DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+			burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+					0, tx_mbufs,
+					((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)
+							< DEFAULT_BURST_SIZE) ?
+					DEFAULT_NUM_REQS_TO_SUBMIT-num_sent :
+							DEFAULT_BURST_SIZE);
 			if (burst_sent == 0)
 				retries++;
 			else
 				num_sent += burst_sent;
 
-			burst_received = rte_cryptodev_dequeue_burst(dev_num,
+			burst_received =
+				rte_cryptodev_dequeue_burst(dev_num,
 					0, rx_mbufs, DEFAULT_BURST_SIZE);
 			if (burst_received == 0)
 				failed_polls++;
@@ -1973,10 +1981,13 @@  test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 				num_received += burst_received;
 		}
 		while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
-			if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
-				rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+			if (gbl_cryptodev_preftest_devtype ==
+						RTE_CRYPTODEV_AESNI_MB_PMD)
+				rte_cryptodev_enqueue_burst(dev_num,
+								0, NULL, 0);
 
-			burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
+			burst_received =
+				rte_cryptodev_dequeue_burst(dev_num, 0,
 						rx_mbufs, DEFAULT_BURST_SIZE);
 			if (burst_received == 0)
 				failed_polls++;
@@ -1987,7 +1998,7 @@  test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
 		mmps = (double)num_received*mhz/(end_cycles - start_cycles);
 		throughput = mmps*data_params[index].length*8;
 		printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
-				data_params[index].length, num_sent, num_received);
+			data_params[index].length, num_sent, num_received);
 		printf("\t%.2f\t%u", mmps, throughput);
 		printf("\t\t%"PRIu64, retries);
 		for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
@@ -2044,7 +2055,7 @@  perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
 static int
 perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
 {
-	gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_PMD;
+	gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
 
 	return unit_test_suite_runner(&cryptodev_testsuite);
 }
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index c7d884c..61d93cd 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -109,7 +109,7 @@  calculate_auth_precomputes(hash_one_block_t one_block_hash,
 
 /** Get xform chain order */
 static int
-aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
 {
 	/*
 	 * Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
@@ -119,12 +119,12 @@  aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
 	if (xform->next == NULL || xform->next->next != NULL)
 		return -1;
 
-	if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
-			xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 		return HASH_CIPHER;
 
-	if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
-				xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+				xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
 		return CIPHER_HASH;
 
 	return -1;
@@ -134,11 +134,11 @@  aesni_mb_get_chain_order(const struct rte_crypto_xform *xform)
 static int
 aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
 		struct aesni_mb_session *sess,
-		const struct rte_crypto_xform *xform)
+		const struct rte_crypto_sym_xform *xform)
 {
 	hash_one_block_t hash_oneblock_fn;
 
-	if (xform->type != RTE_CRYPTO_XFORM_AUTH) {
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
 		MB_LOG_ERR("Crypto xform struct not of type auth");
 		return -1;
 	}
@@ -196,11 +196,11 @@  aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
 static int
 aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
 		struct aesni_mb_session *sess,
-		const struct rte_crypto_xform *xform)
+		const struct rte_crypto_sym_xform *xform)
 {
 	aes_keyexp_t aes_keyexp_fn;
 
-	if (xform->type != RTE_CRYPTO_XFORM_CIPHER) {
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		MB_LOG_ERR("Crypto xform struct not of type cipher");
 		return -1;
 	}
@@ -259,10 +259,10 @@  aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
 int
 aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
 		struct aesni_mb_session *sess,
-		const struct rte_crypto_xform *xform)
+		const struct rte_crypto_sym_xform *xform)
 {
-	const struct rte_crypto_xform *auth_xform = NULL;
-	const struct rte_crypto_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
 
 	/* Select Crypto operation - hash then cipher / cipher then hash */
 	switch (aesni_mb_get_chain_order(xform)) {
@@ -296,11 +296,11 @@  aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
 
 /** Get multi buffer session */
 static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)
 {
 	struct aesni_mb_session *sess = NULL;
 
-	if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
+	if (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
 		if (unlikely(crypto_op->session->type !=
 				RTE_CRYPTODEV_AESNI_MB_PMD))
 			return NULL;
@@ -313,7 +313,7 @@  get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
 			return NULL;
 
 		sess = (struct aesni_mb_session *)
-			((struct rte_cryptodev_session *)_sess)->_private;
+			((struct rte_cryptodev_sym_session *)_sess)->_private;
 
 		if (unlikely(aesni_mb_set_session_parameters(qp->ops,
 				sess, crypto_op->xform) != 0)) {
@@ -339,7 +339,8 @@  get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
  */
 static JOB_AES_HMAC *
 process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
-		struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+		struct rte_crypto_sym_op *c_op,
+		struct aesni_mb_session *session)
 {
 	JOB_AES_HMAC *job;
 
@@ -432,14 +433,14 @@  static struct rte_mbuf *
 post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 {
 	struct rte_mbuf *m;
-	struct rte_crypto_op *c_op;
+	struct rte_crypto_sym_op *c_op;
 
 	if (job->user_data == NULL)
 		return NULL;
 
 	/* handled retrieved job */
 	m = (struct rte_mbuf *)job->user_data;
-	c_op = (struct rte_crypto_op *)job->user_data2;
+	c_op = (struct rte_crypto_sym_op *)job->user_data2;
 
 	/* set status as successful by default */
 	c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -459,7 +460,7 @@  post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
 	}
 
 	/* Free session if a session-less crypto op */
-	if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
+	if (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
 		rte_mempool_put(qp->sess_mp, c_op->session);
 		c_op->session = NULL;
 	}
@@ -511,7 +512,8 @@  aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
 	int i, processed_jobs = 0;
 
 	for (i = 0; i < nb_bufs; i++) {
-		ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
+		ol = rte_pktmbuf_offload_get(bufs[i],
+				RTE_PKTMBUF_OL_CRYPTO_SYM);
 		if (unlikely(ol == NULL)) {
 			qp->qp_stats.enqueue_err_count++;
 			goto flush_jobs;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index e43b898..3cd9990 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -108,7 +108,7 @@  aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
 	if (dev_info != NULL) {
 		dev_info->dev_type = dev->dev_type;
 		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
-		dev_info->max_nb_sessions = internals->max_nb_sessions;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
 	}
 }
 
@@ -243,7 +243,7 @@  aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 /** Configure a aesni multi-buffer session from a crypto xform chain */
 static void *
 aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_xform *xform,	void *sess)
+		struct rte_crypto_sym_xform *xform,	void *sess)
 {
 	struct aesni_mb_private *internals = dev->data->dev_private;
 
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 2f98609..ab70c15 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -218,7 +218,7 @@  struct aesni_mb_session {
 extern int
 aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
 		struct aesni_mb_session *sess,
-		const struct rte_crypto_xform *xform);
+		const struct rte_crypto_sym_xform *xform);
 
 
 /** device specific operations function pointer structure */
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 828756b..e7b9027 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -89,37 +89,37 @@  void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
 }
 
 static int
-qat_get_cmd_id(const struct rte_crypto_xform *xform)
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
 	if (xform->next == NULL)
 		return -1;
 
 	/* Cipher Only */
-	if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
 		return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
 
 	/* Authentication Only */
-	if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
 		return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
 
 	/* Cipher then Authenticate */
-	if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
-			xform->next->type == RTE_CRYPTO_XFORM_AUTH)
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
 		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
 
 	/* Authenticate then Cipher */
-	if (xform->type == RTE_CRYPTO_XFORM_AUTH &&
-			xform->next->type == RTE_CRYPTO_XFORM_CIPHER)
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
 
 	return -1;
 }
 
 static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_xform *xform)
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
 {
 	do {
-		if (xform->type == RTE_CRYPTO_XFORM_AUTH)
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
 			return &xform->auth;
 
 		xform = xform->next;
@@ -129,10 +129,10 @@  qat_get_auth_xform(struct rte_crypto_xform *xform)
 }
 
 static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_xform *xform)
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
 {
 	do {
-		if (xform->type == RTE_CRYPTO_XFORM_CIPHER)
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 			return &xform->cipher;
 
 		xform = xform->next;
@@ -144,7 +144,7 @@  qat_get_cipher_xform(struct rte_crypto_xform *xform)
 
 void *
 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
-		struct rte_crypto_xform *xform, void *session_private)
+		struct rte_crypto_sym_xform *xform, void *session_private)
 {
 	struct qat_pmd_private *internals = dev->data->dev_private;
 
@@ -261,7 +261,7 @@  qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 		auth_xform->digest_length))
 		goto error_out;
 
-	return (struct rte_cryptodev_session *)session;
+	return (struct rte_crypto_sym_session *)session;
 
 error_out:
 	rte_mempool_put(internals->sess_mp, session);
@@ -275,7 +275,7 @@  unsigned qat_crypto_sym_get_session_private_size(
 }
 
 
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
+uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts)
 {
 	register struct qat_queue *queue;
@@ -327,7 +327,8 @@  kick_tail:
 }
 
 uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,
+				uint16_t nb_pkts)
 {
 	struct rte_mbuf_offload *ol;
 	struct qat_queue *queue;
@@ -343,12 +344,13 @@  qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
 			msg_counter != nb_pkts) {
 		rx_mbuf = (struct rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);
-		ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
-
+		ol = rte_pktmbuf_offload_get(rx_mbuf,
+					RTE_PKTMBUF_OL_CRYPTO_SYM);
 		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
 				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
 					resp_msg->comn_hdr.comn_status)) {
-			ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+			ol->op.crypto.status =
+					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 		} else {
 			ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		}
@@ -384,20 +386,21 @@  qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	register struct icp_qat_fw_la_bulk_req *qat_req;
 
-	ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
+	ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);
 	if (unlikely(ol == NULL)) {
 		PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
 				"to (%p) mbuf.", mbuf);
 		return -EINVAL;
 	}
 
-	if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+	if (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
 		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
 				" requests mbuf (%p) is sessionless.", mbuf);
 		return -EINVAL;
 	}
 
-	if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+	if (unlikely(ol->op.crypto.session->type
+					!= RTE_CRYPTODEV_QAT_SYM_PMD)) {
 		PMD_DRV_LOG(ERR, "Session was not created for this device");
 		return -EINVAL;
 	}
@@ -520,8 +523,8 @@  void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
 				ADF_NUM_SYM_QPS_PER_BUNDLE *
 				ADF_NUM_BUNDLES_PER_DEV;
 
-		info->max_nb_sessions = internals->max_nb_sessions;
-		info->dev_type = RTE_CRYPTODEV_QAT_PMD;
+		info->sym.max_nb_sessions = internals->max_nb_sessions;
+		info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
 	}
 }
 
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..e9f71fe 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -109,16 +109,18 @@  qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
 
 extern void *
 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
-		struct rte_crypto_xform *xform, void *session_private);
+		struct rte_crypto_sym_xform *xform, void *session_private);
 
 extern void
 qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
 
 
 uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
 
 uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
 
 #endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index e500c1e..85700fc 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -89,11 +89,11 @@  crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
 		cryptodev->pci_dev->addr.devid,
 		cryptodev->pci_dev->addr.function);
 
-	cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
+	cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
 	cryptodev->dev_ops = &crypto_qat_ops;
 
-	cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
-	cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+	cryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;
+	cryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;
 
 
 	internals = cryptodev->data->dev_private;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index a950b74..93de786 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -124,13 +124,13 @@  struct l2fwd_crypto_options {
 
 	enum l2fwd_crypto_xform_chain xform_chain;
 
-	struct rte_crypto_xform cipher_xform;
+	struct rte_crypto_sym_xform cipher_xform;
 	uint8_t ckey_data[32];
 
-	struct rte_crypto_key iv_key;
+	struct rte_crypto_sym_key iv_key;
 	uint8_t ivkey_data[16];
 
-	struct rte_crypto_xform auth_xform;
+	struct rte_crypto_sym_xform auth_xform;
 	uint8_t akey_data[128];
 };
 
@@ -142,8 +142,11 @@  struct l2fwd_crypto_params {
 	unsigned digest_length;
 	unsigned block_size;
 
-	struct rte_crypto_key iv_key;
-	struct rte_cryptodev_session *session;
+	struct {
+		uint8_t *data;
+		uint16_t length;
+	} iv;
+	struct rte_cryptodev_sym_session *session;
 };
 
 /** lcore configuration */
@@ -372,7 +375,7 @@  l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
 	}
 
 	/* Set crypto operation data parameters */
-	rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+	rte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);
 
 	/* Append space for digest to end of packet */
 	ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
@@ -474,11 +477,11 @@  generate_random_key(uint8_t *key, unsigned length)
 		key[i] = rand() % 0xff;
 }
 
-static struct rte_cryptodev_session *
+static struct rte_cryptodev_sym_session *
 initialize_crypto_session(struct l2fwd_crypto_options *options,
 		uint8_t cdev_id)
 {
-	struct rte_crypto_xform *first_xform;
+	struct rte_crypto_sym_xform *first_xform;
 
 	if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
 		first_xform = &options->cipher_xform;
@@ -489,7 +492,7 @@  initialize_crypto_session(struct l2fwd_crypto_options *options,
 	}
 
 	/* Setup Cipher Parameters */
-	return rte_cryptodev_session_create(cdev_id, first_xform);
+	return rte_cryptodev_sym_session_create(cdev_id, first_xform);
 }
 
 static void
@@ -610,7 +613,7 @@  l2fwd_main_loop(struct l2fwd_crypto_options *options)
 				m = pkts_burst[j];
 				ol = rte_pktmbuf_offload_alloc(
 						l2fwd_mbuf_ol_pool,
-						RTE_PKTMBUF_OL_CRYPTO);
+						RTE_PKTMBUF_OL_CRYPTO_SYM);
 				/*
 				 * If we can't allocate a offload, then drop
 				 * the rest of the burst and dequeue and
@@ -689,7 +692,7 @@  parse_cryptodev_type(enum rte_cryptodev_type *type, char *optarg)
 		*type = RTE_CRYPTODEV_AESNI_MB_PMD;
 		return 0;
 	} else if (strcmp("QAT", optarg) == 0) {
-		*type = RTE_CRYPTODEV_QAT_PMD;
+		*type = RTE_CRYPTODEV_QAT_SYM_PMD;
 		return 0;
 	}
 
@@ -745,7 +748,7 @@  parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
 
 /** Parse crypto key command line argument */
 static int
-parse_key(struct rte_crypto_key *key __rte_unused,
+parse_key(struct rte_crypto_sym_key *key __rte_unused,
 		unsigned length __rte_unused, char *arg __rte_unused)
 {
 	printf("Currently an unsupported argument!\n");
@@ -982,7 +985,7 @@  l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
 	switch (options->cdev_type) {
 	case RTE_CRYPTODEV_AESNI_MB_PMD:
 		printf("cryptodev type: AES-NI MB PMD\n"); break;
-	case RTE_CRYPTODEV_QAT_PMD:
+	case RTE_CRYPTODEV_QAT_SYM_PMD:
 		printf("cryptodev type: QAT PMD\n"); break;
 	default:
 		break;
@@ -1179,7 +1182,7 @@  initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports)
 	unsigned i, cdev_id, cdev_count, enabled_cdev_count = 0;
 	int retval;
 
-	if (options->cdev_type == RTE_CRYPTODEV_QAT_PMD) {
+	if (options->cdev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
 		if (rte_cryptodev_count() < nb_ports)
 			return -1;
 	} else if (options->cdev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index 81fa3fc..0d59229 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -45,6 +45,7 @@  SRCS-y += rte_cryptodev.c
 
 # export include files
 SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
 SYMLINK-y-include += rte_cryptodev.h
 SYMLINK-y-include += rte_cryptodev_pmd.h
 
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 42343a8..df0c0b8 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -36,572 +36,15 @@ 
 /**
  * @file rte_crypto.h
  *
- * RTE Cryptographic Definitions
+ * RTE Cryptography Common Definitions
  *
- * Defines symmetric cipher and authentication algorithms and modes, as well
- * as supported symmetric crypto operation combinations.
  */
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#include <rte_mbuf.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-
-/** Symmetric Cipher Algorithms */
-enum rte_crypto_cipher_algorithm {
-	RTE_CRYPTO_CIPHER_NULL = 1,
-	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
-
-	RTE_CRYPTO_CIPHER_3DES_CBC,
-	/**< Triple DES algorithm in CBC mode */
-	RTE_CRYPTO_CIPHER_3DES_CTR,
-	/**< Triple DES algorithm in CTR mode */
-	RTE_CRYPTO_CIPHER_3DES_ECB,
-	/**< Triple DES algorithm in ECB mode */
-
-	RTE_CRYPTO_CIPHER_AES_CBC,
-	/**< AES algorithm in CBC mode */
-	RTE_CRYPTO_CIPHER_AES_CCM,
-	/**< AES algorithm in CCM mode. When this cipher algorithm is used the
-	 * *RTE_CRYPTO_AUTH_AES_CCM* element of the
-	 * *rte_crypto_hash_algorithm* enum MUST be used to set up the related
-	 * *rte_crypto_auth_xform* structure in the session context or in
-	 * the op_params of the crypto operation structure in the case of a
-	 * session-less crypto operation
-	 */
-	RTE_CRYPTO_CIPHER_AES_CTR,
-	/**< AES algorithm in Counter mode */
-	RTE_CRYPTO_CIPHER_AES_ECB,
-	/**< AES algorithm in ECB mode */
-	RTE_CRYPTO_CIPHER_AES_F8,
-	/**< AES algorithm in F8 mode */
-	RTE_CRYPTO_CIPHER_AES_GCM,
-	/**< AES algorithm in GCM mode. When this cipher algorithm is used the
-	 * *RTE_CRYPTO_AUTH_AES_GCM* element of the
-	 * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
-	 * *rte_crypto_auth_setup_data* structure in the session context or in
-	 * the op_params of the crypto operation structure in the case of a
-	 * session-less crypto operation.
-	 */
-	RTE_CRYPTO_CIPHER_AES_XTS,
-	/**< AES algorithm in XTS mode */
-
-	RTE_CRYPTO_CIPHER_ARC4,
-	/**< (A)RC4 cipher algorithm */
-
-	RTE_CRYPTO_CIPHER_KASUMI_F8,
-	/**< Kasumi algorithm in F8 mode */
-
-	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
-	/**< SNOW3G algorithm in UEA2 mode */
-
-	RTE_CRYPTO_CIPHER_ZUC_EEA3
-	/**< ZUC algorithm in EEA3 mode */
-};
-
-/** Symmetric Cipher Direction */
-enum rte_crypto_cipher_operation {
-	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
-	/**< Encrypt cipher operation */
-	RTE_CRYPTO_CIPHER_OP_DECRYPT
-	/**< Decrypt cipher operation */
-};
-
-/** Crypto key structure */
-struct rte_crypto_key {
-	uint8_t *data;	/**< pointer to key data */
-	phys_addr_t phys_addr;
-	size_t length;	/**< key length in bytes */
-};
-
-/**
- * Symmetric Cipher Setup Data.
- *
- * This structure contains data relating to Cipher (Encryption and Decryption)
- *  use to create a session.
- */
-struct rte_crypto_cipher_xform {
-	enum rte_crypto_cipher_operation op;
-	/**< This parameter determines if the cipher operation is an encrypt or
-	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
-	 * only encrypt operations are valid.
-	 */
-	enum rte_crypto_cipher_algorithm algo;
-	/**< Cipher algorithm */
-
-	struct rte_crypto_key key;
-	/**< Cipher key
-	 *
-	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
-	 * point to a concatenation of the AES encryption key followed by a
-	 * keymask. As per RFC3711, the keymask should be padded with trailing
-	 * bytes to match the length of the encryption key used.
-	 *
-	 * For AES-XTS mode of operation, two keys must be provided and
-	 * key.data must point to the two keys concatenated together (Key1 ||
-	 * Key2). The cipher key length will contain the total size of both
-	 * keys.
-	 *
-	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
-	 * 192 bits (24 bytes) or 256 bits (32 bytes).
-	 *
-	 * For the CCM mode of operation, the only supported key length is 128
-	 * bits (16 bytes).
-	 *
-	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
-	 * should be set to the combined length of the encryption key and the
-	 * keymask. Since the keymask and the encryption key are the same size,
-	 * key.length should be set to 2 x the AES encryption key length.
-	 *
-	 * For the AES-XTS mode of operation:
-	 *  - Two keys must be provided and key.length refers to total length of
-	 *    the two keys.
-	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
-	 *  - Both keys must have the same size.
-	 **/
-};
-
-/** Symmetric Authentication / Hash Algorithms */
-enum rte_crypto_auth_algorithm {
-	RTE_CRYPTO_AUTH_NULL = 1,
-	/**< NULL hash algorithm. */
-
-	RTE_CRYPTO_AUTH_AES_CBC_MAC,
-	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
-	RTE_CRYPTO_AUTH_AES_CCM,
-	/**< AES algorithm in CCM mode. This is an authenticated cipher. When
-	 * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
-	 * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
-	 * set up the related rte_crypto_cipher_setup_data structure in the
-	 * session context or the corresponding parameter in the crypto
-	 * operation data structures op_params parameter MUST be set for a
-	 * session-less crypto operation.
-	 */
-	RTE_CRYPTO_AUTH_AES_CMAC,
-	/**< AES CMAC algorithm. */
-	RTE_CRYPTO_AUTH_AES_GCM,
-	/**< AES algorithm in GCM mode. When this hash algorithm
-	 * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
-	 * rte_crypto_cipher_algorithm enum MUST be used to set up the related
-	 * rte_crypto_cipher_setup_data structure in the session context, or
-	 * the corresponding parameter in the crypto operation data structures
-	 * op_params parameter MUST be set for a session-less crypto operation.
-	 */
-	RTE_CRYPTO_AUTH_AES_GMAC,
-	/**< AES GMAC algorithm. When this hash algorithm
-	* is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
-	* rte_crypto_cipher_algorithm enum MUST be used to set up the related
-	* rte_crypto_cipher_setup_data structure in the session context,  or
-	* the corresponding parameter in the crypto operation data structures
-	* op_params parameter MUST be set for a session-less crypto operation.
-	*/
-	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
-	/**< AES XCBC algorithm. */
-
-	RTE_CRYPTO_AUTH_KASUMI_F9,
-	/**< Kasumi algorithm in F9 mode. */
-
-	RTE_CRYPTO_AUTH_MD5,
-	/**< MD5 algorithm */
-	RTE_CRYPTO_AUTH_MD5_HMAC,
-	/**< HMAC using MD5 algorithm */
-
-	RTE_CRYPTO_AUTH_SHA1,
-	/**< 128 bit SHA algorithm. */
-	RTE_CRYPTO_AUTH_SHA1_HMAC,
-	/**< HMAC using 128 bit SHA algorithm. */
-	RTE_CRYPTO_AUTH_SHA224,
-	/**< 224 bit SHA algorithm. */
-	RTE_CRYPTO_AUTH_SHA224_HMAC,
-	/**< HMAC using 224 bit SHA algorithm. */
-	RTE_CRYPTO_AUTH_SHA256,
-	/**< 256 bit SHA algorithm. */
-	RTE_CRYPTO_AUTH_SHA256_HMAC,
-	/**< HMAC using 256 bit SHA algorithm. */
-	RTE_CRYPTO_AUTH_SHA384,
-	/**< 384 bit SHA algorithm. */
-	RTE_CRYPTO_AUTH_SHA384_HMAC,
-	/**< HMAC using 384 bit SHA algorithm. */
-	RTE_CRYPTO_AUTH_SHA512,
-	/**< 512 bit SHA algorithm. */
-	RTE_CRYPTO_AUTH_SHA512_HMAC,
-	/**< HMAC using 512 bit SHA algorithm. */
-
-	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
-	/**< SNOW3G algorithm in UIA2 mode. */
-
-	RTE_CRYPTO_AUTH_ZUC_EIA3,
-	/**< ZUC algorithm in EIA3 mode */
-};
-
-/** Symmetric Authentication / Hash Operations */
-enum rte_crypto_auth_operation {
-	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
-	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
-};
-
-/**
- * Authentication / Hash transform data.
- *
- * This structure contains data relating to an authentication/hash crypto
- * transforms. The fields op, algo and digest_length are common to all
- * authentication transforms and MUST be set.
- */
-struct rte_crypto_auth_xform {
-	enum rte_crypto_auth_operation op;
-	/**< Authentication operation type */
-	enum rte_crypto_auth_algorithm algo;
-	/**< Authentication algorithm selection */
-
-	struct rte_crypto_key key;		/**< Authentication key data.
-	 * The authentication key length MUST be less than or equal to the
-	 * block size of the algorithm. It is the callers responsibility to
-	 * ensure that the key length is compliant with the standard being used
-	 * (for example RFC 2104, FIPS 198a).
-	 */
-
-	uint32_t digest_length;
-	/**< Length of the digest to be returned. If the verify option is set,
-	 * this specifies the length of the digest to be compared for the
-	 * session.
-	 *
-	 * If the value is less than the maximum length allowed by the hash,
-	 * the result shall be truncated.  If the value is greater than the
-	 * maximum length allowed by the hash then an error will be generated
-	 * by *rte_cryptodev_session_create* or by the
-	 * *rte_cryptodev_enqueue_burst* if using session-less APIs.
-	 */
-
-	uint32_t add_auth_data_length;
-	/**< The length of the additional authenticated data (AAD) in bytes.
-	 * The maximum permitted value is 240 bytes, unless otherwise specified
-	 * below.
-	 *
-	 * This field must be specified when the hash algorithm is one of the
-	 * following:
-	 *
-	 * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
-	 *   length of the IV (which should be 16).
-	 *
-	 * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM).  In this case, this is
-	 *   the length of the Additional Authenticated Data (called A, in NIST
-	 *   SP800-38D).
-	 *
-	 * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM).  In this case, this is
-	 *   the length of the associated data (called A, in NIST SP800-38C).
-	 *   Note that this does NOT include the length of any padding, or the
-	 *   18 bytes reserved at the start of the above field to store the
-	 *   block B0 and the encoded length.  The maximum permitted value in
-	 *   this case is 222 bytes.
-	 *
-	 * @note
-	 *  For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
-	 *  this field is not used and should be set to 0. Instead the length
-	 *  of the AAD data is specified in the message length to hash field of
-	 *  the rte_crypto_op_data structure.
-	 */
-};
-
-/** Crypto transformation types */
-enum rte_crypto_xform_type {
-	RTE_CRYPTO_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
-	RTE_CRYPTO_XFORM_AUTH,			/**< Authentication xform */
-	RTE_CRYPTO_XFORM_CIPHER			/**< Cipher xform  */
-};
-
-/**
- * Crypto transform structure.
- *
- * This is used to specify the crypto transforms required, multiple transforms
- * can be chained together to specify a chain transforms such as authentication
- * then cipher, or cipher then authentication. Each transform structure can
- * hold a single transform, the type field is used to specify which transform
- * is contained within the union
- */
-struct rte_crypto_xform {
-	struct rte_crypto_xform *next; /**< next xform in chain */
-
-	enum rte_crypto_xform_type type; /**< xform type */
-	union {
-		struct rte_crypto_auth_xform auth;
-		/**< Authentication / hash xform */
-		struct rte_crypto_cipher_xform cipher;
-		/**< Cipher xform */
-	};
-};
-
-/**
- * Crypto operation session type. This is used to specify whether a crypto
- * operation has session structure attached for immutable parameters or if all
- * operation information is included in the operation data structure.
- */
-enum rte_crypto_op_sess_type {
-	RTE_CRYPTO_OP_WITH_SESSION,	/**< Session based crypto operation */
-	RTE_CRYPTO_OP_SESSIONLESS	/**< Session-less crypto operation */
-};
-
-/** Status of crypto operation */
-enum rte_crypto_op_status {
-	RTE_CRYPTO_OP_STATUS_SUCCESS,
-	/**< Operation completed successfully */
-	RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
-	/**< Operation not yet submitted to a cryptodev */
-	RTE_CRYPTO_OP_STATUS_ENQUEUED,
-	/**< Operation is enqueued on device */
-	RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
-	/**< Authentication verification failed */
-	RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
-	/**< Operation failed due to invalid arguments in request */
-	RTE_CRYPTO_OP_STATUS_ERROR,
-	/**< Error handling operation */
-};
-
-/**
- * Cryptographic Operation Data.
- *
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
- */
-struct rte_crypto_op {
-	enum rte_crypto_op_sess_type type;
-	enum rte_crypto_op_status status;
-
-	struct {
-		struct rte_mbuf *m;	/**< Destination mbuf */
-		uint8_t offset;		/**< Data offset */
-	} dst;
-
-	union {
-		struct rte_cryptodev_session *session;
-		/**< Handle for the initialised session context */
-		struct rte_crypto_xform *xform;
-		/**< Session-less API crypto operation parameters */
-	};
-
-	struct {
-		struct {
-			 uint32_t offset;
-			 /**< Starting point for cipher processing, specified
-			  * as number of bytes from start of data in the source
-			  * buffer. The result of the cipher operation will be
-			  * written back into the output buffer starting at
-			  * this location.
-			  */
-
-			 uint32_t length;
-			 /**< The message length, in bytes, of the source buffer
-			  * on which the cryptographic operation will be
-			  * computed. This must be a multiple of the block size
-			  * if a block cipher is being used. This is also the
-			  * same as the result length.
-			  *
-			  * @note
-			  * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
-			  * this value should not include the length of the
-			  * padding or the length of the MAC; the driver will
-			  * compute the actual number of bytes over which the
-			  * encryption will occur, which will include these
-			  * values.
-			  *
-			  * @note
-			  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
-			  * field should be set to 0.
-			  */
-		} to_cipher; /**< Data offsets and length for ciphering */
-
-		struct {
-			 uint32_t offset;
-			 /**< Starting point for hash processing, specified as
-			  * number of bytes from start of packet in source
-			  * buffer.
-			  *
-			  * @note
-			  * For CCM and GCM modes of operation, this field is
-			  * ignored. The field @ref additional_auth field
-			  * should be set instead.
-			  *
-			  * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
-			  * mode of operation, this field specifies the start
-			  * of the AAD data in the source buffer.
-			  */
-
-			 uint32_t length;
-			 /**< The message length, in bytes, of the source
-			  * buffer that the hash will be computed on.
-			  *
-			  * @note
-			  * For CCM and GCM modes of operation, this field is
-			  * ignored. The field @ref additional_auth field
-			  * should be set instead.
-			  *
-			  * @note
-			  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
-			  * of operation, this field specifies the length of
-			  * the AAD data in the source buffer.
-			  */
-		} to_hash; /**< Data offsets and length for authentication */
-	} data;	/**< Details of data to be operated on */
-
-	struct {
-		uint8_t *data;
-		/**< Initialisation Vector or Counter.
-		 *
-		 * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
-		 * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
-		 * Vector (IV) value.
-		 *
-		 * - For block ciphers in CTR mode, this is the counter.
-		 *
-		 * - For GCM mode, this is either the IV (if the length is 96
-		 * bits) or J0 (for other sizes), where J0 is as defined by
-		 * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
-		 * needs to be allocated.
-		 *
-		 * - For CCM mode, the first byte is reserved, and the nonce
-		 * should be written starting at &iv[1] (to allow space for the
-		 * implementation to write in the flags in the first byte).
-		 * Note that a full 16 bytes should be allocated, even though
-		 * the length field will have a value less than this.
-		 *
-		 * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
-		 * 1619-2007.
-		 *
-		 * For optimum performance, the data pointed to SHOULD be
-		 * 8-byte aligned.
-		 */
-		phys_addr_t phys_addr;
-		size_t length;
-		/**< Length of valid IV data.
-		 *
-		 * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
-		 * mode, or for SNOW3G in UEA2 mode, this is the length of the
-		 * IV (which must be the same as the block length of the
-		 * cipher).
-		 *
-		 * - For block ciphers in CTR mode, this is the length of the
-		 * counter (which must be the same as the block length of the
-		 * cipher).
-		 *
-		 * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
-		 * which case data points to J0.
-		 *
-		 * - For CCM mode, this is the length of the nonce, which can
-		 * be in the range 7 to 13 inclusive.
-		 */
-	} iv;	/**< Initialisation vector parameters */
-
-	struct {
-		uint8_t *data;
-		/**< If this member of this structure is set this is a
-		 * pointer to the location where the digest result should be
-		 * inserted (in the case of digest generation) or where the
-		 * purported digest exists (in the case of digest
-		 * verification).
-		 *
-		 * At session creation time, the client specified the digest
-		 * result length with the digest_length member of the @ref
-		 * rte_crypto_auth_xform structure. For physical crypto
-		 * devices the caller must allocate at least digest_length of
-		 * physically contiguous memory at this location.
-		 *
-		 * For digest generation, the digest result will overwrite
-		 * any data at this location.
-		 *
-		 * @note
-		 * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
-		 * "digest result" read "authentication tag T".
-		 *
-		 * If this member is not set the digest result is understood
-		 * to be in the destination buffer for digest generation, and
-		 * in the source buffer for digest verification. The location
-		 * of the digest result in this case is immediately following
-		 * the region over which the digest is computed.
-		 */
-		phys_addr_t phys_addr;	/**< Physical address of digest */
-		uint32_t length;	/**< Length of digest */
-	} digest; /**< Digest parameters */
-
-	struct {
-		uint8_t *data;
-		/**< Pointer to Additional Authenticated Data (AAD) needed for
-		 * authenticated cipher mechanisms (CCM and GCM), and to the IV
-		 * for SNOW3G authentication
-		 * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
-		 * authentication mechanisms this pointer is ignored.
-		 *
-		 * The length of the data pointed to by this field is set up
-		 * for the session in the @ref rte_crypto_auth_xform structure
-		 * as part of the @ref rte_cryptodev_session_create function
-		 * call.  This length must not exceed 240 bytes.
-		 *
-		 * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
-		 * caller should setup this field as follows:
-		 *
-		 * - the nonce should be written starting at an offset of one
-		 *   byte into the array, leaving room for the implementation
-		 *   to write in the flags to the first byte.
-		 *
-		 * - the additional  authentication data itself should be
-		 *   written starting at an offset of 18 bytes into the array,
-		 *   leaving room for the length encoding in the first two
-		 *   bytes of the second block.
-		 *
-		 * - the array should be big enough to hold the above fields,
-		 *   plus any padding to round this up to the nearest multiple
-		 *   of the block size (16 bytes).  Padding will be added by
-		 *   the implementation.
-		 *
-		 * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
-		 * caller should setup this field as follows:
-		 *
-		 * - the AAD is written in starting at byte 0
-		 * - the array must be big enough to hold the AAD, plus any
-		 *   space to round this up to the nearest multiple of the
-		 *   block size (16 bytes).
-		 *
-		 * @note
-		 * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
-		 * operation, this field is not used and should be set to 0.
-		 * Instead the AAD data should be placed in the source buffer.
-		 */
-		phys_addr_t phys_addr;	/**< physical address */
-		uint32_t length;	/**< Length of digest */
-	} additional_auth;
-	/**< Additional authentication parameters */
-
-	struct rte_mempool *pool;
-	/**< mempool used to allocate crypto op */
-
-	void *user_data;
-	/**< opaque pointer for user data */
-};
-
-
-/**
- * Reset the fields of a crypto operation to their default values.
- *
- * @param	op	The crypto operation to be reset.
- */
-static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
-{
-	op->type = RTE_CRYPTO_OP_SESSIONLESS;
-	op->dst.m = NULL;
-	op->dst.offset = 0;
-}
-
-/** Attach a session to a crypto operation */
-static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
-		struct rte_cryptodev_session *sess)
-{
-	op->session = sess;
-	op->type = RTE_CRYPTO_OP_WITH_SESSION;
-}
+#include <rte_crypto_sym.h>
 
 #ifdef __cplusplus
 }
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 0000000..cb2b8f6
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,613 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+	RTE_CRYPTO_CIPHER_NULL = 1,
+	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+	RTE_CRYPTO_CIPHER_3DES_CBC,
+	/**< Triple DES algorithm in CBC mode */
+	RTE_CRYPTO_CIPHER_3DES_CTR,
+	/**< Triple DES algorithm in CTR mode */
+	RTE_CRYPTO_CIPHER_3DES_ECB,
+	/**< Triple DES algorithm in ECB mode */
+
+	RTE_CRYPTO_CIPHER_AES_CBC,
+	/**< AES algorithm in CBC mode */
+	RTE_CRYPTO_CIPHER_AES_CCM,
+	/**< AES algorithm in CCM mode. When this cipher algorithm is used the
+	 * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+	 * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+	 * *rte_crypto_auth_xform* structure in the session context or in
+	 * the op_params of the crypto operation structure in the case of a
+	 * session-less crypto operation
+	 */
+	RTE_CRYPTO_CIPHER_AES_CTR,
+	/**< AES algorithm in Counter mode */
+	RTE_CRYPTO_CIPHER_AES_ECB,
+	/**< AES algorithm in ECB mode */
+	RTE_CRYPTO_CIPHER_AES_F8,
+	/**< AES algorithm in F8 mode */
+	RTE_CRYPTO_CIPHER_AES_GCM,
+	/**< AES algorithm in GCM mode. When this cipher algorithm is used the
+	 * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+	 * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+	 * *rte_crypto_auth_xform* structure in the session context or in
+	 * the op_params of the crypto operation structure in the case of a
+	 * session-less crypto operation.
+	 */
+	RTE_CRYPTO_CIPHER_AES_XTS,
+	/**< AES algorithm in XTS mode */
+
+	RTE_CRYPTO_CIPHER_ARC4,
+	/**< (A)RC4 cipher algorithm */
+
+	RTE_CRYPTO_CIPHER_KASUMI_F8,
+	/**< Kasumi algorithm in F8 mode */
+
+	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+	/**< SNOW3G algorithm in UEA2 mode */
+
+	RTE_CRYPTO_CIPHER_ZUC_EEA3
+	/**< ZUC algorithm in EEA3 mode */
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+	/**< Encrypt cipher operation */
+	RTE_CRYPTO_CIPHER_OP_DECRYPT
+	/**< Decrypt cipher operation */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * used to create a session.
+ */
+struct rte_crypto_cipher_xform {
+	enum rte_crypto_cipher_operation op;
+	/**< This parameter determines if the cipher operation is an encrypt or
+	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+	 * only encrypt operations are valid.
+	 */
+	enum rte_crypto_cipher_algorithm algo;
+	/**< Cipher algorithm */
+
+	struct {
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
+	} key;
+	/**< Cipher key
+	 *
+	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+	 * point to a concatenation of the AES encryption key followed by a
+	 * keymask. As per RFC3711, the keymask should be padded with trailing
+	 * bytes to match the length of the encryption key used.
+	 *
+	 * For AES-XTS mode of operation, two keys must be provided and
+	 * key.data must point to the two keys concatenated together (Key1 ||
+	 * Key2). The cipher key length will contain the total size of both
+	 * keys.
+	 *
+	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+	 * 192 bits (24 bytes) or 256 bits (32 bytes).
+	 *
+	 * For the CCM mode of operation, the only supported key length is 128
+	 * bits (16 bytes).
+	 *
+	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+	 * should be set to the combined length of the encryption key and the
+	 * keymask. Since the keymask and the encryption key are the same size,
+	 * key.length should be set to 2 x the AES encryption key length.
+	 *
+	 * For the AES-XTS mode of operation:
+	 *  - Two keys must be provided and key.length refers to total length of
+	 *    the two keys.
+	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+	 *  - Both keys must have the same size.
+	 **/
+};
+
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+	RTE_CRYPTO_AUTH_NULL = 1,
+	/**< NULL hash algorithm. */
+
+	RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+	RTE_CRYPTO_AUTH_AES_CCM,
+	/**< AES algorithm in CCM mode. This is an authenticated cipher. When
+	 * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+	 * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+	 * set up the related rte_crypto_cipher_xform structure in the
+	 * session context or the corresponding parameter in the crypto
+	 * operation data structures op_params parameter MUST be set for a
+	 * session-less crypto operation.
+	 */
+	RTE_CRYPTO_AUTH_AES_CMAC,
+	/**< AES CMAC algorithm. */
+	RTE_CRYPTO_AUTH_AES_GCM,
+	/**< AES algorithm in GCM mode. When this hash algorithm
+	 * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+	 * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+	 * rte_crypto_cipher_xform structure in the session context, or
+	 * the corresponding parameter in the crypto operation data structures
+	 * op_params parameter MUST be set for a session-less crypto operation.
+	 */
+	RTE_CRYPTO_AUTH_AES_GMAC,
+	/**< AES GMAC algorithm. When this hash algorithm
+	* is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+	* rte_crypto_cipher_algorithm enum MUST be used to set up the related
+	* rte_crypto_cipher_xform structure in the session context, or
+	* the corresponding parameter in the crypto operation data structures
+	* op_params parameter MUST be set for a session-less crypto operation.
+	*/
+	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	/**< AES XCBC algorithm. */
+
+	RTE_CRYPTO_AUTH_KASUMI_F9,
+	/**< Kasumi algorithm in F9 mode. */
+
+	RTE_CRYPTO_AUTH_MD5,
+	/**< MD5 algorithm */
+	RTE_CRYPTO_AUTH_MD5_HMAC,
+	/**< HMAC using MD5 algorithm */
+
+	RTE_CRYPTO_AUTH_SHA1,
+	/**< 128 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA1_HMAC,
+	/**< HMAC using 128 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA224,
+	/**< 224 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA224_HMAC,
+	/**< HMAC using 224 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA256,
+	/**< 256 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA256_HMAC,
+	/**< HMAC using 256 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA384,
+	/**< 384 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA384_HMAC,
+	/**< HMAC using 384 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA512,
+	/**< 512 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA512_HMAC,
+	/**< HMAC using 512 bit SHA algorithm. */
+
+	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+	/**< SNOW3G algorithm in UIA2 mode. */
+
+	RTE_CRYPTO_AUTH_ZUC_EIA3,
+	/**< ZUC algorithm in EIA3 mode */
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
+	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transform. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+	enum rte_crypto_auth_operation op;
+	/**< Authentication operation type */
+	enum rte_crypto_auth_algorithm algo;
+	/**< Authentication algorithm selection */
+
+	struct {
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
+	} key;
+	/**< Authentication key data.
+	 * The authentication key length MUST be less than or equal to the
+	 * block size of the algorithm. It is the caller's responsibility to
+	 * ensure that the key length is compliant with the standard being used
+	 * (for example RFC 2104, FIPS 198a).
+	 */
+
+	uint32_t digest_length;
+	/**< Length of the digest to be returned. If the verify option is set,
+	 * this specifies the length of the digest to be compared for the
+	 * session.
+	 *
+	 * If the value is less than the maximum length allowed by the hash,
+	 * the result shall be truncated.  If the value is greater than the
+	 * maximum length allowed by the hash then an error will be generated
+	 * by *rte_cryptodev_sym_session_create* or by the
+	 * *rte_cryptodev_enqueue_burst* if using session-less APIs.
+	 */
+
+	uint32_t add_auth_data_length;
+	/**< The length of the additional authenticated data (AAD) in bytes.
+	 * The maximum permitted value is 240 bytes, unless otherwise specified
+	 * below.
+	 *
+	 * This field must be specified when the hash algorithm is one of the
+	 * following:
+	 *
+	 * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+	 *   length of the IV (which should be 16).
+	 *
+	 * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM).  In this case, this is
+	 *   the length of the Additional Authenticated Data (called A, in NIST
+	 *   SP800-38D).
+	 *
+	 * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM).  In this case, this is
+	 *   the length of the associated data (called A, in NIST SP800-38C).
+	 *   Note that this does NOT include the length of any padding, or the
+	 *   18 bytes reserved at the start of the above field to store the
+	 *   block B0 and the encoded length.  The maximum permitted value in
+	 *   this case is 222 bytes.
+	 *
+	 * @note
+	 *  For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+	 *  this field is not used and should be set to 0. Instead the length
+	 *  of the AAD data is specified in the message length to hash field of
+	 *  the rte_crypto_sym_op_data structure.
+	 *  the rte_crypto_sym_op structure.
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
+	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
+	RTE_CRYPTO_SYM_XFORM_CIPHER		/**< Cipher xform  */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required. Multiple transforms
+ * can be chained together to specify a chain of transforms such as
+ * authentication then cipher, or cipher then authentication. Each transform
+ * structure can hold a single transform; the type field is used to specify
+ * which transform is contained within the union.
+ */
+struct rte_crypto_sym_xform {
+	struct rte_crypto_sym_xform *next;
+	/**< next xform in chain */
+	enum rte_crypto_sym_xform_type type; /**< xform type */
+	union {
+		struct rte_crypto_auth_xform auth;
+		/**< Authentication / hash xform */
+		struct rte_crypto_cipher_xform cipher;
+		/**< Cipher xform */
+	};
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has session structure attached for immutable parameters or if all
+ * operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+	RTE_CRYPTO_SYM_OP_WITH_SESSION,	/**< Session based crypto operation */
+	RTE_CRYPTO_SYM_OP_SESSIONLESS	/**< Session-less crypto operation */
+};
+
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+	RTE_CRYPTO_OP_STATUS_SUCCESS,
+	/**< Operation completed successfully */
+	RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
+	/**< Operation not yet submitted to a cryptodev */
+	RTE_CRYPTO_OP_STATUS_ENQUEUED,
+	/**< Operation is enqueued on device */
+	RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+	/**< Authentication verification failed */
+	RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+	/**< Operation failed due to invalid arguments in request */
+	RTE_CRYPTO_OP_STATUS_ERROR,
+	/**< Error handling operation */
+};
+
+/**
+ * Cryptographic Operation Data.
+ *
+ * This structure contains data relating to performing cryptographic processing
+ * on a data buffer. This request is used with the
+ * rte_cryptodev_enqueue_burst() call to perform cipher, hash, or combined
+ * hash and cipher operations.
+ */
+struct rte_crypto_sym_op {
+	enum rte_crypto_sym_op_sess_type type;
+	enum rte_crypto_op_status status;
+
+	struct {
+		struct rte_mbuf *m;	/**< Destination mbuf */
+		uint8_t offset;		/**< Data offset */
+	} dst;
+
+	union {
+		struct rte_cryptodev_sym_session *session;
+		/**< Handle for the initialised session context */
+		struct rte_crypto_sym_xform *xform;
+		/**< Session-less API crypto operation parameters */
+	};
+
+	struct {
+		struct {
+			 uint32_t offset;
+			 /**< Starting point for cipher processing, specified
+			  * as number of bytes from start of data in the source
+			  * buffer. The result of the cipher operation will be
+			  * written back into the output buffer starting at
+			  * this location.
+			  */
+
+			 uint32_t length;
+			 /**< The message length, in bytes, of the source buffer
+			  * on which the cryptographic operation will be
+			  * computed. This must be a multiple of the block size
+			  * if a block cipher is being used. This is also the
+			  * same as the result length.
+			  *
+			  * @note
+			  * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM,
+			  * this value should not include the length of the
+			  * padding or the length of the MAC; the driver will
+			  * compute the actual number of bytes over which the
+			  * encryption will occur, which will include these
+			  * values.
+			  *
+			  * @note
+			  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
+			  * field should be set to 0.
+			  */
+		} to_cipher; /**< Data offsets and length for ciphering */
+
+		struct {
+			 uint32_t offset;
+			 /**< Starting point for hash processing, specified as
+			  * number of bytes from start of packet in source
+			  * buffer.
+			  *
+			  * @note
+			  * For CCM and GCM modes of operation, this field is
+			  * ignored. The @ref additional_auth field
+			  * should be set instead.
+			  *
+			  * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
+			  * mode of operation, this field specifies the start
+			  * of the AAD data in the source buffer.
+			  */
+
+			 uint32_t length;
+			 /**< The message length, in bytes, of the source
+			  * buffer that the hash will be computed on.
+			  *
+			  * @note
+			  * For CCM and GCM modes of operation, this field is
+			  * ignored. The @ref additional_auth field
+			  * should be set instead.
+			  *
+			  * @note
+			  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
+			  * of operation, this field specifies the length of
+			  * the AAD data in the source buffer.
+			  */
+		} to_hash; /**< Data offsets and length for authentication */
+	} data;	/**< Details of data to be operated on */
+
+	struct {
+		uint8_t *data;
+		/**< Initialisation Vector or Counter.
+		 *
+		 * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+		 * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
+		 * Vector (IV) value.
+		 *
+		 * - For block ciphers in CTR mode, this is the counter.
+		 *
+		 * - For GCM mode, this is either the IV (if the length is 96
+		 * bits) or J0 (for other sizes), where J0 is as defined by
+		 * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
+		 * needs to be allocated.
+		 *
+		 * - For CCM mode, the first byte is reserved, and the nonce
+		 * should be written starting at &iv[1] (to allow space for the
+		 * implementation to write in the flags in the first byte).
+		 * Note that a full 16 bytes should be allocated, even though
+		 * the length field will have a value less than this.
+		 *
+		 * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
+		 * 1619-2007.
+		 *
+		 * For optimum performance, the data pointed to SHOULD be
+		 * 8-byte aligned.
+		 */
+		phys_addr_t phys_addr;
+		size_t length;
+		/**< Length of valid IV data.
+		 *
+		 * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
+		 * mode, or for SNOW3G in UEA2 mode, this is the length of the
+		 * IV (which must be the same as the block length of the
+		 * cipher).
+		 *
+		 * - For block ciphers in CTR mode, this is the length of the
+		 * counter (which must be the same as the block length of the
+		 * cipher).
+		 *
+		 * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
+		 * which case data points to J0.
+		 *
+		 * - For CCM mode, this is the length of the nonce, which can
+		 * be in the range 7 to 13 inclusive.
+		 */
+	} iv;	/**< Initialisation vector parameters */
+
+	struct {
+		uint8_t *data;
+		/**< If this member of this structure is set this is a
+		 * pointer to the location where the digest result should be
+		 * inserted (in the case of digest generation) or where the
+		 * purported digest exists (in the case of digest
+		 * verification).
+		 *
+		 * At session creation time, the client specified the digest
+		 * result length with the digest_length member of the @ref
+		 * rte_crypto_auth_xform structure. For physical crypto
+		 * devices the caller must allocate at least digest_length of
+		 * physically contiguous memory at this location.
+		 *
+		 * For digest generation, the digest result will overwrite
+		 * any data at this location.
+		 *
+		 * @note
+		 * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+		 * "digest result" read "authentication tag T".
+		 *
+		 * If this member is not set the digest result is understood
+		 * to be in the destination buffer for digest generation, and
+		 * in the source buffer for digest verification. The location
+		 * of the digest result in this case is immediately following
+		 * the region over which the digest is computed.
+		 */
+		phys_addr_t phys_addr;	/**< Physical address of digest */
+		uint32_t length;	/**< Length of digest */
+	} digest; /**< Digest parameters */
+
+	struct {
+		uint8_t *data;
+		/**< Pointer to Additional Authenticated Data (AAD) needed for
+		 * authenticated cipher mechanisms (CCM and GCM), and to the IV
+		 * for SNOW3G authentication
+		 * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+		 * authentication mechanisms this pointer is ignored.
+		 *
+		 * The length of the data pointed to by this field is set up
+		 * for the session in the @ref rte_crypto_auth_xform structure
+		 * as part of the @ref rte_cryptodev_sym_session_create function
+		 * call.  This length must not exceed 240 bytes.
+		 *
+		 * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
+		 * caller should setup this field as follows:
+		 *
+		 * - the nonce should be written starting at an offset of one
+		 *   byte into the array, leaving room for the implementation
+		 *   to write in the flags to the first byte.
+		 *
+		 * - the additional  authentication data itself should be
+		 *   written starting at an offset of 18 bytes into the array,
+		 *   leaving room for the length encoding in the first two
+		 *   bytes of the second block.
+		 *
+		 * - the array should be big enough to hold the above fields,
+		 *   plus any padding to round this up to the nearest multiple
+		 *   of the block size (16 bytes).  Padding will be added by
+		 *   the implementation.
+		 *
+		 * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+		 * caller should setup this field as follows:
+		 *
+		 * - the AAD is written in starting at byte 0
+		 * - the array must be big enough to hold the AAD, plus any
+		 *   space to round this up to the nearest multiple of the
+		 *   block size (16 bytes).
+		 *
+		 * @note
+		 * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+		 * operation, this field is not used and should be set to 0.
+		 * Instead the AAD data should be placed in the source buffer.
+		 */
+		phys_addr_t phys_addr;	/**< physical address */
+		uint32_t length;	/**< Length of additional data */
+	} additional_auth;
+	/**< Additional authentication parameters */
+
+	struct rte_mempool *pool;
+	/**< mempool used to allocate crypto op */
+
+	void *user_data;
+	/**< opaque pointer for user data */
+};
+
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param	op	The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+	op->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+	op->dst.m = NULL;
+	op->dst.offset = 0;
+}
+
+/** Attach a session to a crypto operation */
+static inline void
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
+		struct rte_cryptodev_sym_session *sess)
+{
+	op->session = sess;
+	op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
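As an aside for reviewers, the renamed symmetric transform API above can be
exercised roughly as follows. This is an illustrative sketch, not part of the
patch; the key buffers, key sizes and algorithm choices are placeholders.

#include <string.h>
#include <rte_crypto_sym.h>

static void
example_fill_xform_chain(struct rte_crypto_sym_xform *cipher_xform,
		struct rte_crypto_sym_xform *auth_xform,
		uint8_t *cipher_key, uint8_t *auth_key)
{
	memset(cipher_xform, 0, sizeof(*cipher_xform));
	memset(auth_xform, 0, sizeof(*auth_xform));

	/* first transform in the chain: AES-CBC encryption */
	cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xform->cipher.key.data = cipher_key;
	cipher_xform->cipher.key.length = 16;	/* AES-128 */

	/* second transform: SHA1-HMAC digest generation */
	auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xform->auth.key.data = auth_key;
	auth_xform->auth.key.length = 64;	/* <= SHA1 block size */
	auth_xform->auth.digest_length = 20;	/* SHA1 digest size */

	/* chain: cipher then authenticate */
	cipher_xform->next = auth_xform;
	auth_xform->next = NULL;
}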
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 2838852..c7fef6a 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -572,8 +572,8 @@  rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
 }
 
 static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
-		unsigned obj_cache_size, int socket_id);
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+		unsigned nb_objs, unsigned obj_cache_size, int socket_id);
 
 int
 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
@@ -604,8 +604,10 @@  rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
 	}
 
 	/* Setup Session mempool for device */
-	return rte_crypto_session_pool_create(dev, config->session_mp.nb_objs,
-			config->session_mp.cache_size, config->socket_id);
+	return rte_cryptodev_sym_session_pool_create(dev,
+			config->session_mp.nb_objs,
+			config->session_mp.cache_size,
+			config->socket_id);
 }
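The per-device symmetric session mempool is now created from the existing
rte_cryptodev_config. A rough application-side fragment, assuming a valid
dev_id and using only the config members visible in this hunk (the remaining
members are left at their defaults):

	struct rte_cryptodev_config conf = {
		.socket_id = rte_socket_id(),
		.session_mp = {
			.nb_objs = 2048,	/* sessions in the pool */
			.cache_size = 64,
		},
		/* other members (e.g. queue pair count) omitted here */
	};

	if (rte_cryptodev_configure(dev_id, &conf) < 0)
		rte_exit(EXIT_FAILURE, "Failed to configure cryptodev %u\n",
				(unsigned)dev_id);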
 
 
@@ -911,12 +913,12 @@  rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
 
 
 static void
-rte_crypto_session_init(struct rte_mempool *mp,
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
 		void *opaque_arg,
 		void *_sess,
 		__rte_unused unsigned i)
 {
-	struct rte_cryptodev_session *sess = _sess;
+	struct rte_cryptodev_sym_session *sess = _sess;
 	struct rte_cryptodev *dev = opaque_arg;
 
 	memset(sess, 0, mp->elt_size);
@@ -930,8 +932,8 @@  rte_crypto_session_init(struct rte_mempool *mp,
 }
 
 static int
-rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
-		unsigned obj_cache_size, int socket_id)
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+		unsigned nb_objs, unsigned obj_cache_size, int socket_id)
 {
 	char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	unsigned priv_sess_size;
@@ -951,7 +953,7 @@  rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
 		return -ENOMEM;
 	}
 
-	unsigned elt_size = sizeof(struct rte_cryptodev_session) +
+	unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
 			priv_sess_size;
 
 	dev->data->session_pool = rte_mempool_lookup(mp_name);
@@ -975,7 +977,8 @@  rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
 				0, /* private data size */
 				NULL, /* obj initialization constructor */
 				NULL, /* obj initialization constructor arg */
-				rte_crypto_session_init, /* obj constructor */
+				rte_cryptodev_sym_session_init,
+				/* obj constructor */
 				dev, /* obj constructor arg */
 				socket_id, /* socket id */
 				0); /* flags */
@@ -990,11 +993,12 @@  rte_crypto_session_pool_create(struct rte_cryptodev *dev, unsigned nb_objs,
 	return 0;
 }
 
-struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+		struct rte_crypto_sym_xform *xform)
 {
 	struct rte_cryptodev *dev;
-	struct rte_cryptodev_session *sess;
+	struct rte_cryptodev_sym_session *sess;
 	void *_sess;
 
 	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
@@ -1010,7 +1014,7 @@  rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
 		return NULL;
 	}
 
-	sess = (struct rte_cryptodev_session *)_sess;
+	sess = (struct rte_cryptodev_sym_session *)_sess;
 
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
 	if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1026,8 +1030,9 @@  rte_cryptodev_session_create(uint8_t dev_id, struct rte_crypto_xform *xform)
 	return sess;
 }
 
-struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+		struct rte_cryptodev_sym_session *sess)
 {
 	struct rte_cryptodev *dev;
 
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 892375d..732e2b9 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@ 
 /*-
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -57,14 +57,14 @@  extern "C" {
 /**< Null crypto PMD device name */
 #define CRYPTODEV_NAME_AESNI_MB_PMD	("cryptodev_aesni_mb_pmd")
 /**< AES-NI Multi buffer PMD device name */
-#define CRYPTODEV_NAME_QAT_PMD		("cryptodev_qat_pmd")
-/**< Intel QAT PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	("cryptodev_qat_sym_pmd")
+/**< Intel QAT SYM PMD device name */
 
 /** Crypto device type */
 enum rte_cryptodev_type {
 	RTE_CRYPTODEV_NULL_PMD = 1,	/**< Null crypto PMD */
 	RTE_CRYPTODEV_AESNI_MB_PMD,	/**< AES-NI multi buffer PMD */
-	RTE_CRYPTODEV_QAT_PMD,		/**< QAT PMD */
+	RTE_CRYPTODEV_QAT_SYM_PMD,	/**< QAT SYM PMD */
 };
 
 /* Logging Macros */
@@ -99,8 +99,11 @@  struct rte_cryptodev_info {
 
 	unsigned max_nb_queue_pairs;
 	/**< Maximum number of queues pairs supported by device. */
-	unsigned max_nb_sessions;
-	/**< Maximum number of sessions supported by device. */
+
+	struct {
+		unsigned max_nb_sessions;
+		/**< Maximum number of sessions supported by device. */
+	} sym;
 };
 
 #define RTE_CRYPTODEV_DETACHED  (0)
@@ -130,17 +133,6 @@  struct rte_cryptodev_qp_conf {
 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
 		enum rte_cryptodev_event_type event, void *cb_arg);
 
-#ifdef RTE_CRYPTODEV_PERF
-/**
- * Crypto Device performance counter statistics structure. This structure is
- * used for RDTSC counters for measuring crypto operations.
- */
-struct rte_cryptodev_perf_stats {
-	uint64_t t_accumlated;	/**< Accumulated time processing operation */
-	uint64_t t_min;		/**< Max time */
-	uint64_t t_max;		/**< Min time */
-};
-#endif
 
 /** Crypto Device statistics */
 struct rte_cryptodev_stats {
@@ -153,29 +145,8 @@  struct rte_cryptodev_stats {
 	/**< Total error count on operations enqueued */
 	uint64_t dequeue_err_count;
 	/**< Total error count on operations dequeued */
+};
 
-#ifdef RTE_CRYPTODEV_DETAILED_STATS
-	struct {
-		uint64_t encrypt_ops;	/**< Count of encrypt operations */
-		uint64_t encrypt_bytes;	/**< Number of bytes encrypted */
-
-		uint64_t decrypt_ops;	/**< Count of decrypt operations */
-		uint64_t decrypt_bytes;	/**< Number of bytes decrypted */
-	} cipher; /**< Cipher operations stats */
-
-	struct {
-		uint64_t generate_ops;	/**< Count of generate operations */
-		uint64_t bytes_hashed;	/**< Number of bytes hashed */
-
-		uint64_t verify_ops;	/**< Count of verify operations */
-		uint64_t bytes_verified;/**< Number of bytes verified */
-	} hash;	 /**< Hash operations stats */
-#endif
-
-#ifdef RTE_CRYPTODEV_PERF
-	struct rte_cryptodev_perf_stats op_perf; /**< Operations stats */
-#endif
-} __rte_cache_aligned;
 
 /**
  * Create a virtual crypto device
@@ -607,6 +578,23 @@  rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
 }
 
 
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+	struct {
+		uint8_t dev_id;
+		/**< Device Id */
+		enum rte_cryptodev_type type;
+		/**< Crypto Device type session created on */
+		struct rte_mempool *mp;
+		/**< Mempool session allocated from */
+	} __rte_aligned(8);
+	/**< Public symmetric session details */
+
+	char _private[0];
+	/**< Private session material */
+};
+
+
 /**
  * Initialise a session for symmetric cryptographic operations.
  *
@@ -628,9 +616,9 @@  rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
  * @return
  *  Pointer to the created session or NULL
  */
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_create(uint8_t dev_id,
-		struct rte_crypto_xform *xform);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+		struct rte_crypto_sym_xform *xform);
 
 
 /**
@@ -638,15 +626,15 @@  rte_cryptodev_session_create(uint8_t dev_id,
  *
  * @param	dev_id		The device identifier.
  * @param	session		Session pointer previously allocated by
- *				*rte_cryptodev_session_create*.
+ *				*rte_cryptodev_sym_session_create*.
  *
  * @return
  *   NULL on successful freeing of session.
  *   Session pointer on failure to free session.
  */
-extern struct rte_cryptodev_session *
-rte_cryptodev_session_free(uint8_t dev_id,
-		struct rte_cryptodev_session *session);
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+		struct rte_cryptodev_sym_session *session);
 
 
 #ifdef __cplusplus
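Putting the two declarations above together, a minimal session lifecycle
might look like the fragment below; xform is assumed to be a previously
built rte_crypto_sym_xform chain and error handling is kept to a bare
minimum:

	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(dev_id, xform);
	if (sess == NULL)
		rte_exit(EXIT_FAILURE, "Session creation failed on dev %u\n",
				(unsigned)dev_id);

	/* ... attach sess to operations and enqueue/dequeue bursts ... */

	if (rte_cryptodev_sym_session_free(dev_id, sess) != NULL)
		printf("Warning: session on dev %u not freed\n",
				(unsigned)dev_id);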
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 8270afa..7d049ea 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,6 +1,6 @@ 
 /*-
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -56,11 +56,6 @@  extern "C" {
 #include "rte_crypto.h"
 #include "rte_cryptodev.h"
 
-struct rte_cryptodev_stats;
-struct rte_cryptodev_info;
-struct rte_cryptodev_qp_conf;
-
-enum rte_cryptodev_event_type;
 
 #ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
 #define RTE_PMD_DEBUG_TRACE(...) \
@@ -80,7 +75,6 @@  struct rte_cryptodev_session {
 };
 
 struct rte_cryptodev_driver;
-struct rte_cryptodev;
 
 /**
  * Initialisation function of a crypto driver invoked for each matching
@@ -364,7 +358,7 @@  typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
  * - On success returns a pointer to a rte_mempool
  * - On failure returns a NULL pointer
  */
-typedef int (*cryptodev_create_session_pool_t)(
+typedef int (*cryptodev_sym_create_session_pool_t)(
 		struct rte_cryptodev *dev, unsigned nb_objs,
 		unsigned obj_cache_size, int socket_id);
 
@@ -378,7 +372,7 @@  typedef int (*cryptodev_create_session_pool_t)(
  *  - On success returns the size of the session structure for device
  *  - On failure returns 0
  */
-typedef unsigned (*cryptodev_get_session_private_size_t)(
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
 		struct rte_cryptodev *dev);
 
 /**
@@ -392,7 +386,7 @@  typedef unsigned (*cryptodev_get_session_private_size_t)(
  *  - Returns private session structure on success.
  *  - Returns NULL on failure.
  */
-typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
 		void *session_private);
 
 /**
@@ -406,14 +400,14 @@  typedef void (*cryptodev_initialize_session_t)(struct rte_mempool *mempool,
  *  - Returns private session structure on success.
  *  - Returns NULL on failure.
  */
-typedef void * (*cryptodev_configure_session_t)(struct rte_cryptodev *dev,
-		struct rte_crypto_xform *xform, void *session_private);
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform, void *session_private);
 
 /**
  * Free Crypto session.
  * @param	session		Cryptodev session structure to free
  */
-typedef void (*cryptodev_free_session_t)(struct rte_cryptodev *dev,
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
 		void *session_private);
 
 
@@ -427,9 +421,9 @@  struct rte_cryptodev_ops {
 	cryptodev_info_get_t dev_infos_get;	/**< Get device info. */
 
 	cryptodev_stats_get_t stats_get;
-	/**< Get generic device statistics. */
+	/**< Get device statistics. */
 	cryptodev_stats_reset_t stats_reset;
-	/**< Reset generic device statistics. */
+	/**< Reset device statistics. */
 
 	cryptodev_queue_pair_setup_t queue_pair_setup;
 	/**< Set up a device queue pair. */
@@ -442,13 +436,13 @@  struct rte_cryptodev_ops {
 	cryptodev_queue_pair_count_t queue_pair_count;
 	/**< Get count of the queue pairs. */
 
-	cryptodev_get_session_private_size_t session_get_size;
+	cryptodev_sym_get_session_private_size_t session_get_size;
 	/**< Return private session. */
-	cryptodev_initialize_session_t session_initialize;
+	cryptodev_sym_initialize_session_t session_initialize;
 	/**< Initialization function for private session data */
-	cryptodev_configure_session_t session_configure;
+	cryptodev_sym_configure_session_t session_configure;
 	/**< Configure a Crypto session. */
-	cryptodev_free_session_t session_clear;
+	cryptodev_sym_free_session_t session_clear;
 	/**< Clear a Crypto sessions private data. */
 };
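From a PMD author's point of view, the renamed callback typedefs plug into
rte_cryptodev_ops as sketched here. The example_sym_* handlers are
hypothetical placeholders whose bodies are omitted, and the device, stats
and queue pair callbacks are elided:

	/* hypothetical handlers matching the sym session callback typedefs */
	static unsigned example_sym_session_get_size(struct rte_cryptodev *dev);
	static void example_sym_session_init(struct rte_mempool *mp,
			void *sess_private);
	static void *example_sym_session_configure(struct rte_cryptodev *dev,
			struct rte_crypto_sym_xform *xform, void *sess_private);
	static void example_sym_session_clear(struct rte_cryptodev *dev,
			void *sess_private);

	static struct rte_cryptodev_ops example_pmd_ops = {
		/* device, stats and queue pair callbacks omitted */
		.session_get_size	= example_sym_session_get_size,
		.session_initialize	= example_sym_session_init,
		.session_configure	= example_sym_session_configure,
		.session_clear		= example_sym_session_clear,
	};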
 
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index ff8e93d..a46af6f 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -16,8 +16,8 @@  DPDK_2.2 {
 	rte_cryptodev_pmd_driver_register;
 	rte_cryptodev_pmd_release_device;
 	rte_cryptodev_pmd_virtual_dev_init;
-	rte_cryptodev_session_create;
-	rte_cryptodev_session_free;
+	rte_cryptodev_sym_session_create;
+	rte_cryptodev_sym_session_free;
 	rte_cryptodev_socket_id;
 	rte_cryptodev_start;
 	rte_cryptodev_stats_get;
@@ -29,4 +29,4 @@  DPDK_2.2 {
 	rte_cryptodev_queue_pair_stop;
 
 	local: *;
-};
\ No newline at end of file
+};
diff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h
index 77993b6..5ce6058 100644
--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h
+++ b/lib/librte_mbuf_offload/rte_mbuf_offload.h
@@ -1,7 +1,7 @@ 
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -67,7 +67,7 @@  extern "C" {
 enum rte_mbuf_ol_op_type {
 	RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
 	/**< Off-load not specified */
-	RTE_PKTMBUF_OL_CRYPTO
+	RTE_PKTMBUF_OL_CRYPTO_SYM
 	/**< Crypto offload operation */
 };
 
@@ -84,7 +84,7 @@  struct rte_mbuf_offload {
 
 	enum rte_mbuf_ol_op_type type;	/**< offload type */
 	union {
-		struct rte_crypto_op crypto;	/**< Crypto operation */
+		struct rte_crypto_sym_op crypto;	/**< Crypto operation */
 	} op;
 };
 
@@ -194,8 +194,8 @@  __rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
 	ol->type = type;
 
 	switch (type) {
-	case RTE_PKTMBUF_OL_CRYPTO:
-		__rte_crypto_op_reset(&ol->op.crypto); break;
+	case RTE_PKTMBUF_OL_CRYPTO_SYM:
+		__rte_crypto_sym_op_reset(&ol->op.crypto); break;
 	default:
 		break;
 	}
@@ -278,24 +278,24 @@  __rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
  * - On success returns pointer to first crypto xform in crypto operations chain
  * - On failure returns NULL
  */
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
+static inline struct rte_crypto_sym_xform *
+rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,
 		unsigned nb_xforms)
 {
-	struct rte_crypto_xform *xform;
+	struct rte_crypto_sym_xform *xform;
 	void *priv_data;
 	uint16_t size;
 
-	size = sizeof(struct rte_crypto_xform) * nb_xforms;
+	size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
 	priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
 
 	if (priv_data == NULL)
 		return NULL;
 
-	ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
+	ol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;
 
 	do {
-		xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
 		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
 	} while (xform);
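For completeness, a short fragment showing how an application might consume
the renamed helper; ol is assumed to have been allocated from an offload
mempool beforehand and the algorithm choices are placeholders:

	struct rte_crypto_sym_xform *xform;

	/* reserve a two-element chain in the offload's private area */
	xform = rte_pktmbuf_offload_alloc_crypto_sym_xforms(ol, 2);
	if (xform == NULL)
		return -ENOMEM;

	/* first transform: cipher */
	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;

	/* second transform: authentication */
	xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;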