
[dpdk-dev,v2,06/12] crypto/armv8: add PMD optimized for ARMv8 processors

Message ID 1481077985-4224-7-git-send-email-zbigniew.bodek@caviumnetworks.com (mailing list archive)
State Changes Requested, archived
Delegated to: Pablo de Lara Guarch

Checks

Context Check Description
checkpatch/checkpatch success coding style OK

Commit Message

zbigniew.bodek@caviumnetworks.com Dec. 7, 2016, 2:32 a.m. UTC
From: Zbigniew Bodek <zbigniew.bodek@caviumnetworks.com>

This patch introduces a crypto poll mode driver
using ARMv8 cryptographic extensions.
CPU compatibility with this driver is detected at
run-time and the virtual crypto device will not be
created if the CPU doesn't provide
AES, SHA1, SHA2 and NEON.

This PMD is optimized to provide a performance boost
for chained crypto operation processing,
such as encryption + HMAC generation or
decryption + HMAC validation. Hence,
cipher-only or hash-only operations are
not provided.

The driver currently supports AES-128-CBC
in combination with
SHA256 MAC, SHA256 HMAC or SHA1 HMAC, and relies
on low-level assembly code.
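
For illustration, a chained cipher+auth operation for this PMD would be
requested with two linked transforms along these lines (a minimal sketch
against this release's cryptodev API; the key buffers and lengths below
are example assumptions, not part of the patch):

	uint8_t aes_key[16], hmac_key[20];	/* example key material */

	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = hmac_key, .length = 20 },
			.digest_length = 20,
		},
	};
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xform,	/* cipher first, then auth */
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = aes_key, .length = 16 },	/* AES-128 */
		},
	};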

This patch adds the driver's code only and does
not include it in the build system.

Signed-off-by: Zbigniew Bodek <zbigniew.bodek@caviumnetworks.com>
---
 drivers/crypto/armv8/Makefile                     |  72 ++
 drivers/crypto/armv8/asm/include/rte_armv8_defs.h |  80 ++
 drivers/crypto/armv8/rte_armv8_pmd.c              | 915 ++++++++++++++++++++++
 drivers/crypto/armv8/rte_armv8_pmd_ops.c          | 390 +++++++++
 drivers/crypto/armv8/rte_armv8_pmd_private.h      | 210 +++++
 drivers/crypto/armv8/rte_armv8_pmd_version.map    |   3 +
 6 files changed, 1670 insertions(+)
 create mode 100644 drivers/crypto/armv8/Makefile
 create mode 100644 drivers/crypto/armv8/asm/include/rte_armv8_defs.h
 create mode 100644 drivers/crypto/armv8/rte_armv8_pmd.c
 create mode 100644 drivers/crypto/armv8/rte_armv8_pmd_ops.c
 create mode 100644 drivers/crypto/armv8/rte_armv8_pmd_private.h
 create mode 100644 drivers/crypto/armv8/rte_armv8_pmd_version.map

Comments

Pablo de Lara Dec. 21, 2016, 2:55 p.m. UTC | #1
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of
> zbigniew.bodek@caviumnetworks.com
> Sent: Wednesday, December 07, 2016 2:33 AM
> To: De Lara Guarch, Pablo; jerin.jacob@caviumnetworks.com
> Cc: dev@dpdk.org; Zbigniew Bodek
> Subject: [dpdk-dev] [PATCH v2 06/12] crypto/armv8: add PMD optimized
> for ARMv8 processors
> 
> From: Zbigniew Bodek <zbigniew.bodek@caviumnetworks.com>
> 
> This patch introduces a crypto poll mode driver
> using ARMv8 cryptographic extensions.
> CPU compatibility with this driver is detected at
> run-time and the virtual crypto device will not be
> created if the CPU doesn't provide
> AES, SHA1, SHA2 and NEON.
> 
> This PMD is optimized to provide a performance boost
> for chained crypto operation processing,
> such as encryption + HMAC generation or
> decryption + HMAC validation. Hence,
> cipher-only or hash-only operations are
> not provided.
> 
> The driver currently supports AES-128-CBC
> in combination with
> SHA256 MAC, SHA256 HMAC or SHA1 HMAC, and relies
> on low-level assembly code.
> 
> This patch adds the driver's code only and does
> not include it in the build system.
> 
> Signed-off-by: Zbigniew Bodek <zbigniew.bodek@caviumnetworks.com>
> ---
>  drivers/crypto/armv8/Makefile                     |  72 ++
>  drivers/crypto/armv8/asm/include/rte_armv8_defs.h |  80 ++
>  drivers/crypto/armv8/rte_armv8_pmd.c              | 915 ++++++++++++++++++++++
>  drivers/crypto/armv8/rte_armv8_pmd_ops.c          | 390 +++++++++
>  drivers/crypto/armv8/rte_armv8_pmd_private.h      | 210 +++++
>  drivers/crypto/armv8/rte_armv8_pmd_version.map    |   3 +
>  6 files changed, 1670 insertions(+)
>  create mode 100644 drivers/crypto/armv8/Makefile
>  create mode 100644 drivers/crypto/armv8/asm/include/rte_armv8_defs.h
>  create mode 100644 drivers/crypto/armv8/rte_armv8_pmd.c
>  create mode 100644 drivers/crypto/armv8/rte_armv8_pmd_ops.c
>  create mode 100644 drivers/crypto/armv8/rte_armv8_pmd_private.h
>  create mode 100644 drivers/crypto/armv8/rte_armv8_pmd_version.map
> 
...

> diff --git a/drivers/crypto/armv8/asm/include/rte_armv8_defs.h
> b/drivers/crypto/armv8/asm/include/rte_armv8_defs.h
> new file mode 100644
> index 0000000..ea05495
> --- /dev/null
> +++ b/drivers/crypto/armv8/asm/include/rte_armv8_defs.h
> @@ -0,0 +1,80 @@
...

> +
> +#ifndef _RTE_ARMV8_DEFS_H_
> +#define _RTE_ARMV8_DEFS_H_
> +
> +struct crypto_arg {
> +	struct {
> +		uint8_t		*key;
> +		uint8_t		*iv;
> +	} cipher;

Remove unnecessary tab above.

> +	struct {
> +		struct {
> +			uint8_t	*key;
> +			uint8_t *i_key_pad;
> +			uint8_t *o_key_pad;
> +		} hmac;
> +	} digest;
> +};

...

> diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c
> b/drivers/crypto/armv8/rte_armv8_pmd.c
> new file mode 100644
> index 0000000..0410bb0
> --- /dev/null
> +++ b/drivers/crypto/armv8/rte_armv8_pmd.c


> + * 3D array type for ARM Combined Mode crypto functions pointers.
> + * CRYPTO_CIPHER_MAX:			max cipher ID number
> + * CRYPTO_AUTH_MAX:			max auth ID number
> + * CRYPTO_CIPHER_KEYLEN_MAX:		max key length ID number
> + */
> +typedef const crypto_func_t
> crypto_func_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_AUTH_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
> +
> +/* Evaluate to key length definition */
> +#define	KEYL(keyl)		(ARMV8_CRYPTO_CIPHER_KEYLEN_ ## keyl)

I don't think a tab is necessary here after #define (this happens in other parts too).
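
For instance, with a space instead of the tab (a style sketch of the suggestion):

	#define KEYL(keyl)		(ARMV8_CRYPTO_CIPHER_KEYLEN_ ## keyl)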

> +
> +/* Local aliases for supported ciphers */
> +#define	CIPH_AES_CBC		RTE_CRYPTO_CIPHER_AES_CBC
> +/* Local aliases for supported hashes */
> +#define	AUTH_SHA1_HMAC		RTE_CRYPTO_AUTH_SHA1_HMAC
> +#define	AUTH_SHA256		RTE_CRYPTO_AUTH_SHA256
> +#define	AUTH_SHA256_HMAC	RTE_CRYPTO_AUTH_SHA256_HMAC

...

> diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
> b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
> new file mode 100644
> index 0000000..0f768f4
> --- /dev/null
> +++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c

...

> +	{	/* AES CBC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +			{.sym = {
> +				.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +				{.cipher = {
> +					.algo = RTE_CRYPTO_CIPHER_AES_CBC,
> +					.block_size = 16,
> +					.key_size = {
> +						.min = 16,
> +						.max = 32,
> +						.increment = 8

From what I read, this PMD only supports AES-128-CBC.
If that's right, then key_size should be .min = 16, .max = 16, .increment = 0.
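
For reference, a sketch of the capability entry with that change applied
(the reviewer's proposal, not the code as submitted):

	.key_size = {
		.min = 16,	/* AES-128 only */
		.max = 16,
		.increment = 0
	},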

Patch

diff --git a/drivers/crypto/armv8/Makefile b/drivers/crypto/armv8/Makefile
new file mode 100644
index 0000000..2d053a4
--- /dev/null
+++ b/drivers/crypto/armv8/Makefile
@@ -0,0 +1,72 @@ 
+#
+#   BSD LICENSE
+#
+#   Copyright (C) Cavium networks Ltd. 2016.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Cavium networks nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_armv8.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(SRCDIR)/asm/include
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_armv8_pmd_version.map
+
+VPATH += $(SRCDIR)/asm
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd_ops.c
+# library asm files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += aes_core.S
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += sha1_core.S
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += sha256_core.S
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += aes128cbc_sha1_hmac.S
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += aes128cbc_sha256.S
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += aes128cbc_sha256_hmac.S
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += sha1_hmac_aes128cbc_dec.S
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += sha256_aes128cbc_dec.S
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += sha256_hmac_aes128cbc_dec.S
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/armv8/asm/include/rte_armv8_defs.h b/drivers/crypto/armv8/asm/include/rte_armv8_defs.h
new file mode 100644
index 0000000..ea05495
--- /dev/null
+++ b/drivers/crypto/armv8/asm/include/rte_armv8_defs.h
@@ -0,0 +1,80 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ARMV8_DEFS_H_
+#define _RTE_ARMV8_DEFS_H_
+
+struct crypto_arg {
+	struct {
+		uint8_t		*key;
+		uint8_t		*iv;
+	} cipher;
+	struct {
+		struct {
+			uint8_t	*key;
+			uint8_t *i_key_pad;
+			uint8_t *o_key_pad;
+		} hmac;
+	} digest;
+};
+
+typedef struct crypto_arg crypto_arg_t;
+
+void aes128_key_sched_enc(uint8_t *expanded_key, const uint8_t *user_key);
+void aes128_key_sched_dec(uint8_t *expanded_key, const uint8_t *user_key);
+
+void aes128cbc_sha1_hmac(uint8_t *csrc, uint8_t *cdst, uint8_t *dsrc,
+			uint8_t *ddst, uint64_t len, crypto_arg_t *arg);
+void aes128cbc_sha256(uint8_t *csrc, uint8_t *cdst, uint8_t *dsrc,
+			uint8_t *ddst, uint64_t len, crypto_arg_t *arg);
+void aes128cbc_sha256_hmac(uint8_t *csrc, uint8_t *cdst, uint8_t *dsrc,
+			uint8_t *ddst, uint64_t len, crypto_arg_t *arg);
+void aes128cbc_dec_sha256(uint8_t *csrc, uint8_t *cdst, uint8_t *dsrc,
+			uint8_t *ddst, uint64_t len, crypto_arg_t *arg);
+void sha1_hmac_aes128cbc_dec(uint8_t *csrc, uint8_t *cdst, uint8_t *dsrc,
+			uint8_t *ddst, uint64_t len, crypto_arg_t *arg);
+void sha256_aes128cbc_dec(uint8_t *csrc, uint8_t *cdst, uint8_t *dsrc,
+			uint8_t *ddst, uint64_t len, crypto_arg_t *arg);
+void sha256_hmac_aes128cbc_dec(uint8_t *csrc, uint8_t *cdst, uint8_t *dsrc,
+			uint8_t *ddst, uint64_t len, crypto_arg_t *arg);
+void sha256_aes128cbc(uint8_t *csrc, uint8_t *cdst, uint8_t *dsrc,
+			uint8_t *ddst, uint64_t len, crypto_arg_t *arg);
+
+int sha1_block_partial(uint8_t *init, const uint8_t *src, uint8_t *dst,
+			uint64_t len);
+int sha1_block(uint8_t *init, const uint8_t *src, uint8_t *dst, uint64_t len);
+
+int sha256_block_partial(uint8_t *init, const uint8_t *src, uint8_t *dst,
+			uint64_t len);
+int sha256_block(uint8_t *init, const uint8_t *src, uint8_t *dst, uint64_t len);
+
+#endif /* _RTE_ARMV8_DEFS_H_ */
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
new file mode 100644
index 0000000..0410bb0
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -0,0 +1,915 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_armv8_defs.h"
+#include "rte_armv8_pmd_private.h"
+
+static int cryptodev_armv8_crypto_uninit(const char *name);
+
+/**
+ * Pointers to the supported combined mode crypto functions are stored
+ * in the static tables. Each combined (chained) cryptographic operation
+ * can be described by a set of numbers:
+ * - order:	order of operations (cipher, auth) or (auth, cipher)
+ * - direction:	encryption or decryption
+ * - calg:	cipher algorithm such as AES_CBC, AES_CTR, etc.
+ * - aalg:	authentication algorithm such as SHA1, SHA256, etc.
+ * - keyl:	cipher key length, for example 128, 192, 256 bits
+ *
+ * In order to quickly acquire each function pointer based on those numbers,
+ * a hierarchy of arrays is maintained. The final level, 3D array is indexed
+ * by the combined mode function parameters only (cipher algorithm,
+ * authentication algorithm and key length).
+ *
+ * This gives 3 memory accesses to obtain a function pointer instead of
+ * traversing the array manually, comparing function parameters in each iteration.
+ *
+ *                   +--+CRYPTO_FUNC
+ *            +--+ENC|
+ *      +--+CA|
+ *      |     +--+DEC
+ * ORDER|
+ *      |     +--+ENC
+ *      +--+AC|
+ *            +--+DEC
+ *
+ */
+
+/**
+ * 3D array type for ARM Combined Mode crypto functions pointers.
+ * CRYPTO_CIPHER_MAX:			max cipher ID number
+ * CRYPTO_AUTH_MAX:			max auth ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX:		max key length ID number
+ */
+typedef const crypto_func_t
+crypto_func_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_AUTH_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+/* Evaluate to key length definition */
+#define	KEYL(keyl)		(ARMV8_CRYPTO_CIPHER_KEYLEN_ ## keyl)
+
+/* Local aliases for supported ciphers */
+#define	CIPH_AES_CBC		RTE_CRYPTO_CIPHER_AES_CBC
+/* Local aliases for supported hashes */
+#define	AUTH_SHA1_HMAC		RTE_CRYPTO_AUTH_SHA1_HMAC
+#define	AUTH_SHA256		RTE_CRYPTO_AUTH_SHA256
+#define	AUTH_SHA256_HMAC	RTE_CRYPTO_AUTH_SHA256_HMAC
+
+/**
+ * Arrays containing pointers to particular cryptographic,
+ * combined mode functions.
+ * crypto_op_ca_encrypt:	cipher (encrypt), authenticate
+ * crypto_op_ca_decrypt:	cipher (decrypt), authenticate
+ * crypto_op_ac_encrypt:	authenticate, cipher (encrypt)
+ * crypto_op_ac_decrypt:	authenticate, cipher (decrypt)
+ */
+static const crypto_func_tbl_t
+crypto_op_ca_encrypt = {
+	/* [cipher alg][auth alg][key length] = crypto_function, */
+	[CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = aes128cbc_sha1_hmac,
+	[CIPH_AES_CBC][AUTH_SHA256][KEYL(128)] = aes128cbc_sha256,
+	[CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = aes128cbc_sha256_hmac,
+};
+
+static const crypto_func_tbl_t
+crypto_op_ca_decrypt = {
+	NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_encrypt = {
+	NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_decrypt = {
+	/* [cipher alg][auth alg][key length] = crypto_function, */
+	[CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = sha1_hmac_aes128cbc_dec,
+	[CIPH_AES_CBC][AUTH_SHA256][KEYL(128)] = sha256_aes128cbc_dec,
+	[CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = sha256_hmac_aes128cbc_dec,
+};
+
+/**
+ * Arrays containing pointers to particular cryptographic function sets,
+ * covering given cipher operation directions (encrypt, decrypt)
+ * for each order of cipher and authentication pairs.
+ */
+static const crypto_func_tbl_t *
+crypto_cipher_auth[] = {
+	&crypto_op_ca_encrypt,
+	&crypto_op_ca_decrypt,
+	NULL
+};
+
+static const crypto_func_tbl_t *
+crypto_auth_cipher[] = {
+	&crypto_op_ac_encrypt,
+	&crypto_op_ac_decrypt,
+	NULL
+};
+
+/**
+ * Top level array containing pointers to particular cryptographic
+ * function sets, covering given order of chained operations.
+ * crypto_cipher_auth:	cipher first, authenticate after
+ * crypto_auth_cipher:	authenticate first, cipher after
+ */
+static const crypto_func_tbl_t **
+crypto_chain_order[] = {
+	crypto_cipher_auth,
+	crypto_auth_cipher,
+	NULL
+};
+
+/**
+ * Extract particular combined mode crypto function from the 3D array.
+ */
+#define	CRYPTO_GET_ALGO(order, cop, calg, aalg, keyl)			\
+({									\
+	crypto_func_tbl_t *func_tbl =					\
+				(crypto_chain_order[(order)])[(cop)];	\
+									\
+	((*func_tbl)[(calg)][(aalg)][KEYL(keyl)]);		\
+})
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * 2D array type for ARM key schedule functions pointers.
+ * CRYPTO_CIPHER_MAX:			max cipher ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX:		max key length ID number
+ */
+typedef const crypto_key_sched_t
+crypto_key_sched_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_encrypt = {
+	/* [cipher alg][key length] = key_expand_func, */
+	[CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_enc,
+};
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_decrypt = {
+	/* [cipher alg][key length] = key_expand_func, */
+	[CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_dec,
+};
+
+/**
+ * Top level array containing pointers to particular key generation
+ * function sets, covering given operation direction.
+ * crypto_key_sched_encrypt:	keys for encryption
+ * crypto_key_sched_decrypt:	keys for decryption
+ */
+static const crypto_key_sched_tbl_t *
+crypto_key_sched_dir[] = {
+	&crypto_key_sched_encrypt,
+	&crypto_key_sched_decrypt,
+	NULL
+};
+
+/**
+ * Extract particular key schedule function from the 2D array.
+ */
+#define	CRYPTO_GET_KEY_SCHED(cop, calg, keyl)				\
+({									\
+	crypto_key_sched_tbl_t *ks_tbl = crypto_key_sched_dir[(cop)];	\
+									\
+	((*ks_tbl)[(calg)][KEYL(keyl)]);				\
+})
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * Global static parameter used to create a unique name for each
+ * ARMV8 crypto device.
+ */
+static unsigned int unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+	int ret;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_ARMV8_PMD),
+			unique_name_id++);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum armv8_crypto_chain_order
+armv8_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+
+	/*
+	 * This driver currently covers only chained operations.
+	 * Ignore only cipher or only authentication operations
+	 * or chains longer than 2 xform structures.
+	 */
+	if (xform->next == NULL || xform->next->next != NULL)
+		return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return ARMV8_CRYPTO_CHAIN_AUTH_CIPHER;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return ARMV8_CRYPTO_CHAIN_CIPHER_AUTH;
+	}
+
+	return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+static inline void
+auth_hmac_pad_prepare(struct armv8_crypto_session *sess,
+				const struct rte_crypto_sym_xform *xform)
+{
+	size_t i;
+
+	/* Generate i_key_pad and o_key_pad */
+	memset(sess->auth.hmac.i_key_pad, 0, sizeof(sess->auth.hmac.i_key_pad));
+	rte_memcpy(sess->auth.hmac.i_key_pad, sess->auth.hmac.key,
+							xform->auth.key.length);
+	memset(sess->auth.hmac.o_key_pad, 0, sizeof(sess->auth.hmac.o_key_pad));
+	rte_memcpy(sess->auth.hmac.o_key_pad, sess->auth.hmac.key,
+							xform->auth.key.length);
+	/*
+	 * XOR key with IPAD/OPAD values to obtain i_key_pad
+	 * and o_key_pad.
+	 * A byte-by-byte operation may seem less efficient
+	 * here, but in fact it's the opposite.
+	 * The resulting ASM code is likely to operate on NEON registers
+	 * (load auth key to Qx, load IPAD/OPAD to multiple
+	 * elements of Qy, eor 128 bits at once).
+	 */
+	for (i = 0; i < SHA_BLOCK_MAX; i++) {
+		sess->auth.hmac.i_key_pad[i] ^= HMAC_IPAD_VALUE;
+		sess->auth.hmac.o_key_pad[i] ^= HMAC_OPAD_VALUE;
+	}
+}
+
+static inline int
+auth_set_prerequisites(struct armv8_crypto_session *sess,
+			const struct rte_crypto_sym_xform *xform)
+{
+	uint8_t partial[64] = { 0 };
+	int error;
+
+	switch (xform->auth.algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		/*
+		 * Generate authentication key, i_key_pad and o_key_pad.
+		 */
+		/* Zero memory under key */
+		memset(sess->auth.hmac.key, 0, SHA1_AUTH_KEY_LENGTH);
+
+		if (xform->auth.key.length > SHA1_AUTH_KEY_LENGTH) {
+			/*
+			 * In case the key is longer than 160 bits
+			 * the algorithm will use SHA1(key) instead.
+			 */
+			error = sha1_block(NULL, xform->auth.key.data,
+				sess->auth.hmac.key, xform->auth.key.length);
+			if (error != 0)
+				return -1;
+		} else {
+			/*
+			 * Now copy the given authentication key to the session
+			 * key. Since the session key is zeroed, there is
+			 * no need for additional zero padding if the key is
+			 * shorter than SHA1_AUTH_KEY_LENGTH.
+			 */
+			rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+							xform->auth.key.length);
+		}
+
+		/* Prepare HMAC padding: key|pattern */
+		auth_hmac_pad_prepare(sess, xform);
+		/*
+		 * Calculate partial hash values for i_key_pad and o_key_pad.
+		 * Will be used as initialization state for final HMAC.
+		 */
+		error = sha1_block_partial(NULL, sess->auth.hmac.i_key_pad,
+		    partial, SHA1_BLOCK_SIZE);
+		if (error != 0)
+			return -1;
+		memcpy(sess->auth.hmac.i_key_pad, partial, SHA1_BLOCK_SIZE);
+
+		error = sha1_block_partial(NULL, sess->auth.hmac.o_key_pad,
+		    partial, SHA1_BLOCK_SIZE);
+		if (error != 0)
+			return -1;
+		memcpy(sess->auth.hmac.o_key_pad, partial, SHA1_BLOCK_SIZE);
+
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		/*
+		 * Generate authentication key, i_key_pad and o_key_pad.
+		 */
+		/* Zero memory under key */
+		memset(sess->auth.hmac.key, 0, SHA256_AUTH_KEY_LENGTH);
+
+		if (xform->auth.key.length > SHA256_AUTH_KEY_LENGTH) {
+			/*
+			 * In case the key is longer than 256 bits
+			 * the algorithm will use SHA256(key) instead.
+			 */
+			error = sha256_block(NULL, xform->auth.key.data,
+				sess->auth.hmac.key, xform->auth.key.length);
+			if (error != 0)
+				return -1;
+		} else {
+			/*
+			 * Now copy the given authentication key to the session
+			 * key. Since the session key is zeroed, there is
+			 * no need for additional zero padding if the key is
+			 * shorter than SHA256_AUTH_KEY_LENGTH.
+			 */
+			rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+							xform->auth.key.length);
+		}
+
+		/* Prepare HMAC padding: key|pattern */
+		auth_hmac_pad_prepare(sess, xform);
+		/*
+		 * Calculate partial hash values for i_key_pad and o_key_pad.
+		 * Will be used as initialization state for final HMAC.
+		 */
+		error = sha256_block_partial(NULL, sess->auth.hmac.i_key_pad,
+		    partial, SHA256_BLOCK_SIZE);
+		if (error != 0)
+			return -1;
+		memcpy(sess->auth.hmac.i_key_pad, partial, SHA256_BLOCK_SIZE);
+
+		error = sha256_block_partial(NULL, sess->auth.hmac.o_key_pad,
+		    partial, SHA256_BLOCK_SIZE);
+		if (error != 0)
+			return -1;
+		memcpy(sess->auth.hmac.o_key_pad, partial, SHA256_BLOCK_SIZE);
+
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static inline int
+cipher_set_prerequisites(struct armv8_crypto_session *sess,
+			const struct rte_crypto_sym_xform *xform)
+{
+	crypto_key_sched_t cipher_key_sched;
+
+	cipher_key_sched = sess->cipher.key_sched;
+	if (likely(cipher_key_sched != NULL)) {
+		/* Set up cipher session key */
+		cipher_key_sched(sess->cipher.key.data, xform->cipher.key.data);
+	}
+
+	return 0;
+}
+
+static int
+armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
+		const struct rte_crypto_sym_xform *cipher_xform,
+		const struct rte_crypto_sym_xform *auth_xform)
+{
+	enum armv8_crypto_chain_order order;
+	enum armv8_crypto_cipher_operation cop;
+	enum rte_crypto_cipher_algorithm calg;
+	enum rte_crypto_auth_algorithm aalg;
+
+	/* Validate and prepare scratch order of combined operations */
+	switch (sess->chain_order) {
+	case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+	case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+		order = sess->chain_order;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Select cipher direction */
+	sess->cipher.direction = cipher_xform->cipher.op;
+	/* Select cipher key */
+	sess->cipher.key.length = cipher_xform->cipher.key.length;
+	/* Set cipher direction */
+	cop = sess->cipher.direction;
+	/* Set cipher algorithm */
+	calg = cipher_xform->cipher.algo;
+
+	/* Select cipher algo */
+	switch (calg) {
+	/* Cover supported cipher algorithms */
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.algo = calg;
+		/* IV len is always 16 bytes (block size) for AES CBC */
+		sess->cipher.iv_len = 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Select auth generate/verify */
+	sess->auth.operation = auth_xform->auth.op;
+
+	/* Select auth algo */
+	switch (auth_xform->auth.algo) {
+	/* Cover supported hash algorithms */
+	case RTE_CRYPTO_AUTH_SHA256:
+		aalg = auth_xform->auth.algo;
+		sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_AUTH;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+	case RTE_CRYPTO_AUTH_SHA256_HMAC: /* Fall through */
+		aalg = auth_xform->auth.algo;
+		sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_HMAC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Verify supported key lengths and extract proper algorithm */
+	switch (cipher_xform->cipher.key.length << 3) {
+	case 128:
+		sess->crypto_func =
+				CRYPTO_GET_ALGO(order, cop, calg, aalg, 128);
+		sess->cipher.key_sched =
+				CRYPTO_GET_KEY_SCHED(cop, calg, 128);
+		break;
+	case 192:
+		sess->crypto_func =
+				CRYPTO_GET_ALGO(order, cop, calg, aalg, 192);
+		sess->cipher.key_sched =
+				CRYPTO_GET_KEY_SCHED(cop, calg, 192);
+		break;
+	case 256:
+		sess->crypto_func =
+				CRYPTO_GET_ALGO(order, cop, calg, aalg, 256);
+		sess->cipher.key_sched =
+				CRYPTO_GET_KEY_SCHED(cop, calg, 256);
+		break;
+	default:
+		sess->crypto_func = NULL;
+		sess->cipher.key_sched = NULL;
+		return -EINVAL;
+	}
+
+	if (unlikely(sess->crypto_func == NULL)) {
+		/*
+		 * If we got here that means that there must be a bug
+		 * in the algorithms selection above. Nevertheless keep
+		 * it here to catch bug immediately and avoid NULL pointer
+		 * dereference in OPs processing.
+		 */
+		ARMV8_CRYPTO_LOG_ERR(
+			"No appropriate crypto function for given parameters");
+		return -EINVAL;
+	}
+
+	/* Set up cipher session prerequisites */
+	if (cipher_set_prerequisites(sess, cipher_xform) != 0)
+		return -EINVAL;
+
+	/* Set up authentication session prerequisites */
+	if (auth_set_prerequisites(sess, auth_xform) != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+armv8_crypto_set_session_parameters(struct armv8_crypto_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	bool is_chained_op;
+	int ret;
+
+	/* Filter out spurious/broken requests */
+	if (xform == NULL)
+		return -EINVAL;
+
+	sess->chain_order = armv8_crypto_get_chain_order(xform);
+	switch (sess->chain_order) {
+	case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		is_chained_op = true;
+		break;
+	case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		is_chained_op = true;
+		break;
+	default:
+		is_chained_op = false;
+		return -EINVAL;
+	}
+
+	if (is_chained_op) {
+		ret = armv8_crypto_set_session_chained_parameters(sess,
+						cipher_xform, auth_xform);
+		if (unlikely(ret != 0)) {
+			ARMV8_CRYPTO_LOG_ERR(
+			"Invalid/unsupported chained (cipher/auth) parameters");
+			return -EINVAL;
+		}
+	} else {
+		ARMV8_CRYPTO_LOG_ERR("Invalid/unsupported operation");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/** Provide session for operation */
+static struct armv8_crypto_session *
+get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
+{
+	struct armv8_crypto_session *sess = NULL;
+
+	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+		/* get existing session */
+		if (likely(op->sym->session != NULL &&
+				op->sym->session->dev_type ==
+				RTE_CRYPTODEV_ARMV8_PMD)) {
+			sess = (struct armv8_crypto_session *)
+				op->sym->session->_private;
+		}
+	} else {
+		/* provide internal session */
+		void *_sess = NULL;
+
+		if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
+			sess = (struct armv8_crypto_session *)
+				((struct rte_cryptodev_sym_session *)_sess)
+				->_private;
+
+			if (unlikely(armv8_crypto_set_session_parameters(
+					sess, op->sym->xform) != 0)) {
+				rte_mempool_put(qp->sess_mp, _sess);
+				sess = NULL;
+			} else
+				op->sym->session = _sess;
+		}
+	}
+
+	if (sess == NULL)
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+	return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------*/
+
+/** Process cipher operation */
+static void
+process_armv8_chained_op
+		(struct rte_crypto_op *op, struct armv8_crypto_session *sess,
+		struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+	crypto_func_t crypto_func;
+	crypto_arg_t arg;
+	uint8_t *src, *dst;
+	uint8_t *adst, *asrc;
+	uint64_t srclen;
+
+	srclen = op->sym->cipher.data.length;
+	ARMV8_CRYPTO_ASSERT(
+		op->sym->cipher.data.length == op->sym->auth.data.length);
+
+	src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+			op->sym->cipher.data.offset);
+	dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+			op->sym->cipher.data.offset);
+
+	switch (sess->chain_order) {
+	case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+		asrc = dst;
+		break;
+	case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+		asrc = src;
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return;
+	}
+
+	switch (sess->auth.mode) {
+	case ARMV8_CRYPTO_AUTH_AS_AUTH:
+		/* Nothing to do here, just verify correct option */
+		break;
+	case ARMV8_CRYPTO_AUTH_AS_HMAC:
+		arg.digest.hmac.key = sess->auth.hmac.key;
+		arg.digest.hmac.i_key_pad = sess->auth.hmac.i_key_pad;
+		arg.digest.hmac.o_key_pad = sess->auth.hmac.o_key_pad;
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return;
+	}
+
+	if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE) {
+		adst = op->sym->auth.digest.data;
+		if (adst == NULL) {
+			adst = rte_pktmbuf_mtod_offset(mbuf_dst,
+					uint8_t *,
+					op->sym->auth.data.offset +
+					op->sym->auth.data.length);
+		}
+	} else {
+		adst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
+				op->sym->auth.digest.length);
+	}
+
+	if (unlikely(op->sym->cipher.iv.length != sess->cipher.iv_len)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return;
+	}
+
+	arg.cipher.iv = op->sym->cipher.iv.data;
+	arg.cipher.key = sess->cipher.key.data;
+	/* Acquire combined mode function */
+	crypto_func = sess->crypto_func;
+	ARMV8_CRYPTO_ASSERT(crypto_func != NULL);
+	crypto_func(src, dst, asrc, adst, srclen, &arg);
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		if (memcmp(adst, op->sym->auth.digest.data,
+				op->sym->auth.digest.length) != 0) {
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		}
+	}
+}
+
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+		struct armv8_crypto_session *sess)
+{
+	struct rte_mbuf *msrc, *mdst;
+	int retval;
+
+	msrc = op->sym->m_src;
+	mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	switch (sess->chain_order) {
+	case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+	case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER: /* Fall through */
+		process_armv8_chained_op(op, sess, msrc, mdst);
+		break;
+	default:
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		break;
+	}
+
+	/* Free session if a session-less crypto op */
+	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct armv8_crypto_session));
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (op->status != RTE_CRYPTO_OP_STATUS_ERROR)
+		retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+	else
+		retval = -1;
+
+	return retval;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+armv8_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct armv8_crypto_session *sess;
+	struct armv8_crypto_qp *qp = queue_pair;
+	int i, retval;
+
+	for (i = 0; i < nb_ops; i++) {
+		sess = get_session(qp, ops[i]);
+		if (unlikely(sess == NULL))
+			goto enqueue_err;
+
+		retval = process_op(qp, ops[i], sess);
+		if (unlikely(retval < 0))
+			goto enqueue_err;
+	}
+
+	qp->stats.enqueued_count += i;
+	return i;
+
+enqueue_err:
+	if (ops[i] != NULL)
+		ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+	qp->stats.enqueue_err_count++;
+	return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct armv8_crypto_qp *qp = queue_pair;
+
+	unsigned int nb_dequeued = 0;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+			(void **)ops, nb_ops);
+	qp->stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
+
+/** Create ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_create(const char *name,
+		struct rte_crypto_vdev_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct armv8_crypto_private *internals;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+		ARMV8_CRYPTO_LOG_ERR(
+			"AES instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* Check CPU for support for SHA instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA1) ||
+	    !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA2)) {
+		ARMV8_CRYPTO_LOG_ERR(
+			"SHA1/SHA2 instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* Check CPU for support for Advanced SIMD instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+		ARMV8_CRYPTO_LOG_ERR(
+			"Advanced SIMD instructions not supported by CPU");
+		return -EFAULT;
+	}
+
+	/* create a unique device name */
+	if (create_unique_device_name(crypto_dev_name,
+			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+		ARMV8_CRYPTO_LOG_ERR("failed to create unique cryptodev name");
+		return -EINVAL;
+	}
+
+	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+				sizeof(struct armv8_crypto_private),
+				init_params->socket_id);
+	if (dev == NULL) {
+		ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	dev->dev_type = RTE_CRYPTODEV_ARMV8_PMD;
+	dev->dev_ops = rte_armv8_crypto_pmd_ops;
+
+	/* register rx/tx burst functions for data path */
+	dev->dequeue_burst = armv8_crypto_pmd_dequeue_burst;
+	dev->enqueue_burst = armv8_crypto_pmd_enqueue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	/* Set vector instructions mode supported */
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+
+	return 0;
+
+init_error:
+	ARMV8_CRYPTO_LOG_ERR(
+		"driver %s: cryptodev_armv8_crypto_create failed", name);
+
+	cryptodev_armv8_crypto_uninit(crypto_dev_name);
+	return -EFAULT;
+}
+
+/** Initialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_init(const char *name,
+		const char *input_args)
+{
+	struct rte_crypto_vdev_init_params init_params = {
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+		rte_socket_id()
+	};
+
+	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	return cryptodev_armv8_crypto_create(name, &init_params);
+}
+
+/** Uninitialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_uninit(const char *name)
+{
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD,
+		"Closing ARMv8 crypto device %s on numa socket %u\n",
+		name, rte_socket_id());
+
+	return 0;
+}
+
+static struct rte_vdev_driver armv8_crypto_drv = {
+	.probe = cryptodev_armv8_crypto_init,
+	.remove = cryptodev_armv8_crypto_uninit
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
+	"max_nb_queue_pairs=<int> "
+	"max_nb_sessions=<int> "
+	"socket_id=<int>");
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
new file mode 100644
index 0000000..0f768f4
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -0,0 +1,390 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_armv8_defs.h"
+#include "rte_armv8_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities
+	armv8_crypto_pmd_capabilities[] = {
+	{	/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+				{.auth = {
+					.algo = RTE_CRYPTO_AUTH_SHA256,
+					.block_size = 64,
+					.key_size = {
+						.min = 0,
+						.max = 0,
+						.increment = 0
+					},
+					.digest_size = {
+						.min = 32,
+						.max = 32,
+						.increment = 0
+					},
+					.aad_size = { 0 }
+				}, }
+			}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+				{.auth = {
+					.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+					.block_size = 64,
+					.key_size = {
+						.min = 16,
+						.max = 128,
+						.increment = 0
+					},
+					.digest_size = {
+						.min = 20,
+						.max = 20,
+						.increment = 0
+					},
+					.aad_size = { 0 }
+				}, }
+			}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+				{.auth = {
+					.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+					.block_size = 64,
+					.key_size = {
+						.min = 16,
+						.max = 128,
+						.increment = 0
+					},
+					.digest_size = {
+						.min = 32,
+						.max = 32,
+						.increment = 0
+					},
+					.aad_size = { 0 }
+				}, }
+			}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+				{.cipher = {
+					.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+					.block_size = 16,
+					.key_size = {
+						.min = 16,
+						.max = 32,
+						.increment = 8
+					},
+					.iv_size = {
+						.min = 16,
+						.max = 16,
+						.increment = 0
+					}
+				}, }
+			}, }
+	},
+
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+armv8_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+armv8_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+armv8_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+armv8_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+armv8_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->stats.enqueued_count;
+		stats->dequeued_count += qp->stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+armv8_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+	}
+}
+
+
+/** Get device info */
+static void
+armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct armv8_crypto_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->dev_type = dev->dev_type;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = armv8_crypto_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
+
+/** Release queue pair */
+static int
+armv8_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		rte_free(dev->data->queue_pairs[qp_id]);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+
+	return 0;
+}
+
+/** Set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+armv8_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct armv8_crypto_qp *qp)
+{
+	unsigned int n;
+
+	n = snprintf(qp->name, sizeof(qp->name), "armv8_crypto_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+armv8_crypto_pmd_qp_create_processed_ops_ring(struct armv8_crypto_qp *qp,
+		unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->prod.size >= ring_size) {
+			ARMV8_CRYPTO_LOG_INFO(
+				"Reusing existing ring %s for processed ops",
+				 qp->name);
+			return r;
+		}
+
+		ARMV8_CRYPTO_LOG_ERR(
+			"Unable to reuse existing ring %s for processed ops",
+			 qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id)
+{
+	struct armv8_crypto_qp *qp = NULL;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		armv8_crypto_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("ARMv8 PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (armv8_crypto_pmd_qp_set_unique_name(dev, qp) != 0)
+		goto qp_setup_cleanup;
+
+	qp->processed_ops = armv8_crypto_pmd_qp_create_processed_ops_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_ops == NULL)
+		goto qp_setup_cleanup;
+
+	qp->sess_mp = dev->data->session_pool;
+
+	memset(&qp->stats, 0, sizeof(qp->stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp)
+		rte_free(qp);
+
+	return -1;
+}
+
+/** Start queue pair */
+static int
+armv8_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+armv8_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned
+armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct armv8_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static void *
+armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+		struct rte_crypto_sym_xform *xform, void *sess)
+{
+	if (unlikely(sess == NULL)) {
+		ARMV8_CRYPTO_LOG_ERR("invalid session struct");
+		return NULL;
+	}
+
+	if (armv8_crypto_set_session_parameters(
+			sess, xform) != 0) {
+		ARMV8_CRYPTO_LOG_ERR("failed configure session parameters");
+		return NULL;
+	}
+
+	return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
+				void *sess)
+{
+
+	/* Zero out the whole structure */
+	if (sess)
+		memset(sess, 0, sizeof(struct armv8_crypto_session));
+}
+
+struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
+		.dev_configure		= armv8_crypto_pmd_config,
+		.dev_start		= armv8_crypto_pmd_start,
+		.dev_stop		= armv8_crypto_pmd_stop,
+		.dev_close		= armv8_crypto_pmd_close,
+
+		.stats_get		= armv8_crypto_pmd_stats_get,
+		.stats_reset		= armv8_crypto_pmd_stats_reset,
+
+		.dev_infos_get		= armv8_crypto_pmd_info_get,
+
+		.queue_pair_setup	= armv8_crypto_pmd_qp_setup,
+		.queue_pair_release	= armv8_crypto_pmd_qp_release,
+		.queue_pair_start	= armv8_crypto_pmd_qp_start,
+		.queue_pair_stop	= armv8_crypto_pmd_qp_stop,
+		.queue_pair_count	= armv8_crypto_pmd_qp_count,
+
+		.session_get_size	= armv8_crypto_pmd_session_get_size,
+		.session_configure	= armv8_crypto_pmd_session_configure,
+		.session_clear		= armv8_crypto_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
new file mode 100644
index 0000000..fc1dae4
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -0,0 +1,210 @@ 
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
+#define _RTE_ARMV8_PMD_PRIVATE_H_
+
+#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_ARMV8_CRYPTO_DEBUG
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+			__func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+			__func__, __LINE__, ## args)
+
+#define	ARMV8_CRYPTO_ASSERT(con)				\
+do {								\
+	if (!(con)) {						\
+		rte_panic("%s(): "				\
+		    con "condition failed, line %u", __func__);	\
+	}							\
+} while (0)
+
+#else
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...)
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...)
+#define	ARMV8_CRYPTO_ASSERT(con)
+#endif
+
+#define	NBBY		8		/* Number of bits in a byte */
+#define	BYTE_LENGTH(x)	((x) / 8)	/* Number of bytes in x (round down) */
+
+/** ARMv8 operation order mode enumerator */
+enum armv8_crypto_chain_order {
+	ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
+	ARMV8_CRYPTO_CHAIN_AUTH_CIPHER,
+	ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED,
+	ARMV8_CRYPTO_CHAIN_LIST_END = ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED
+};
+
+/** ARMv8 cipher operation enumerator */
+enum armv8_crypto_cipher_operation {
+	ARMV8_CRYPTO_CIPHER_OP_ENCRYPT = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+	ARMV8_CRYPTO_CIPHER_OP_DECRYPT = RTE_CRYPTO_CIPHER_OP_DECRYPT,
+	ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED,
+	ARMV8_CRYPTO_CIPHER_OP_LIST_END = ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED
+};
+
+enum armv8_crypto_cipher_keylen {
+	ARMV8_CRYPTO_CIPHER_KEYLEN_128,
+	ARMV8_CRYPTO_CIPHER_KEYLEN_192,
+	ARMV8_CRYPTO_CIPHER_KEYLEN_256,
+	ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED,
+	ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END =
+		ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED
+};
+
+/** ARMv8 auth mode enumerator */
+enum armv8_crypto_auth_mode {
+	ARMV8_CRYPTO_AUTH_AS_AUTH,
+	ARMV8_CRYPTO_AUTH_AS_HMAC,
+	ARMV8_CRYPTO_AUTH_AS_CIPHER,
+	ARMV8_CRYPTO_AUTH_NOT_SUPPORTED,
+	ARMV8_CRYPTO_AUTH_LIST_END = ARMV8_CRYPTO_AUTH_NOT_SUPPORTED
+};
+
+#define	CRYPTO_ORDER_MAX		ARMV8_CRYPTO_CHAIN_LIST_END
+#define	CRYPTO_CIPHER_OP_MAX		ARMV8_CRYPTO_CIPHER_OP_LIST_END
+#define	CRYPTO_CIPHER_KEYLEN_MAX	ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END
+#define	CRYPTO_CIPHER_MAX		RTE_CRYPTO_CIPHER_LIST_END
+#define	CRYPTO_AUTH_MAX			RTE_CRYPTO_AUTH_LIST_END
+
+#define	HMAC_IPAD_VALUE			(0x36)
+#define	HMAC_OPAD_VALUE			(0x5C)
+
+#define	SHA256_AUTH_KEY_LENGTH		(BYTE_LENGTH(256))
+#define	SHA256_BLOCK_SIZE		(BYTE_LENGTH(512))
+
+#define	SHA1_AUTH_KEY_LENGTH		(BYTE_LENGTH(160))
+#define	SHA1_BLOCK_SIZE			(BYTE_LENGTH(512))
+
+#define	SHA_AUTH_KEY_MAX		SHA256_AUTH_KEY_LENGTH
+#define	SHA_BLOCK_MAX			SHA256_BLOCK_SIZE
+
+typedef void (*crypto_func_t)(uint8_t *, uint8_t *, uint8_t *, uint8_t *,
+				uint64_t, crypto_arg_t *);
+
+typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
+
+/** private data structure for each ARMv8 crypto device */
+struct armv8_crypto_private {
+	unsigned int max_nb_qpairs;
+	/**< Max number of queue pairs */
+	unsigned int max_nb_sessions;
+	/**< Max number of sessions */
+};
+
+/** ARMv8 crypto queue pair */
+struct armv8_crypto_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *processed_ops;
+	/**< Ring for placing processed packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_cryptodev_stats stats;
+	/**< Queue pair statistics */
+} __rte_cache_aligned;
+
+/** ARMv8 crypto private session structure */
+struct armv8_crypto_session {
+	enum armv8_crypto_chain_order chain_order;
+	/**< chain order mode */
+	crypto_func_t crypto_func;
+	/**< cryptographic function to use for this session */
+
+	/** Cipher Parameters */
+	struct {
+		enum rte_crypto_cipher_operation direction;
+		/**< cipher operation direction */
+		enum rte_crypto_cipher_algorithm algo;
+		/**< cipher algorithm */
+		int iv_len;
+		/**< IV length */
+
+		struct {
+			uint8_t data[256];
+			/**< key data */
+			size_t length;
+			/**< key length in bytes */
+		} key;
+
+		crypto_key_sched_t key_sched;
+		/**< Key schedule function */
+	} cipher;
+
+	/** Authentication Parameters */
+	struct {
+		enum rte_crypto_auth_operation operation;
+		/**< auth operation generate or verify */
+		enum armv8_crypto_auth_mode mode;
+		/**< auth operation mode */
+
+		union {
+			struct {
+				/* Add data if needed */
+			} auth;
+
+			struct {
+				uint8_t i_key_pad[SHA_BLOCK_MAX]
+							__rte_cache_aligned;
+				/**< inner pad (max supported block length) */
+				uint8_t o_key_pad[SHA_BLOCK_MAX]
+							__rte_cache_aligned;
+				/**< outer pad (max supported block length) */
+				uint8_t key[SHA_AUTH_KEY_MAX];
+				/**< HMAC key (max supported length) */
+			} hmac;
+		};
+	} auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate ARMv8 crypto session parameters */
+extern int armv8_crypto_set_session_parameters(
+		struct armv8_crypto_session *sess,
+		const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops;
+
+#endif /* _RTE_ARMV8_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_version.map b/drivers/crypto/armv8/rte_armv8_pmd_version.map
new file mode 100644
index 0000000..1f84b68
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd_version.map
@@ -0,0 +1,3 @@ 
+DPDK_17.02 {
+	local: *;
+};