[v8,1/3] common/qat: add support for GEN LCE device

Message ID 20240229194510.2741004-2-ciara.power@intel.com (mailing list archive)
State Accepted, archived
Delegated to: akhil goyal
Headers
Series add QAT GEN LCE device |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Power, Ciara Feb. 29, 2024, 7:45 p.m. UTC
  From: Nishikant Nayak <nishikanta.nayak@intel.com>

Support is added for a new QAT device generation, GEN LCE.

This generation works slightly differently from previous
generations such as GEN 4, so many new files, functions and
macros are needed specifically for this generation.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>
Acked-by: Arkadiusz Kusztal <arkadiuszx.kusztal@intel.com>
---
v7:
    - Squashed patch 1 + 2 together.
    - Updated commit message.
    - Added a new Signed-off-by tag to cover changes made by
      the second developer in v7.
    - Fixed copyright year for new files.
    - Utilised 100 char line limit.
v6:
    - Removed unused PCI device IDs from the device list.
    - Updated documentation and release note.
v5:
    - Replaced usage of __u8 with uint8_t.
v3:
    - Removed use of linux/kernel.h macro to fix ARM compilation.
    - Fixed typo in commit body and code comment.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Fixed code formatting
---
 .mailmap                                      |   1 +
 doc/guides/cryptodevs/qat.rst                 |   1 +
 doc/guides/rel_notes/release_24_03.rst        |   1 +
 drivers/common/qat/dev/qat_dev_gen_lce.c      | 295 +++++++++++++++++
 drivers/common/qat/meson.build                |   2 +
 .../qat/qat_adf/adf_transport_access_macros.h |   1 +
 .../adf_transport_access_macros_gen_lce.h     |  51 +++
 .../adf_transport_access_macros_gen_lcevf.h   |  48 +++
 drivers/common/qat/qat_adf/icp_qat_fw.h       |  34 ++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h    |  59 +++-
 drivers/common/qat/qat_common.h               |   1 +
 drivers/common/qat/qat_device.c               |   5 +
 .../crypto/qat/dev/qat_crypto_pmd_gen_lce.c   | 310 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.h                  |   6 +
 14 files changed, 814 insertions(+), 1 deletion(-)
 create mode 100644 drivers/common/qat/dev/qat_dev_gen_lce.c
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
 create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
  

Patch

diff --git a/.mailmap b/.mailmap
index 58cca13ef6..8008e5a899 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1036,6 +1036,7 @@  Ning Li <muziding001@163.com> <lining18@jd.com>
 Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>
 Nir Efrati <nir.efrati@intel.com>
 Nirmoy Das <ndas@suse.de>
+Nishikant Nayak <nishikanta.nayak@intel.com>
 Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>
 Nitin Saxena <nitin.saxena@caviumnetworks.com>
 Nitzan Weller <nitzanwe@mellanox.com>
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 28945bb5f3..68d792e4cc 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -28,6 +28,7 @@  poll mode crypto driver support for the following hardware accelerator devices:
 * ``Intel QuickAssist Technology 4xxx``
 * ``Intel QuickAssist Technology 300xx``
 * ``Intel QuickAssist Technology 420xx``
+* ``Intel QuickAssist Technology apfxx``
 
 
 Features
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 439d354cd8..dc498a29ce 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -137,6 +137,7 @@  New Features
     devices in QAT crypto driver.
   * Enabled ZUC256 cipher and auth algorithm for wireless slice
     enabled GEN3 and GEN5 devices.
+  * Added support for GEN LCE (device ID 0x1454), for AES-GCM only.
 
 * **Updated Marvell cnxk crypto driver.**
 
diff --git a/drivers/common/qat/dev/qat_dev_gen_lce.c b/drivers/common/qat/dev/qat_dev_gen_lce.c
new file mode 100644
index 0000000000..6514321c32
--- /dev/null
+++ b/drivers/common/qat/dev/qat_dev_gen_lce.c
@@ -0,0 +1,295 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_pci.h>
+#include <rte_vfio.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_transport_access_macros_gen_lcevf.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_pf2vf.h"
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define BITS_PER_ULONG		(sizeof(unsigned long) * 8)
+
+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX	VFIO_PCI_NUM_REGIONS
+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 2)
+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX	(VFIO_PCI_NUM_REGIONS + 4)
+#define LCE_DEVICE_NAME_SIZE			64
+#define LCE_DEVICE_MAX_BANKS			2080
+#define LCE_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define LCE_DEVICE_BITMAP_SIZE	LCE_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_ULONG)
+
+/* QAT GEN_LCE specific macros */
+#define QAT_GEN_LCE_BUNDLE_NUM		LCE_DEVICE_MAX_BANKS
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM	1
+
+/**
+ * struct lce_vfio_dev_cap - LCE device capabilities
+ *
+ * Device level capabilities and service level capabilities
+ */
+struct lce_vfio_dev_cap {
+	uint16_t device_num;
+	uint16_t device_type;
+	uint32_t capability_mask;
+	uint32_t extended_capabilities;
+	uint16_t max_banks;
+	uint16_t max_rings_per_bank;
+	uint16_t arb_mask;
+	uint16_t services;
+	uint16_t pkg_id;
+	uint16_t node_id;
+	uint8_t device_name[LCE_DEVICE_NAME_SIZE];
+};
+
+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */
+struct lce_vfio_dev_cy_cap {
+	uint32_t nr_banks;
+	unsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];
+};
+
+struct lce_qat_domain {
+	uint32_t nid        :3;
+	uint32_t fid        :7;
+	uint32_t ftype      :2;
+	uint32_t vfid       :13;
+	uint32_t rid        :4;
+	uint32_t vld        :1;
+	uint32_t desc_over  :1;
+	uint32_t pasid_vld  :1;
+	uint32_t pasid      :20;
+};
+
+struct lce_qat_buf_domain {
+	uint32_t bank_id:   20;
+	uint32_t type:      4;
+	uint32_t resv:      8;
+	struct lce_qat_domain dom;
+};
+
+struct qat_dev_gen_lce_extra {
+	struct qat_qp_hw_data
+	    qp_gen_lce_data[QAT_GEN_LCE_BUNDLE_NUM][QAT_GEN4_QPS_PER_BUNDLE_NUM];
+};
+
+static struct qat_pf2vf_dev qat_pf2vf_gen_lce = {
+	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static int
+qat_select_valid_queue_gen_lce(struct qat_pci_device *qat_dev, int qp_id,
+			    enum qat_service_type service_type)
+{
+	int i = 0, valid_qps = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	for (; i < QAT_GEN_LCE_BUNDLE_NUM; i++) {
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service_type) {
+			if (valid_qps == qp_id)
+				return i;
+			++valid_qps;
+		}
+	}
+	return -1;
+}
+
+static const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen_lce(struct qat_pci_device *qat_dev,
+			enum qat_service_type service_type, uint16_t qp_id)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	int ring_pair = qat_select_valid_queue_gen_lce(qat_dev, qp_id, service_type);
+
+	if (ring_pair < 0)
+		return NULL;
+
+	return &dev_extra->qp_gen_lce_data[ring_pair][0];
+}
+
+static int
+qat_qp_rings_per_service_gen_lce(struct qat_pci_device *qat_dev,
+			      enum qat_service_type service)
+{
+	int i = 0, count = 0, max_ops_per_srv = 0;
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+
+	max_ops_per_srv = QAT_GEN_LCE_BUNDLE_NUM;
+	for (i = 0, count = 0; i < max_ops_per_srv; i++)
+		if (dev_extra->qp_gen_lce_data[i][0].service_type == service)
+			count++;
+	return count;
+}
+
+static int qat_dev_read_config_gen_lce(struct qat_pci_device *qat_dev)
+{
+	struct qat_dev_gen_lce_extra *dev_extra = qat_dev->dev_private;
+	struct qat_qp_hw_data *hw_data;
+
+	/* Enable only crypto ring: RP-0 */
+	hw_data = &dev_extra->qp_gen_lce_data[0][0];
+	memset(hw_data, 0, sizeof(*hw_data));
+
+	hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+	hw_data->tx_msg_size = 128;
+	hw_data->rx_msg_size = 32;
+
+	hw_data->tx_ring_num = 0;
+	hw_data->rx_ring_num = 1;
+
+	hw_data->hw_bundle_num = 0;
+
+	return 0;
+}
+
+static void qat_qp_build_ring_base_gen_lce(void *io_addr, struct qat_queue *queue)
+{
+	uint64_t queue_base;
+
+	queue_base = BUILD_RING_BASE_ADDR_GEN_LCE(queue->base_phys_addr, queue->queue_size);
+	WRITE_CSR_RING_BASE_GEN_LCEVF(io_addr, queue->hw_bundle_number,
+			queue->hw_queue_number, queue_base);
+}
+
+static void
+qat_qp_adf_arb_enable_gen_lce(const struct qat_queue *txq,
+			   void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+			(ADF_RING_BUNDLE_SIZE_GEN_LCE * txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, arb_csr_offset);
+	value |= 0x01;
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_arb_disable_gen_lce(const struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock)
+{
+	uint32_t arb_csr_offset = 0, value;
+
+	rte_spinlock_lock(lock);
+	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+			(ADF_RING_BUNDLE_SIZE_GEN_LCE * txq->hw_bundle_number);
+	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, arb_csr_offset);
+	value &= ~(0x01);
+	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
+}
+
+static void
+qat_qp_adf_configure_queues_gen_lce(struct qat_qp *qp)
+{
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	/* q_tx/rx->queue_size is initialized as per bundle config register */
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_tx->hw_bundle_number,
+			q_tx->hw_queue_number, q_tx_config);
+	WRITE_CSR_RING_CONFIG_GEN_LCEVF(qp->mmap_bar_addr, q_rx->hw_bundle_number,
+			q_rx->hw_queue_number, q_resp_config);
+}
+
+static void
+qat_qp_csr_write_tail_gen_lce(struct qat_qp *qp, struct qat_queue *q)
+{
+	WRITE_CSR_RING_TAIL_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, q->tail);
+}
+
+static void
+qat_qp_csr_write_head_gen_lce(struct qat_qp *qp, struct qat_queue *q, uint32_t new_head)
+{
+	WRITE_CSR_RING_HEAD_GEN_LCEVF(qp->mmap_bar_addr, q->hw_bundle_number,
+				   q->hw_queue_number, new_head);
+}
+
+static void
+qat_qp_csr_setup_gen_lce(struct qat_pci_device *qat_dev, void *io_addr, struct qat_qp *qp)
+{
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->tx_q);
+	qat_qp_build_ring_base_gen_lce(io_addr, &qp->rx_q);
+	qat_qp_adf_configure_queues_gen_lce(qp);
+	qat_qp_adf_arb_enable_gen_lce(&qp->tx_q, qp->mmap_bar_addr, &qat_dev->arb_csr_lock);
+}
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen_lce = {
+	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen_lce,
+	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen_lce,
+	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen_lce,
+	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen_lce,
+	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen_lce,
+	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen_lce,
+	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen_lce,
+	.qat_qp_csr_setup = qat_qp_csr_setup_gen_lce,
+	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen_lce,
+};
+
+static int
+qat_reset_ring_pairs_gen_lce(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static const struct rte_mem_resource*
+qat_dev_get_transport_bar_gen_lce(struct rte_pci_device *pci_dev)
+{
+	return &pci_dev->mem_resource[0];
+}
+
+static int
+qat_dev_get_misc_bar_gen_lce(struct rte_mem_resource **mem_resource,
+			  struct rte_pci_device *pci_dev)
+{
+	*mem_resource = &pci_dev->mem_resource[2];
+	return 0;
+}
+
+static int
+qat_dev_get_extra_size_gen_lce(void)
+{
+	return sizeof(struct qat_dev_gen_lce_extra);
+}
+
+static int
+qat_dev_get_slice_map_gen_lce(uint32_t *map __rte_unused,
+	const struct rte_pci_device *pci_dev __rte_unused)
+{
+	return 0;
+}
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen_lce = {
+	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen_lce,
+	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen_lce,
+	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen_lce,
+	.qat_dev_read_config = qat_dev_read_config_gen_lce,
+	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen_lce,
+	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen_lce,
+};
+
+RTE_INIT(qat_dev_gen_lce_init)
+{
+	qat_qp_hw_spec[QAT_GEN_LCE] = &qat_qp_hw_spec_gen_lce;
+	qat_dev_hw_spec[QAT_GEN_LCE] = &qat_dev_hw_spec_gen_lce;
+	qat_gen_config[QAT_GEN_LCE].dev_gen = QAT_GEN_LCE;
+	qat_gen_config[QAT_GEN_LCE].pf2vf_dev = &qat_pf2vf_gen_lce;
+}
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index d79085258f..3893b127dd 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -83,6 +83,7 @@  sources += files(
         'dev/qat_dev_gen3.c',
         'dev/qat_dev_gen4.c',
         'dev/qat_dev_gen5.c',
+        'dev/qat_dev_gen_lce.c',
 )
 includes += include_directories(
         'qat_adf',
@@ -111,6 +112,7 @@  if qat_crypto
             'dev/qat_crypto_pmd_gen3.c',
             'dev/qat_crypto_pmd_gen4.c',
             'dev/qat_crypto_pmd_gen5.c',
+            'dev/qat_crypto_pmd_gen_lce.c',
         ]
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 12a7258c60..19bd812419 100644
--- a/drivers/common/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -47,6 +47,7 @@ 
 #define ADF_RING_SIZE_512 0x03
 #define ADF_RING_SIZE_4K 0x06
 #define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_64K 0x0A
 #define ADF_RING_SIZE_4M 0x10
 #define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
new file mode 100644
index 0000000000..eac0d30f49
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lce.h
@@ -0,0 +1,51 @@ 
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCE_H
+
+#include "adf_transport_access_macros.h"
+
+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
+#define ADF_RING_BUNDLE_SIZE_GEN_LCE 0x2000
+#define ADF_RING_CSR_RING_CONFIG_GEN_LCE 0x1000
+#define ADF_RING_CSR_RING_LBASE_GEN_LCE 0x1040
+#define ADF_RING_CSR_RING_UBASE_GEN_LCE 0x1080
+
+#define BUILD_RING_BASE_ADDR_GEN_LCE(addr, size) \
+	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
+
+#define WRITE_CSR_RING_BASE_GEN_LCE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);	\
+	ADF_CSR_WR(csr_base_addr,	\
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),		\
+		l_base);						\
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) +			\
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+
+#define WRITE_CSR_RING_HEAD_GEN_LCE(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((u8 *)(csr_base_addr), \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+
+#endif
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
new file mode 100644
index 0000000000..3c7232de12
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen_lcevf.h
@@ -0,0 +1,48 @@ 
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+#define ADF_TRANSPORT_ACCESS_MACROS_GEN_LCEVF_H
+
+#include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen_lce.h"
+
+#define ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF 0x0
+
+#define WRITE_CSR_RING_BASE_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_LBASE_GEN_LCE + (ring << 2),	\
+		l_base);	\
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_UBASE_GEN_LCE + (ring << 2),		\
+		u_base);	\
+} while (0)
+
+#define WRITE_CSR_RING_CONFIG_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF,	\
+		 (ADF_RING_BUNDLE_SIZE_GEN_LCE * bank) + \
+		ADF_RING_CSR_RING_CONFIG_GEN_LCE + (ring << 2), value)
+
+#define WRITE_CSR_RING_TAIL_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_HEAD_GEN_LCEVF(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))
+
+#define WRITE_CSR_RING_SRV_ARB_EN_GEN_LCEVF(csr_base_addr, bank, value) \
+	ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN_LCEVF, \
+		(ADF_RING_BUNDLE_SIZE_GEN_LCE * (bank)) + \
+		ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+#endif
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index dd7c926140..4c7bbf0f54 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -57,6 +57,12 @@  struct icp_qat_fw_comn_req_hdr_cd_pars {
 	} u;
 };
 
+struct lce_key_buff_desc {
+	uint64_t keybuff;
+	uint32_t keybuff_resrvd1;
+	uint32_t keybuff_resrvd2;
+};
+
 struct icp_qat_fw_comn_req_mid {
 	uint64_t opaque_data;
 	uint64_t src_data_addr;
@@ -124,6 +130,12 @@  struct icp_qat_fw_comn_resp {
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS 0
 #define ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK 0x1
 
+/* GEN_LCE specific Common Header fields */
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS 5
+#define ICP_QAT_FW_COMN_DESC_LAYOUT_MASK 0x3
+#define ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT 3
+#define ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR 0
+
 #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
 	icp_qat_fw_comn_req_hdr_t.service_type
 
@@ -169,6 +181,12 @@  struct icp_qat_fw_comn_resp {
 	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
 	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
 
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(valid, desc_layout) \
+	((((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) | \
+	(((desc_layout) & ICP_QAT_FW_COMN_DESC_LAYOUT_MASK) << \
+	ICP_QAT_FW_COMN_DESC_LAYOUT_BITPOS))
+
 #define QAT_COMN_PTR_TYPE_BITPOS 0
 #define QAT_COMN_PTR_TYPE_MASK 0x1
 #define QAT_COMN_CD_FLD_TYPE_BITPOS 1
@@ -178,10 +196,20 @@  struct icp_qat_fw_comn_resp {
 #define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
 #define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
 
+/* GEN_LCE specific Common Request Flags fields */
+#define QAT_COMN_KEYBUF_USAGE_BITPOS 1
+#define QAT_COMN_KEYBUF_USAGE_MASK 0x1
+#define QAT_COMN_KEY_BUFFER_USED 1
+
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
 	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
 
+#define ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(ptr, keybuf) \
+	((((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS) | \
+	 (((keybuf) & QAT_COMN_PTR_TYPE_MASK) << \
+	   QAT_COMN_KEYBUF_USAGE_BITPOS))
+
 #define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
 	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
 
@@ -247,6 +275,8 @@  struct icp_qat_fw_comn_resp {
 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
 #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_INVALID_PARAM_BITPOS 1
+#define QAT_COMN_RESP_INVALID_PARAM_MASK 0x1
 #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
 #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
 
@@ -278,6 +308,10 @@  struct icp_qat_fw_comn_resp {
 	QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
 	QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
 
+#define ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_INVALID_PARAM_BITPOS, \
+	QAT_COMN_RESP_INVALID_PARAM_MASK)
+
 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 134c309355..67fc25c919 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -22,14 +22,24 @@  enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };
 
+/* In GEN_LCE Command ID 4 corresponds to AEAD */
+#define ICP_QAT_FW_LA_CMD_AEAD 4
+
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
 #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
 
+/* GEN_LCE Hash, HMAC and GCM Verification Status */
+#define ICP_QAT_FW_LA_VER_STATUS_FAIL ICP_QAT_FW_COMN_GEN_LCE_STATUS_FLAG_ERROR
+
+
 struct icp_qat_fw_la_bulk_req {
 	struct icp_qat_fw_comn_req_hdr comn_hdr;
-	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	union {
+		struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+		struct lce_key_buff_desc key_buff;
+	};
 	struct icp_qat_fw_comn_req_mid comn_mid;
 	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
 	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
@@ -90,6 +100,21 @@  struct icp_qat_fw_la_bulk_req {
 #define QAT_LA_USE_WAT_SLICE_BITPOS 3
 #define QAT_LA_USE_WAT_SLICE 1
 #define QAT_LA_USE_WAT_SLICE_MASK 0x1
+
+/* GEN_LCE specific Crypto Flags fields */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS 6
+#define ICP_QAT_FW_SYM_AEAD_ALGO_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_SIZE_BITPOS 9
+#define ICP_QAT_FW_SYM_IV_SIZE_MASK 0x3
+#define ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS 11
+#define ICP_QAT_FW_SYM_IV_IN_DESC_MASK 0x1
+#define ICP_QAT_FW_SYM_IV_IN_DESC_VALID 1
+#define ICP_QAT_FW_SYM_DIRECTION_BITPOS 15
+#define ICP_QAT_FW_SYM_DIRECTION_MASK 0x1
+
+/* In GEN_LCE, the AEAD AES-GCM algorithm has ID 0 */
+#define QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE 0
+
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -212,6 +237,23 @@  struct icp_qat_fw_la_bulk_req {
 	QAT_LA_USE_WAT_SLICE_BITPOS, \
 	QAT_LA_USE_WAT_SLICE_MASK)
 
+/* GEN_LCE specific Crypto Flags operations */
+#define ICP_QAT_FW_SYM_AEAD_ALGO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_AEAD_ALGO_BITPOS, \
+	ICP_QAT_FW_SYM_AEAD_ALGO_MASK)
+
+#define ICP_QAT_FW_SYM_IV_SIZE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_SIZE_BITPOS, \
+	ICP_QAT_FW_SYM_IV_SIZE_MASK)
+
+#define ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_IV_IN_DESC_BITPOS, \
+	ICP_QAT_FW_SYM_IV_IN_DESC_MASK)
+
+#define ICP_QAT_FW_SYM_DIR_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, ICP_QAT_FW_SYM_DIRECTION_BITPOS, \
+	ICP_QAT_FW_SYM_DIRECTION_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
@@ -434,4 +476,19 @@  struct icp_qat_fw_la_cipher_20_req_params {
 	uint8_t    spc_auth_res_sz;
 };
 
+struct icp_qat_fw_la_cipher_30_req_params {
+		uint32_t   spc_aad_sz;
+		uint8_t    cipher_length;
+		uint8_t    reserved[2];
+		uint8_t    spc_auth_res_sz;
+		union {
+				uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+				struct {
+						uint64_t cipher_IV_ptr;
+						uint64_t resrvd1;
+			} s;
+
+		} u;
+};
+
 #endif
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index dc48a2e1ee..44a8dff802 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -22,6 +22,7 @@  enum qat_device_gen {
 	QAT_GEN3,
 	QAT_GEN4,
 	QAT_GEN5,
+	QAT_GEN_LCE,
 	QAT_N_GENS
 };
 
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 0ccc3f85fd..a77c628256 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -68,6 +68,9 @@  static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x4947),
 		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x1454),
+		},
 		{.device_id = 0},
 };
 
@@ -208,6 +211,8 @@  pick_gen(const struct rte_pci_device *pci_dev)
 		return QAT_GEN4;
 	case 0x4947:
 		return QAT_GEN5;
+	case 0x1454:
+		return QAT_GEN_LCE;
 	default:
 		QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
 		return QAT_N_GENS;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
new file mode 100644
index 0000000000..7298916f2a
--- /dev/null
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen_lce.c
@@ -0,0 +1,310 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen_lce[] = {
+	QAT_SYM_AEAD_CAP(AES_GCM,
+		CAP_SET(block_size, 16),
+		CAP_RNG(key_size, 32, 32, 0), CAP_RNG(digest_size, 16, 16, 0),
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+qat_sgl_add_buffer_gen_lce(void *list_in, uint64_t addr, uint32_t len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr;
+
+	nr = list->num_bufs;
+
+	if (nr >= QAT_SYM_SGL_MAX_NUMBER) {
+		QAT_DP_LOG(ERR, "Adding %d entry failed, no empty SGL buffer", nr);
+		return -EINVAL;
+	}
+
+	list->buffers[nr].len = len;
+	list->buffers[nr].resrvd = 0;
+	list->buffers[nr].addr = addr;
+
+	list->num_bufs++;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+	QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+		nr, list->buffers[nr].len, list->buffers[nr].addr);
+#endif
+	return 0;
+}
+
+static int
+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,
+		void *list_in, uint32_t data_len)
+{
+	struct qat_sgl *list = (struct qat_sgl *)list_in;
+	uint32_t nr, buf_len;
+	int res = -EINVAL;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	uint32_t start_idx = list->num_bufs;
+#endif
+
+	/* Append to the existing list */
+	nr = list->num_bufs;
+
+	for (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			/* Jump to next mbuf */
+			continue;
+		}
+
+		list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+		list->buffers[nr].resrvd = 0;
+		list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+		offset = 0;
+		buf_len += list->buffers[nr].len;
+
+		if (buf_len >= data_len) {
+			list->buffers[nr].len -= buf_len - data_len;
+			res = 0;
+			break;
+		}
+		++nr;
+	}
+
+	if (unlikely(res != 0)) {
+		if (nr == QAT_SYM_SGL_MAX_NUMBER)
+			QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+					QAT_SYM_SGL_MAX_NUMBER);
+		else
+			QAT_DP_LOG(ERR, "Mbuf chain is too short");
+	} else {
+
+		list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+		for (nr = start_idx; nr < list->num_bufs; nr++) {
+			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+					nr, list->buffers[nr].len,
+					list->buffers[nr].addr);
+		}
+#endif
+	}
+
+	return res;
+}
+
+static int
+qat_sym_build_op_aead_gen_lce(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_op *op = in_op;
+	uint64_t digest_phys_addr, aad_phys_addr;
+	uint16_t iv_len, aad_len, digest_len, key_len;
+	uint32_t cipher_ofs, iv_offset, cipher_len;
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct icp_qat_fw_la_cipher_30_req_params *cipher_param;
+	enum icp_qat_hw_cipher_dir dir;
+	bool is_digest_adjacent = false;
+
+	if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||
+		ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||
+		ctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {
+
+		QAT_DP_LOG(ERR, "Not supported (cmd: %d, alg: %d, mode: %d). "
+			"GEN_LCE PMD only supports AES-256 AEAD mode",
+			ctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);
+		return -EINVAL;
+	}
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+	cipher_param = (void *)&qat_req->serv_specif_rqpars;
+
+	dir = ctx->qat_dir;
+
+	aad_phys_addr = op->sym->aead.aad.phys_addr;
+	aad_len = ctx->aad_len;
+
+	iv_offset = ctx->cipher_iv.offset;
+	iv_len = ctx->cipher_iv.length;
+
+	cipher_ofs = op->sym->aead.data.offset;
+	cipher_len = op->sym->aead.data.length;
+
+	digest_phys_addr = op->sym->aead.digest.phys_addr;
+	digest_len = ctx->digest_length;
+
+	/* Up to 16B of IV can be directly embedded in the descriptor.
+	 * GCM supports only a 12B IV for GEN LCE.
+	 */
+	if (iv_len != GCM_IV_LENGTH_GEN_LCE) {
+		QAT_DP_LOG(ERR, "iv_len: %d not supported. Must be 12B.", iv_len);
+		return -EINVAL;
+	}
+
+	rte_memcpy(cipher_param->u.cipher_IV_array,
+			rte_crypto_op_ctod_offset(op, uint8_t*, iv_offset), iv_len);
+
+	/* Always SGL */
+	RTE_ASSERT((qat_req->comn_hdr.comn_req_flags & ICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);
+	/* Always inplace */
+	RTE_ASSERT(op->sym->m_dst == NULL);
+
+	/* Key buffer address is already programmed by reusing the
+	 * content-descriptor buffer
+	 */
+	key_len = ctx->auth_key_length;
+
+	cipher_param->spc_aad_sz = aad_len;
+	cipher_param->cipher_length = key_len;
+	cipher_param->spc_auth_res_sz = digest_len;
+
+	/* Knowing digest is contiguous to cipher-text helps optimizing SGL */
+	if (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len) == digest_phys_addr)
+		is_digest_adjacent = true;
+
+	/* SRC-SGL: 3 entries:
+	 * a) AAD
+	 * b) cipher
+	 * c) digest (only for decrypt and buffer is_NOT_adjacent)
+	 *
+	 */
+	cookie->qat_sgl_src.num_bufs = 0;
+	if (aad_len)
+		qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src, aad_phys_addr, aad_len);
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
+				cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_src,
+				cipher_len);
+
+		/* Digest buffer in decrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_DECRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_src,
+					digest_phys_addr, digest_len);
+	}
+
+	/* (in-place) DST-SGL: 2 entries:
+	 * a) cipher
+	 * b) digest (only for encrypt and buffer is_NOT_adjacent)
+	 */
+	cookie->qat_sgl_dst.num_bufs = 0;
+
+	if (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
+				cipher_len + digest_len);
+	} else {
+		qat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs, &cookie->qat_sgl_dst,
+				cipher_len);
+
+		/* Digest buffer in Encrypt job */
+		if (dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+			qat_sgl_add_buffer_gen_lce(&cookie->qat_sgl_dst,
+					digest_phys_addr, digest_len);
+	}
+
+	/* Length values in 128B descriptor */
+	qat_req->comn_mid.src_length = cipher_len;
+	qat_req->comn_mid.dst_length = cipher_len;
+
+	if (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */
+		qat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;
+
+	/* src & dst SGL addresses in 128B descriptor */
+	qat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;
+	qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+			rte_pktmbuf_data_len(op->sym->m_src));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data, digest_len);
+	QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data, aad_len);
+#endif
+	return 0;
+}
+
+/* Session-setup hook for GEN LCE.
+ *
+ * GEN LCE firmware only supports the AES-256 GCM AEAD path, so the only
+ * job here is to install the matching request-builder for the current
+ * process type.  Unsupported algorithm combinations are rejected up front
+ * with -ENOTSUP instead of silently returning success with no
+ * build_request installed, which would otherwise defer the failure to
+ * enqueue time (NOTE(review): confirm no caller relies on a 0 return for
+ * non-AEAD sessions on this generation).
+ *
+ * @param cdev     unused device handle.
+ * @param session  qat_sym_session being configured.
+ * @return 0 on success, -EINVAL for an invalid process type,
+ *         -ENOTSUP for an unsupported algorithm combination.
+ */
+static int
+qat_sym_crypto_set_session_gen_lce(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+
+	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+		return -EINVAL;
+
+	/* Only AES-256 GCM AEAD is implemented for GEN LCE */
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen_lce;
+		return 0;
+	}
+
+	return -ENOTSUP;
+}
+
+
+/* Populate the capability table for a GEN LCE device.
+ *
+ * The capability array is placed in a named memzone so that secondary
+ * processes can look up the same copy instead of re-allocating it.
+ *
+ * @param internals       per-device private data; capa_mz and
+ *                        qat_dev_capabilities are filled in on success.
+ * @param capa_memz_name  name of the capability memzone to look up/reserve.
+ * @param slice_map       unused; GEN LCE capabilities do not depend on the
+ *                        slice map.
+ * @return 0 on success, -1 if the memzone cannot be reserved.
+ */
+static int
+qat_sym_crypto_cap_get_gen_lce(struct qat_cryptodev_private *internals,
+	const char *capa_memz_name,
+	const uint16_t __rte_unused slice_map)
+{
+	const uint32_t size = sizeof(qat_sym_crypto_caps_gen_lce);
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name, size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG, "Error allocating memzone for capabilities");
+			return -1;
+		}
+	}
+
+	/* No per-capability filtering is done for this generation, so the
+	 * static table can be copied verbatim in one shot; the element-wise
+	 * loop used by other generations to drop slice-map-gated entries is
+	 * not needed here.
+	 */
+	memcpy(internals->capa_mz->addr, qat_sym_crypto_caps_gen_lce, size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	return 0;
+}
+
+/* Constructor: register the symmetric-crypto ops for the GEN LCE
+ * generation.  Generic GEN1 cryptodev ops and feature-flag query are
+ * reused; only capability query and session setup are GEN LCE specific.
+ * The raw data-path API is not provided (set_raw_dp_ctx is NULL).
+ */
+RTE_INIT(qat_sym_crypto_gen_lce_init)
+{
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = qat_sym_crypto_cap_get_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_session = qat_sym_crypto_set_session_gen_lce;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].set_raw_dp_ctx = NULL;
+	qat_sym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1;
+}
+
+/* Constructor: asymmetric crypto is not supported on GEN LCE, so all
+ * asym ops are registered as NULL and the common code will treat the
+ * asym service as unavailable for this generation.
+ */
+RTE_INIT(qat_asym_crypto_gen_lce_init)
+{
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].cryptodev_ops = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_capabilities = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN_LCE].set_session = NULL;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f976009bf2..f2f197d050 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -95,6 +95,12 @@ 
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
 
+/* Digest length for the GCM algorithm is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for the GCM algorithm on GEN LCE is 12 bytes */
+#define GCM_IV_LENGTH_GEN_LCE 12
+
 struct qat_sym_session;
 
 struct qat_sym_sgl {