[v6,3/4] crypto/qat: update headers for GEN LCE support

Message ID 20240228140036.1996629-4-nishikanta.nayak@intel.com (mailing list archive)
State Changes Requested, archived
Delegated to: akhil goyal
Headers
Series add QAT GEN LCE device |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Nayak, Nishikanta Feb. 28, 2024, 2 p.m. UTC
  This patch handles the changes required for updating the common
header fields specific to GEN LCE. It also adds/updates the response
processing APIs based on GEN LCE requirements.

Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
---
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Added GEN LCE specific API for dequeue burst.
    - Fixed code formatting.
---
---
 drivers/crypto/qat/qat_sym.c         | 16 ++++++-
 drivers/crypto/qat/qat_sym.h         | 60 ++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 62 +++++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.h | 10 ++++-
 4 files changed, 140 insertions(+), 8 deletions(-)
  

Comments

Akhil Goyal Feb. 29, 2024, 4:04 p.m. UTC | #1
> This patch handles the changes required for updating the common
> header fields specific to GEN LCE, Also added/updated of the response
> processing APIs based on GEN LCE requirement.
> 
> Signed-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>
> Acked-by: Ciara Power <ciara.power@intel.com>
> ---
> v2:
>     - Renamed device from GEN 5 to GEN LCE.
>     - Removed unused code.
>     - Updated macro names.
>     - Added GEN LCE specific API for deque burst.
>     - Fixed code formatting.
> ---
> ---
>  drivers/crypto/qat/qat_sym.c         | 16 ++++++-
>  drivers/crypto/qat/qat_sym.h         | 60 ++++++++++++++++++++++++++-
>  drivers/crypto/qat/qat_sym_session.c | 62 +++++++++++++++++++++++++++-
>  drivers/crypto/qat/qat_sym_session.h | 10 ++++-
>  4 files changed, 140 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
> index 6e03bde841..439a3fc00b 100644
> --- a/drivers/crypto/qat/qat_sym.c
> +++ b/drivers/crypto/qat/qat_sym.c
> @@ -180,7 +180,15 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op
> **ops,
>  		uint16_t nb_ops)
>  {
>  	return qat_dequeue_op_burst(qp, (void **)ops,
> -				qat_sym_process_response, nb_ops);
> +			qat_sym_process_response, nb_ops);

Unnecessary change. Please remove unnecessary changes which should not be part of this patch.

The maximum length of characters in a line is 100 now. You can format the code as per that.
Since QAT has long macros etc. it would be better to leverage the 100 character per line.
The code would look more readable.
This is a general comment on the complete patchset.

> +}
> +
> +uint16_t
> +qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
> +							uint16_t nb_ops)
> +{
> +	return qat_dequeue_op_burst(qp, (void **)ops,
> +			qat_sym_process_response_gen_lce, nb_ops);
>  }
> 
>  int
> @@ -200,6 +208,7 @@ qat_sym_dev_create(struct qat_pci_device
> *qat_pci_dev,
>  	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
>  	struct rte_cryptodev *cryptodev;
>  	struct qat_cryptodev_private *internals;
> +	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
>  	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
>  		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
> 
> @@ -249,7 +258,10 @@ qat_sym_dev_create(struct qat_pci_device
> *qat_pci_dev,
>  	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
> 
>  	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
> -	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
> +	if (qat_dev_gen == QAT_GEN_LCE)
> +		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
> +	else
> +		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
> 
>  	cryptodev->feature_flags = gen_dev_ops-
> >get_feature_flags(qat_pci_dev);
> 
> diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
> index f2f197d050..3461113c13 100644
> --- a/drivers/crypto/qat/qat_sym.h
> +++ b/drivers/crypto/qat/qat_sym.h
> @@ -90,7 +90,7 @@
>  /*
>   * Maximum number of SGL entries
>   */
> -#define QAT_SYM_SGL_MAX_NUMBER	16
> +#define QAT_SYM_SGL_MAX_NUMBER 16

Again unnecessary change.

> 
>  /* Maximum data length for single pass GMAC: 2^14-1 */
>  #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
> @@ -142,6 +142,10 @@ uint16_t
>  qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
>  		uint16_t nb_ops);
> 
> +uint16_t
> +qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
> +		uint16_t nb_ops);
> +
>  #ifdef RTE_QAT_OPENSSL
>  /** Encrypt a single partial block
>   *  Depends on openssl libcrypto
> @@ -390,6 +394,52 @@ qat_sym_process_response(void **op, uint8_t *resp,
> void *op_cookie,
>  	return 1;
>  }
> 
> +static __rte_always_inline int
> +qat_sym_process_response_gen_lce(void **op, uint8_t *resp,
> +	void *op_cookie __rte_unused,
> +	uint64_t *dequeue_err_count __rte_unused)
> +{
> +	struct icp_qat_fw_comn_resp *resp_msg =
> +		(struct icp_qat_fw_comn_resp *)resp;
> +	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
> +		(resp_msg->opaque_data);
> +	struct qat_sym_session *sess;
> +
> +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
> +	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
> +		sizeof(struct icp_qat_fw_comn_resp));
> +#endif
> +
> +	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
> +
> +	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> +
> +	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
> +
> 	ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
> +			resp_msg->comn_hdr.comn_status))
> +		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
> +
> +	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
> +		ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
> +			resp_msg->comn_hdr.comn_status))
> +		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> +
> +	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
> +		if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
> +			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
> +				resp_msg->comn_hdr.comn_status))
> +			rx_op->status =
> 	RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
> +	}
> +
> +	*op = (void *)rx_op;
> +
> +	/*
> +	 * return 1 as dequeue op only move on to the next op
> +	 * if one was ready to return to API
> +	 */
> +	return 1;
> +}
> +
>  int
>  qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
>  	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
> @@ -455,7 +505,13 @@ qat_sym_preprocess_requests(void **ops
> __rte_unused,
> 
>  static inline void
>  qat_sym_process_response(void **op __rte_unused, uint8_t *resp
> __rte_unused,
> -	void *op_cookie __rte_unused)
> +	void *op_cookie __rte_unused, uint64_t *dequeue_err_count
> __rte_unused)
> +{
> +}
> +
> +static inline void
> +qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp
> __rte_unused,
> +	void *op_cookie __rte_unused, uint64_t *dequeue_err_count
> __rte_unused)
>  {
>  }
> 
> diff --git a/drivers/crypto/qat/qat_sym_session.c
> b/drivers/crypto/qat/qat_sym_session.c
> index 9f4f6c3d93..8f50b61365 100644
> --- a/drivers/crypto/qat/qat_sym_session.c
> +++ b/drivers/crypto/qat/qat_sym_session.c
> @@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
>  static void
>  qat_sym_session_init_common_hdr(struct qat_sym_session *session);
> 
> +static void
> +qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
> +
>  /* Req/cd init functions */
> 
>  static void
> @@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev
> *dev,
>  		session->qat_cmd);
>  		return -ENOTSUP;
>  	}
> +
> +	if (qat_dev_gen == QAT_GEN_LCE) {
> +		qat_sym_session_init_gen_lce_hdr(session);
> +		return 0;
> +	}
> +
>  	qat_sym_session_finalize(session);
> 
>  	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
> @@ -1016,6 +1025,12 @@ qat_sym_session_configure_aead(struct
> rte_cryptodev *dev,
>  			dev->data->dev_private;
>  	enum qat_device_gen qat_dev_gen =
>  			internals->qat_dev->qat_dev_gen;
> +	if (qat_dev_gen == QAT_GEN_LCE) {
> +		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
> +		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
> +
> +		key_buff->keybuff = session->key_paddr;
> +	}
> 
>  	/*
>  	 * Store AEAD IV parameters as cipher IV,
> @@ -1079,9 +1094,15 @@ qat_sym_session_configure_aead(struct
> rte_cryptodev *dev,
>  	}
> 
>  	if (session->is_single_pass) {
> -		if (qat_sym_cd_cipher_set(session,
> +		if (qat_dev_gen != QAT_GEN_LCE) {
> +			if (qat_sym_cd_cipher_set(session,
>  				aead_xform->key.data, aead_xform-
> >key.length))
> -			return -EINVAL;
> +				return -EINVAL;
> +		} else {
> +			session->auth_key_length = aead_xform->key.length;
> +			memcpy(session->key_array, aead_xform->key.data,
> +							aead_xform-
> >key.length);
> +		}
>  	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
>  			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
>  			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT
> &&
> @@ -1970,6 +1991,43 @@ qat_sym_session_init_common_hdr(struct
> qat_sym_session *session)
> 
> 	ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
>  }
> 
> +static void
> +qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
> +{
> +	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
> +	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
> +
> +	/*
> +	 * GEN_LCE specifies separate command id for AEAD operations but
> Cryptodev
> +	 * API processes AEAD operations as Single pass Crypto operations.
> +	 * Hence even for GEN_LCE, Session Algo Command ID is CIPHER.
> +	 * Note, however Session Algo Mode is AEAD.
> +	 */
> +	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
> +	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
> +	header->hdr_flags =
> +
> 	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COM
> N_REQ_FLAG_SET,
> +			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
> +	header->comn_req_flags =
> +
> 	ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_
> SGL,
> +			QAT_COMN_KEY_BUFFER_USED);
> +
> +	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
> +		QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
> +	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
> +		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
> +	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
> +		ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
> +
> +	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
> +		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
> +			ICP_QAT_HW_CIPHER_DECRYPT);
> +	} else {
> +		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
> +			ICP_QAT_HW_CIPHER_ENCRYPT);
> +	}
> +}
> +
>  int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
>  						const uint8_t *cipherkey,
>  						uint32_t cipherkeylen)
> diff --git a/drivers/crypto/qat/qat_sym_session.h
> b/drivers/crypto/qat/qat_sym_session.h
> index 9209e2e8df..958af03405 100644
> --- a/drivers/crypto/qat/qat_sym_session.h
> +++ b/drivers/crypto/qat/qat_sym_session.h
> @@ -111,10 +111,16 @@ struct qat_sym_session {
>  	enum icp_qat_hw_auth_op auth_op;
>  	enum icp_qat_hw_auth_mode auth_mode;
>  	void *bpi_ctx;
> -	struct qat_sym_cd cd;
> +	union {
> +		struct qat_sym_cd cd;
> +		uint8_t key_array[32];
> +	};
>  	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
>  	uint8_t *cd_cur_ptr;
> -	phys_addr_t cd_paddr;
> +	union {
> +		phys_addr_t cd_paddr;
> +		phys_addr_t key_paddr;
> +	};
>  	phys_addr_t prefix_paddr;
>  	struct icp_qat_fw_la_bulk_req fw_req;
>  	uint8_t aad_len;
> --
> 2.25.1
  

Patch

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..439a3fc00b 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -180,7 +180,15 @@  qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
+			qat_sym_process_response, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+							uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+			qat_sym_process_response_gen_lce, nb_ops);
 }
 
 int
@@ -200,6 +208,7 @@  qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct rte_cryptodev *cryptodev;
 	struct qat_cryptodev_private *internals;
+	enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
 	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
 		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 
@@ -249,7 +258,10 @@  qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
 	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+	if (qat_dev_gen == QAT_GEN_LCE)
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
+	else
+		cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
 	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f2f197d050..3461113c13 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -90,7 +90,7 @@ 
 /*
  * Maximum number of SGL entries
  */
-#define QAT_SYM_SGL_MAX_NUMBER	16
+#define QAT_SYM_SGL_MAX_NUMBER 16
 
 /* Maximum data length for single pass GMAC: 2^14-1 */
 #define QAT_AES_GMAC_SPC_MAX_SIZE 16383
@@ -142,6 +142,10 @@  uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 #ifdef RTE_QAT_OPENSSL
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -390,6 +394,52 @@  qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen_lce(void **op, uint8_t *resp,
+	void *op_cookie __rte_unused,
+	uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+		(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+		(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+		sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+	rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+		ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+			resp_msg->comn_hdr.comn_status))
+		rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+	if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+				resp_msg->comn_hdr.comn_status))
+			rx_op->status =	RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+
+	*op = (void *)rx_op;
+
+	/*
+	 * return 1 as dequeue op only move on to the next op
+	 * if one was ready to return to API
+	 */
+	return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -455,7 +505,13 @@  qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-	void *op_cookie __rte_unused)
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
+	void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9f4f6c3d93..8f50b61365 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@  qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -738,6 +741,12 @@  qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		session->qat_cmd);
 		return -ENOTSUP;
 	}
+
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		qat_sym_session_init_gen_lce_hdr(session);
+		return 0;
+	}
+
 	qat_sym_session_finalize(session);
 
 	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1016,6 +1025,12 @@  qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 			dev->data->dev_private;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
+	if (qat_dev_gen == QAT_GEN_LCE) {
+		struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+		struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
+
+		key_buff->keybuff = session->key_paddr;
+	}
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -1079,9 +1094,15 @@  qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	}
 
 	if (session->is_single_pass) {
-		if (qat_sym_cd_cipher_set(session,
+		if (qat_dev_gen != QAT_GEN_LCE) {
+			if (qat_sym_cd_cipher_set(session,
 				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
+				return -EINVAL;
+		} else {
+			session->auth_key_length = aead_xform->key.length;
+			memcpy(session->key_array, aead_xform->key.data,
+							aead_xform->key.length);
+		}
 	} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1970,6 +1991,43 @@  qat_sym_session_init_common_hdr(struct qat_sym_session *session)
 					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
+{
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+	/*
+	 * GEN_LCE specifies separate command id for AEAD operations but Cryptodev
+	 * API processes AEAD operations as Single pass Crypto operations.
+	 * Hence even for GEN_LCE, Session Algo Command ID is CIPHER.
+	 * Note, however Session Algo Mode is AEAD.
+	 */
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->hdr_flags =
+	ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+			ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
+			QAT_COMN_KEY_BUFFER_USED);
+
+	ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
+		QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
+	ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
+		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+		ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+	if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_DECRYPT);
+	} else {
+		ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_HW_CIPHER_ENCRYPT);
+	}
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 						const uint8_t *cipherkey,
 						uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9209e2e8df..958af03405 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,10 +111,16 @@  struct qat_sym_session {
 	enum icp_qat_hw_auth_op auth_op;
 	enum icp_qat_hw_auth_mode auth_mode;
 	void *bpi_ctx;
-	struct qat_sym_cd cd;
+	union {
+		struct qat_sym_cd cd;
+		uint8_t key_array[32];
+	};
 	uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
 	uint8_t *cd_cur_ptr;
-	phys_addr_t cd_paddr;
+	union {
+		phys_addr_t cd_paddr;
+		phys_addr_t key_paddr;
+	};
 	phys_addr_t prefix_paddr;
 	struct icp_qat_fw_la_bulk_req fw_req;
 	uint8_t aad_len;