[2/5] lib: add pdcp protocol

Message ID: 20221222092522.1628-3-anoobj@marvell.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Akhil Goyal
Series: lib: add pdcp protocol

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Anoob Joseph Dec. 22, 2022, 9:25 a.m. UTC
  Add Packet Data Convergence Protocol (PDCP) processing library.

The library is similar to lib_ipsec, which provides IPsec processing
capabilities in DPDK.

PDCP would involve roughly the following operations:
1. Transfer of user plane data
2. Transfer of control plane data
3. Header compression
4. Uplink data compression
5. Ciphering and integrity protection

The PDCP library provides the following control path APIs, which are
used to configure various PDCP entities:
1. rte_pdcp_entity_establish()
2. rte_pdcp_entity_suspend()
3. rte_pdcp_entity_release()
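
A rough usage sketch for the control path (cryptodev and mempool setup
omitted; rte_pdcp_entity_establish() is assumed to return the entity
pointer):

	struct rte_pdcp_entity_conf conf = {
		.pdcp_xfrm = {
			.domain = RTE_SECURITY_PDCP_MODE_DATA,
			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
		},
		.crypto_xfrm = &cipher_xfrm, /* cipher (+ optional auth) chain */
		.dev_id = dev_id,
		.sess_mpool = sess_mpool,
	};
	struct rte_pdcp_entity *entity;

	entity = rte_pdcp_entity_establish(&conf);
	...
	rte_pdcp_entity_suspend(entity);
	rte_pdcp_entity_release(entity);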

PDCP processing is split into two parts: one before crypto processing
(rte_pdcp_pkt_pre_process()) and one after crypto processing
(rte_pdcp_pkt_post_process()). Since a cryptodev dequeue can return
crypto operations belonging to multiple entities,
rte_pdcp_pkt_crypto_group() is added to help group crypto operations
belonging to the same entity.
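
A rough datapath sketch (struct rte_pdcp_group field names are assumed
here by analogy with rte_ipsec_group; error handling trimmed):

	struct rte_crypto_op *cop[BURST_SZ];
	struct rte_pdcp_group grp[BURST_SZ];
	uint16_t nb_err, nb_cop, nb_grp, i;

	/* Prepend PDCP header and prepare crypto ops for the entity. */
	nb_cop = rte_pdcp_pkt_pre_process(entity, mb, cop, nb_mb, &nb_err);

	/* Regular cryptodev enqueue/dequeue. */
	nb_cop = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, nb_cop);
	nb_cop = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, BURST_SZ);

	/* Dequeue can mix entities; group the ops per entity first. */
	nb_grp = rte_pdcp_pkt_crypto_group(cop, mb, grp, nb_cop);

	/* Post-process each group against its own entity. */
	for (i = 0; i != nb_grp; i++)
		rte_pdcp_pkt_post_process(grp[i].id.ptr, grp[i].m, out_mb,
					  grp[i].cnt, &nb_err);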

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 doc/api/doxy-api-index.md |    3 +-
 doc/api/doxy-api.conf.in  |    1 +
 lib/meson.build           |    1 +
 lib/pdcp/meson.build      |    8 +
 lib/pdcp/pdcp_crypto.c    |  240 ++++++++
 lib/pdcp/pdcp_crypto.h    |   20 +
 lib/pdcp/pdcp_entity.h    |  218 +++++++
 lib/pdcp/pdcp_process.c   | 1195 +++++++++++++++++++++++++++++++++++++
 lib/pdcp/pdcp_process.h   |   13 +
 lib/pdcp/rte_pdcp.c       |  136 +++++
 lib/pdcp/rte_pdcp.h       |  263 ++++++++
 lib/pdcp/rte_pdcp_group.h |  133 +++++
 lib/pdcp/version.map      |   13 +
 13 files changed, 2243 insertions(+), 1 deletion(-)
 create mode 100644 lib/pdcp/meson.build
 create mode 100644 lib/pdcp/pdcp_crypto.c
 create mode 100644 lib/pdcp/pdcp_crypto.h
 create mode 100644 lib/pdcp/pdcp_entity.h
 create mode 100644 lib/pdcp/pdcp_process.c
 create mode 100644 lib/pdcp/pdcp_process.h
 create mode 100644 lib/pdcp/rte_pdcp.c
 create mode 100644 lib/pdcp/rte_pdcp.h
 create mode 100644 lib/pdcp/rte_pdcp_group.h
 create mode 100644 lib/pdcp/version.map
  

Comments

Akhil Goyal Jan. 18, 2023, 4:26 p.m. UTC | #1
Hi Anoob,

Please see inline comments.
> Subject: [PATCH 2/5] lib: add pdcp protocol
> 
> Add Packet Data Convergence Protocol (PDCP) processing library.
> 
> The library is similar to lib_ipsec, which provides IPsec processing
> capabilities in DPDK.
> 
> PDCP would involve roughly the following operations:
> 1. Transfer of user plane data
> 2. Transfer of control plane data
> 3. Header compression
> 4. Uplink data compression
> 5. Ciphering and integrity protection
> 
> The PDCP library provides the following control path APIs, which are
> used to configure various PDCP entities:
> 1. rte_pdcp_entity_establish()
> 2. rte_pdcp_entity_suspend()
> 3. rte_pdcp_entity_release()
> 
> PDCP processing is split into two parts: one before crypto processing
> (rte_pdcp_pkt_pre_process()) and one after crypto processing
> (rte_pdcp_pkt_post_process()). Since a cryptodev dequeue can return
> crypto operations belonging to multiple entities,
> rte_pdcp_pkt_crypto_group() is added to help group crypto operations
> belonging to the same entity.
> 
> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> ---
>  doc/api/doxy-api-index.md |    3 +-
>  doc/api/doxy-api.conf.in  |    1 +
>  lib/meson.build           |    1 +
>  lib/pdcp/meson.build      |    8 +
>  lib/pdcp/pdcp_crypto.c    |  240 ++++++++
>  lib/pdcp/pdcp_crypto.h    |   20 +
>  lib/pdcp/pdcp_entity.h    |  218 +++++++
>  lib/pdcp/pdcp_process.c   | 1195 +++++++++++++++++++++++++++++++++++++
>  lib/pdcp/pdcp_process.h   |   13 +
>  lib/pdcp/rte_pdcp.c       |  136 +++++
>  lib/pdcp/rte_pdcp.h       |  263 ++++++++
>  lib/pdcp/rte_pdcp_group.h |  133 +++++
>  lib/pdcp/version.map      |   13 +
>  13 files changed, 2243 insertions(+), 1 deletion(-)
>  create mode 100644 lib/pdcp/meson.build
>  create mode 100644 lib/pdcp/pdcp_crypto.c
>  create mode 100644 lib/pdcp/pdcp_crypto.h
>  create mode 100644 lib/pdcp/pdcp_entity.h
>  create mode 100644 lib/pdcp/pdcp_process.c
>  create mode 100644 lib/pdcp/pdcp_process.h
>  create mode 100644 lib/pdcp/rte_pdcp.c
>  create mode 100644 lib/pdcp/rte_pdcp.h
>  create mode 100644 lib/pdcp/rte_pdcp_group.h
>  create mode 100644 lib/pdcp/version.map
> 
> diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
> index ae4b107240..6014bee079 100644
> --- a/doc/api/doxy-api-index.md
> +++ b/doc/api/doxy-api-index.md
> @@ -126,7 +126,8 @@ The public API headers are grouped by topics:
>    [eCPRI](@ref rte_ecpri.h),
>    [L2TPv2](@ref rte_l2tpv2.h),
>    [PPP](@ref rte_ppp.h),
> -  [PDCP hdr](@ref rte_pdcp_hdr.h)
> +  [PDCP hdr](@ref rte_pdcp_hdr.h),
> +  [PDCP](@ref rte_pdcp.h),
> 
>  - **QoS**:
>    [metering](@ref rte_meter.h),
> diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
> index f0886c3bd1..01314b087e 100644
> --- a/doc/api/doxy-api.conf.in
> +++ b/doc/api/doxy-api.conf.in
> @@ -61,6 +61,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
>                            @TOPDIR@/lib/net \
>                            @TOPDIR@/lib/pcapng \
>                            @TOPDIR@/lib/pci \
> +                          @TOPDIR@/lib/pdcp \
>                            @TOPDIR@/lib/pdump \
>                            @TOPDIR@/lib/pipeline \
>                            @TOPDIR@/lib/port \
> diff --git a/lib/meson.build b/lib/meson.build
> index fd55925340..a827006d29 100644
> --- a/lib/meson.build
> +++ b/lib/meson.build
> @@ -63,6 +63,7 @@ libraries = [
>          'flow_classify', # flow_classify lib depends on pkt framework table lib
>          'graph',
>          'node',
> +        'pdcp', # pdcp lib depends on crypto and security
>  ]
> 
>  optional_libs = [
> diff --git a/lib/pdcp/meson.build b/lib/pdcp/meson.build
> new file mode 100644
> index 0000000000..a7f5a408cf
> --- /dev/null
> +++ b/lib/pdcp/meson.build
> @@ -0,0 +1,8 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(C) 2022 Marvell.
> +#
Extra # here.

Do we support compilation on Windows as well?
A check for that is missing here.

> +
> +sources = files('pdcp_crypto.c', 'pdcp_process.c', 'rte_pdcp.c')
> +headers = files('rte_pdcp.h')

Do we need to add the indirect header lib/pdcp/rte_pdcp_group.h here as well?

> +
> +deps += ['security']

Is cryptodev not needed as a dependency?

> diff --git a/lib/pdcp/pdcp_crypto.c b/lib/pdcp/pdcp_crypto.c
> new file mode 100644
> index 0000000000..7ffb8a07a7
> --- /dev/null
> +++ b/lib/pdcp/pdcp_crypto.c
> @@ -0,0 +1,240 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#include <rte_crypto.h>
> +#include <rte_crypto_sym.h>
> +#include <rte_cryptodev.h>
> +#include <rte_pdcp.h>
> +
> +#include "pdcp_crypto.h"
> +#include "pdcp_entity.h"
> +
> +static int
> +pdcp_crypto_caps_cipher_verify(uint8_t dev_id, const struct rte_crypto_sym_xform *c_xfrm)
> +{
> +	const struct rte_cryptodev_symmetric_capability *cap;
> +	struct rte_cryptodev_sym_capability_idx cap_idx;
> +	int ret;
> +
> +	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
> +	cap_idx.algo.cipher = c_xfrm->cipher.algo;
> +
> +	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
> +	if (cap == NULL)
> +		return -1;
> +
> +	ret = rte_cryptodev_sym_capability_check_cipher(cap, c_xfrm->cipher.key.length,
> +							c_xfrm->cipher.iv.length);
> +
> +	return ret;
> +}
> +
> +static int
> +pdcp_crypto_caps_auth_verify(uint8_t dev_id, const struct rte_crypto_sym_xform *a_xfrm)
> +{
> +	const struct rte_cryptodev_symmetric_capability *cap;
> +	struct rte_cryptodev_sym_capability_idx cap_idx;
> +	int ret;
> +
> +	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
> +	cap_idx.algo.auth = a_xfrm->auth.algo;
> +
> +	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
> +	if (cap == NULL)
> +		return -1;
> +
> +	ret = rte_cryptodev_sym_capability_check_auth(cap, a_xfrm->auth.key.length,
> +						      a_xfrm->auth.digest_length,
> +						      a_xfrm->auth.iv.length);
> +
> +	return ret;
> +}
> +
> +static int
> +pdcp_crypto_xfrm_validate(const struct rte_pdcp_entity_conf *conf,
> +				 const struct rte_crypto_sym_xform *c_xfrm,
> +				 const struct rte_crypto_sym_xform *a_xfrm,
> +				 bool is_auth_then_cipher)
> +{
> +	uint16_t ciph_iv_len, auth_digest_len, auth_iv_len;
> +	int ret;
> +
> +	/*
> +	 * Uplink means PDCP entity is configured for transmit. Downlink means PDCP entity is
> +	 * configured for receive. When integrity protection is enabled, PDCP always performs
> +	 * digest-encrypted or auth-gen-encrypt for uplink (and decrypt-auth-verify for downlink).
> +	 * So for uplink, crypto chain would be auth-cipher while for downlink it would be
> +	 * cipher-auth.
> +	 *
> +	 * When integrity protection is not required, xform would be cipher only.
> +	 */
> +
> +	if (c_xfrm == NULL)
> +		return -EINVAL;
> +
> +	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK) {
> +
> +		/* With UPLINK, if auth is enabled, it should be before cipher */
> +		if (a_xfrm != NULL && !is_auth_then_cipher)
> +			return -EINVAL;
> +
> +		/* With UPLINK, cipher operation must be encrypt */
> +		if (c_xfrm->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT)
> +			return -EINVAL;
> +
> +		/* With UPLINK, auth operation (if present) must be generate */
> +		if (a_xfrm != NULL && a_xfrm->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE)
> +			return -EINVAL;
> +
> +	} else if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) {
> +
> +		/* With DOWNLINK, if auth is enabled, it should be after cipher */
> +		if (a_xfrm != NULL && is_auth_then_cipher)
> +			return -EINVAL;
> +
> +		/* With DOWNLINK, cipher operation must be decrypt */
> +		if (c_xfrm->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT)
> +			return -EINVAL;
> +
> +		/* With DOWNLINK, auth operation (if present) must be verify */
> +		if (a_xfrm != NULL && a_xfrm->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY)
> +			return -EINVAL;
> +			return -EINVAL;
> +
> +	} else {
> +		return -EINVAL;
> +	}
> +
> +	if ((c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_NULL) &&
> +	    (c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_AES_CTR) &&
> +	    (c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3) &&
> +	    (c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2))
> +		return -EINVAL;
> +
> +	if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
> +		ciph_iv_len = 0;
> +	else
> +		ciph_iv_len = PDCP_IV_LENGTH;
> +
> +	if (ciph_iv_len != c_xfrm->cipher.iv.length)
> +		return -EINVAL;
> +
> +	if (a_xfrm != NULL) {
> +		if ((a_xfrm->auth.algo != RTE_CRYPTO_AUTH_NULL) &&
> +		    (a_xfrm->auth.algo != RTE_CRYPTO_AUTH_AES_CMAC) &&
> +		    (a_xfrm->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3) &&
> +		    (a_xfrm->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2))
> +			return -EINVAL;
> +
> +		if (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)
> +			auth_digest_len = 0;
> +		else
> +			auth_digest_len = 4;

If we have a macro for the IV length, why not for the digest also?
Moreover, for NULL integrity, the digest length is also 4, with all 0s.
Refer Annex D.1 in https://www.etsi.org/deliver/etsi_ts/133500_133599/133501/15.04.00_60/ts_133501v150400p.pdf

Digest length would be 0 only in the case of a_xfrm == NULL.

> +
> +		if (auth_digest_len != a_xfrm->auth.digest_length)
> +			return -EINVAL;
> +
> +		if ((a_xfrm->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) ||
> +		    (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2))
> +			auth_iv_len = PDCP_IV_LENGTH;
> +		else
> +			auth_iv_len = 0;
> +
> +		if (a_xfrm->auth.iv.length != auth_iv_len)
> +			return -EINVAL;
> +	}
> +
> +	if (!rte_cryptodev_is_valid_dev(conf->dev_id))
> +		return -EINVAL;
> +
> +	ret = pdcp_crypto_caps_cipher_verify(conf->dev_id, c_xfrm);
> +	if (ret)
> +		return -ENOTSUP;
> +
> +	if (a_xfrm != NULL) {
> +		ret = pdcp_crypto_caps_auth_verify(conf->dev_id, a_xfrm);
> +		if (ret)
> +			return -ENOTSUP;
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +pdcp_crypto_sess_create(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
> +{
> +	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
> +	struct entity_priv *en_priv;
> +	bool is_auth_then_cipher;
> +	int ret;
> +
> +	if (entity == NULL || conf == NULL || conf->crypto_xfrm == NULL)
> +		return -EINVAL;
> +
> +	en_priv = entity_priv_get(entity);
> +
> +	en_priv->dev_id = conf->dev_id;
> +
> +	if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> +		c_xfrm = conf->crypto_xfrm;
> +		a_xfrm = conf->crypto_xfrm->next;
> +		is_auth_then_cipher = false;
> +	} else if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +		a_xfrm = conf->crypto_xfrm;
> +		c_xfrm = conf->crypto_xfrm->next;
> +		is_auth_then_cipher = true;
> +	} else {
> +		return -EINVAL;
> +	}
> +
> +	ret = pdcp_crypto_xfrm_validate(conf, c_xfrm, a_xfrm, is_auth_then_cipher);
> +	if (ret)
> +		return ret;
> +
> +	if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
> +		c_xfrm->cipher.iv.offset = 0;
> +	else
> +		c_xfrm->cipher.iv.offset = PDCP_IV_OFFSET;
> +
> +	if (a_xfrm != NULL) {
> +		if (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)
> +			a_xfrm->auth.iv.offset = 0;
> +		else
> +			if (c_xfrm->cipher.iv.offset)
> +				a_xfrm->auth.iv.offset = PDCP_IV_OFFSET + PDCP_IV_LENGTH;
> +			else
> +				a_xfrm->auth.iv.offset = PDCP_IV_OFFSET;
> +	}
> +
> +	if (conf->sess_mpool == NULL)
> +		return -EINVAL;
> +
> +	en_priv->crypto_sess = rte_cryptodev_sym_session_create(conf->dev_id, conf->crypto_xfrm,
> +								conf->sess_mpool);
> +	if (en_priv->crypto_sess == NULL) {
> +		/* API returns positive values as error codes */
> +		return -rte_errno;
> +	}
> +
> +	rte_cryptodev_sym_session_opaque_data_set(en_priv->crypto_sess, (uint64_t)entity);
> +
> +	return 0;
> +}
> +
> +int
> +pdcp_crypto_sess_destroy(struct rte_pdcp_entity *entity)
> +{
> +	struct entity_priv *en_priv;
> +
> +	if (entity == NULL)
> +		return -EINVAL;
> +
> +	en_priv = entity_priv_get(entity);
> +
> +	if (en_priv->crypto_sess != NULL) {
> +		rte_cryptodev_sym_session_free(en_priv->dev_id, en_priv->crypto_sess);
> +		en_priv->crypto_sess = NULL;
> +	}
> +
> +	return 0;
> +}
> diff --git a/lib/pdcp/pdcp_crypto.h b/lib/pdcp/pdcp_crypto.h
> new file mode 100644
> index 0000000000..dc625b35d0
> --- /dev/null
> +++ b/lib/pdcp/pdcp_crypto.h
> @@ -0,0 +1,20 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#ifndef _PDCP_CRYPTO_H_
> +#define _PDCP_CRYPTO_H_
> +
> +#include <rte_crypto.h>
> +#include <rte_crypto_sym.h>
> +#include <rte_pdcp.h>
> +
> +#define PDCP_IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
> +#define PDCP_IV_LENGTH 16
> +
> +int pdcp_crypto_sess_create(struct rte_pdcp_entity *entity,
> +			    const struct rte_pdcp_entity_conf *conf);
> +
> +int pdcp_crypto_sess_destroy(struct rte_pdcp_entity *entity);
> +
> +#endif /* _PDCP_CRYPTO_H_ */
> diff --git a/lib/pdcp/pdcp_entity.h b/lib/pdcp/pdcp_entity.h
> new file mode 100644
> index 0000000000..e312fd4a8c
> --- /dev/null
> +++ b/lib/pdcp/pdcp_entity.h
> @@ -0,0 +1,218 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#ifndef _PDCP_ENTITY_H_
> +#define _PDCP_ENTITY_H_
> +
> +#include <rte_common.h>
> +#include <rte_crypto_sym.h>
> +#include <rte_mempool.h>
> +#include <rte_pdcp.h>
> +#include <rte_security.h>
> +
> +struct entity_priv;
> +
> +#define PDCP_PDU_HDR_SIZE_SN_12 (RTE_ALIGN_MUL_CEIL(12, 8) / 8)
> +#define PDCP_PDU_HDR_SIZE_SN_18 (RTE_ALIGN_MUL_CEIL(18, 8) / 8)
> +
> +#define PDCP_GET_SN_12_FROM_COUNT(c) ((c) & 0xfff)
> +#define PDCP_GET_SN_18_FROM_COUNT(c) ((c) & 0x3ffff)
> +
> +#define PDCP_GET_HFN_SN_12_FROM_COUNT(c) (((c) >> 12) & 0xfffff)
> +#define PDCP_GET_HFN_SN_18_FROM_COUNT(c) (((c) >> 18) & 0x3fff)
> +
> +#define PDCP_SET_COUNT_FROM_HFN_SN_12(h, s) ((((h) & 0xfffff) << 12) | ((s) & 0xfff))
> +#define PDCP_SET_COUNT_FROM_HFN_SN_18(h, s) ((((h) & 0x3fff) << 18) | ((s) & 0x3ffff))
> +
> +#define PDCP_SN_12_WINDOW_SZ 0x800
> +#define PDCP_SN_18_WINDOW_SZ 0x20000
> +
> +#define PDCP_SN_12_HFN_MAX ((1 << (32 - 12)) - 1)
> +#define PDCP_SN_12_HFN_MIN 0
> +#define PDCP_SN_18_HFN_MAX ((1 << (32 - 18)) - 1)
> +#define PDCP_SN_18_HFN_MIN 0
> +

Can we have common defines for SN-12 and SN-18 and take the SN size as a parameter?
We can have something like this.

#define PDCP_PDU_HDR_SIZE(sn_size) (RTE_ALIGN_MUL_CEIL((sn_size), 8) / 8)
#define PDCP_GET_SN_FROM_COUNT(c, sn_size) ((c) & ((1 << (sn_size)) - 1))
#define PDCP_GET_HFN_FROM_COUNT(c, sn_size) (((c) >> (sn_size)) & ((1 << (32 - (sn_size))) - 1))
#define PDCP_SET_COUNT_FROM_HFN_SN(h, s, sn_size) \
	((((h) & ((1 << (32 - (sn_size))) - 1)) << (sn_size)) | ((s) & ((1 << (sn_size)) - 1)))
#define PDCP_HFN_MAX(sn_size) ((1 << (32 - (sn_size))) - 1)
#define PDCP_HFN_MIN 0
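
With that, e.g. pdcp_sn_12_count_get() could use PDCP_GET_SN_FROM_COUNT(rx_deliv, 12)
and PDCP_SET_COUNT_FROM_HFN_SN(rhfn, rsn, 12) instead of the SN-size specific variants.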

> +/* IV generation function based on the entity configuration */
> +typedef void (*iv_gen_t)(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> +			 uint32_t count);
> +
> +enum pdcp_pdu_type {
> +	PDCP_PDU_TYPE_CTRL = 0,
> +	PDCP_PDU_TYPE_DATA = 1,
> +};
> +
> +enum pdcp_up_ctrl_pdu_type {
> +	PDCP_UP_CTRL_PDU_TYPE_STATUS_REPORT,
> +	PDCP_UP_CTRL_PDU_TYPE_ROHC_FEEDBACK,
> +	PDCP_UP_CTRL_PDU_TYPE_EHC_FEEDBACK,
> +	PDCP_UP_CRTL_PDU_TYPE_UDC_FEEDBACK
> +};
> +
> +struct entity_state {
> +	uint32_t rx_next;
> +	uint32_t tx_next;
> +	uint32_t rx_deliv;
> +	uint32_t rx_reord;
> +};
> +
> +union auth_iv_partial {
> +	/* For AES-CMAC, there is no IV, but message gets prepended */
> +	struct {
> +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
> +		uint64_t count : 32;
> +		uint64_t zero_38_39 : 2;
> +		uint64_t direction : 1;
> +		uint64_t bearer : 5;
> +		uint64_t zero_40_63 : 24;
> +#else
> +		uint64_t count : 32;
> +		uint64_t bearer : 5;
> +		uint64_t direction : 1;
> +		uint64_t zero_38_39 : 2;
> +		uint64_t zero_40_63 : 24;
> +#endif
> +	} aes_cmac;
> +	struct {
> +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
> +		uint64_t count : 32;
> +		uint64_t zero_37_39 : 3;
> +		uint64_t bearer : 5;
> +		uint64_t zero_40_63 : 24;
> +
> +		uint64_t rsvd_65_71 : 7;
> +		uint64_t direction_64 : 1;
> +		uint64_t rsvd_72_111 : 40;
> +		uint64_t rsvd_113_119 : 7;
> +		uint64_t direction_112 : 1;
> +		uint64_t rsvd_120_127 : 8;
> +#else
> +		uint64_t count : 32;
> +		uint64_t bearer : 5;
> +		uint64_t zero_37_39 : 3;
> +		uint64_t zero_40_63 : 24;
> +
> +		uint64_t direction_64 : 1;
> +		uint64_t rsvd_65_71 : 7;
> +		uint64_t rsvd_72_111 : 40;
> +		uint64_t direction_112 : 1;
> +		uint64_t rsvd_113_119 : 7;
> +		uint64_t rsvd_120_127 : 8;
> +#endif
> +	} zs;
> +	uint64_t u64[2];
> +};
> +
> +union cipher_iv_partial {
> +	struct {
> +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
> +		uint64_t count : 32;
> +		uint64_t zero_38_39 : 2;
> +		uint64_t direction : 1;
> +		uint64_t bearer : 5;
> +		uint64_t zero_40_63 : 24;
> +
> +		uint64_t zero_64_127;
> +#else
> +		uint64_t count : 32;
> +		uint64_t bearer : 5;
> +		uint64_t direction : 1;
> +		uint64_t zero_38_39 : 2;
> +		uint64_t zero_40_63 : 24;
> +
> +		uint64_t zero_64_127;

Can we take zero_64_127 out of the #if-else?

> +#endif
> +	} aes_ctr;
> +	struct {
> +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
> +		uint64_t count : 32;
> +		uint64_t zero_38_39 : 2;
> +		uint64_t direction : 1;
> +		uint64_t bearer : 5;
> +		uint64_t zero_40_63 : 24;
> +
> +		uint64_t rsvd_64_127;
> +#else
> +		uint64_t count : 32;
> +		uint64_t bearer : 5;
> +		uint64_t direction : 1;
> +		uint64_t zero_38_39 : 2;
> +		uint64_t zero_40_63 : 24;
> +
> +		uint64_t rsvd_64_127;
> +#endif

rsvd_64_127 can also be moved out of the #if-else.

> +	} zs;
> +	uint64_t u64[2];
> +};
> +
> +/*
> + * Layout of PDCP entity: [rte_pdcp_entity] [entity_priv] [entity_dl/ul]

If the layout is fixed, can we have a zero-length array in rte_pdcp_entity for entity_priv, and another in entity_priv for entity_dl/ul?
I see that entity_dl/ul are not completely defined. You can define them later when they are supported.
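
Something like this rough sketch (placeholder names, not the actual structs)
would let the compiler do the pointer arithmetic:

struct example_entity {
	uint16_t max_pkt_cache;	/* stand-in public field */
	uint8_t priv[0];	/* entity_priv follows directly */
};

struct example_priv {
	uint64_t state;		/* stand-in private field */
	uint8_t dl_ul[0];	/* entity_dl/ul part follows */
};

static inline struct example_priv *
example_priv_get(struct example_entity *e)
{
	return (struct example_priv *)e->priv;
}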

> + */
> +

Extra blank line.

> +struct entity_priv {
> +	/** Crypto sym session. */
> +	struct rte_cryptodev_sym_session *crypto_sess;
> +	/** Entity specific IV generation function. */
> +	iv_gen_t iv_gen;
> +	/** Pre-prepared auth IV. */
> +	union auth_iv_partial auth_iv_part;
> +	/** Pre-prepared cipher IV. */
> +	union cipher_iv_partial cipher_iv_part;
> +	/** Entity state variables. */
> +	struct entity_state state;
> +	/** Flags. */
> +	struct {
> +		/** PDCP PDU has 4 byte MAC-I. */
> +		uint64_t is_authenticated : 1;
> +		/** Cipher offset & length in bits. */
> +		uint64_t is_ciph_in_bits : 1;
> +		/** Auth offset & length in bits. */
> +		uint64_t is_auth_in_bits : 1;
> +		/** Is UL/transmitting PDCP entity */
> +		uint64_t is_ul_entity : 1;
> +	} flags;
> +	/** Crypto op pool. */
> +	struct rte_mempool *cop_pool;
> +	/** PDCP header size. */
> +	uint8_t hdr_sz;
> +	/** PDCP AAD size. For AES-CMAC, additional message is prepended for the operation. */
> +	uint8_t aad_sz;
> +	/** Device ID of the device to be used for offload. */
> +	uint8_t dev_id;
> +};
> +
> +struct entity_priv_dl_part {
> +	/* TODO - when in-order-delivery is supported, post PDCP packets would need to be cached. */
> +	uint8_t dummy;
> +};
> +
> +struct entity_priv_ul_part {
> +	/*
> +	 * TODO - when re-establish is supported, both plain & post PDCP packets would need to be
> +	 * cached.
> +	 */
> +	uint8_t dummy;
> +};
> +
> +static inline struct entity_priv *
> +entity_priv_get(const struct rte_pdcp_entity *entity) {
> +	return RTE_PTR_ADD(entity, sizeof(struct rte_pdcp_entity));
> +}
> +
> +static inline struct entity_priv_dl_part *
> +entity_dl_part_get(const struct rte_pdcp_entity *entity) {
> +	return RTE_PTR_ADD(entity, sizeof(struct rte_pdcp_entity) + sizeof(struct entity_priv));
> +}
> +
> +static inline struct entity_priv_ul_part *
> +entity_ul_part_get(const struct rte_pdcp_entity *entity) {
> +	return RTE_PTR_ADD(entity, sizeof(struct rte_pdcp_entity) + sizeof(struct entity_priv));
> +}

The above inline functions may also not be needed if we use zero-length arrays.

> +
> +static inline int
> +pdcp_hdr_size_get(enum rte_security_pdcp_sn_size sn_size)
> +{
> +	return RTE_ALIGN_MUL_CEIL(sn_size, 8) / 8;
> +}

PDCP_PDU_HDR_SIZE is the same as this inline function.
Can we do away with one of them?

> +
> +#endif /* _PDCP_ENTITY_H_ */
> diff --git a/lib/pdcp/pdcp_process.c b/lib/pdcp/pdcp_process.c
> new file mode 100644
> index 0000000000..282cf38ec4
> --- /dev/null
> +++ b/lib/pdcp/pdcp_process.c
> @@ -0,0 +1,1195 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#include <rte_crypto.h>
> +#include <rte_crypto_sym.h>
> +#include <rte_cryptodev.h>
> +#include <rte_memcpy.h>
> +#include <rte_pdcp.h>
> +#include <rte_pdcp_hdr.h>
> +
> +#include "pdcp_crypto.h"
> +#include "pdcp_entity.h"
> +#include "pdcp_process.h"
> +
> +#define PDCP_MAC_I_LEN 4

Can you define it at the same place where PDCP_IV_LENGTH is defined, and use it in the xform validation?

> +
> +/* Enum of supported algorithms for ciphering */
> +enum pdcp_cipher_algo {
> +	PDCP_CIPHER_ALGO_NULL,
> +	PDCP_CIPHER_ALGO_AES,
> +	PDCP_CIPHER_ALGO_ZUC,
> +	PDCP_CIPHER_ALGO_SNOW3G,
> +	PDCP_CIPHER_ALGO_MAX
> +};
> +
> +/* Enum of supported algorithms for integrity */
> +enum pdcp_auth_algo {
> +	PDCP_AUTH_ALGO_NULL,
> +	PDCP_AUTH_ALGO_AES,
> +	PDCP_AUTH_ALGO_ZUC,
> +	PDCP_AUTH_ALGO_SNOW3G,
> +	PDCP_AUTH_ALGO_MAX
> +};
> +
> +/* IV generation functions based on type of operation (cipher - auth) */
> +
> +static void
> +pdcp_iv_gen_null_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> +{
> +	/* No IV required for NULL cipher + NULL auth */
> +	RTE_SET_USED(cop);
> +	RTE_SET_USED(en_priv);
> +	RTE_SET_USED(count);
> +}
> +
> +static void
> +pdcp_iv_gen_null_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> +			  uint32_t count)
> +{
> +	struct rte_crypto_sym_op *op = cop->sym;
> +	struct rte_mbuf *mb = op->m_src;
> +	uint8_t *m_ptr;
> +	uint64_t m;
> +
> +	/* AES-CMAC requires message to be prepended with info on count etc */
> +
> +	/* Prepend by 8 bytes to add custom message */
> +	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
> +
> +	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +
> +	rte_memcpy(m_ptr, &m, 8);
> +}
> +
> +static void
> +pdcp_iv_gen_null_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> +{
> +	uint64_t iv_u64[2];
> +	uint8_t *iv;
> +
> +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> +
> +	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	rte_memcpy(iv, &iv_u64[0], 8);
> +
> +	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
> +	rte_memcpy(iv + 8, &iv_u64[1], 8);
> +}
> +
> +static void
> +pdcp_iv_gen_aes_ctr_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> +			 uint32_t count)
> +{
> +	uint64_t iv_u64[2];
> +	uint8_t *iv;
> +
> +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> +
> +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	iv_u64[1] = 0;
> +	rte_memcpy(iv, iv_u64, 16);
> +}
> +
> +static void
> +pdcp_iv_gen_zs_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> +{
> +	uint64_t iv_u64;
> +	uint8_t *iv;
> +
> +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> +
> +	iv_u64 = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	rte_memcpy(iv, &iv_u64, 8);
> +	rte_memcpy(iv + 8, &iv_u64, 8);
> +}
> +
> +static void
> +pdcp_iv_gen_zs_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> +{
> +	uint64_t iv_u64[2];
> +	uint8_t *iv;
> +
> +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> +
> +	/* Generating cipher IV */
> +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	rte_memcpy(iv, &iv_u64[0], 8);
> +	rte_memcpy(iv + 8, &iv_u64[0], 8);
> +
> +	iv += PDCP_IV_LENGTH;
> +
> +	/* Generating auth IV */
> +	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	rte_memcpy(iv, &iv_u64[0], 8);
> +
> +	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
> +	rte_memcpy(iv + 8, &iv_u64[1], 8);
> +}
> +
> +static void
> +pdcp_iv_gen_zs_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> +			uint32_t count)
> +{
> +	struct rte_crypto_sym_op *op = cop->sym;
> +	struct rte_mbuf *mb = op->m_src;
> +	uint8_t *m_ptr, *iv;
> +	uint64_t iv_u64[2];
> +	uint64_t m;
> +
> +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	rte_memcpy(iv, &iv_u64[0], 8);
> +	rte_memcpy(iv + 8, &iv_u64[0], 8);
> +
> +	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
> +	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	rte_memcpy(m_ptr, &m, 8);
> +}
> +
> +static void
> +pdcp_iv_gen_aes_ctr_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> +			    uint32_t count)
> +{
> +	struct rte_crypto_sym_op *op = cop->sym;
> +	struct rte_mbuf *mb = op->m_src;
> +	uint8_t *m_ptr, *iv;
> +	uint64_t iv_u64[2];
> +	uint64_t m;
> +
> +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> +
> +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	iv_u64[1] = 0;
> +	rte_memcpy(iv, iv_u64, PDCP_IV_LENGTH);
> +
> +	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
> +	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	rte_memcpy(m_ptr, &m, 8);
> +}
> +
> +static void
> +pdcp_iv_gen_aes_ctr_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> +{
> +	uint64_t iv_u64[2];
> +	uint8_t *iv;
> +
> +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> +
> +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	iv_u64[1] = 0;
> +	rte_memcpy(iv, iv_u64, PDCP_IV_LENGTH);
> +
> +	iv += PDCP_IV_LENGTH;
> +
> +	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> +	rte_memcpy(iv, &iv_u64[0], 8);
> +
> +	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
> +	rte_memcpy(iv + 8, &iv_u64[1], 8);
> +}
> +
> +static int
> +pdcp_crypto_xfrm_get(const struct rte_pdcp_entity_conf *conf, struct rte_crypto_sym_xform **c_xfrm,
> +		     struct rte_crypto_sym_xform **a_xfrm)
> +{
> +	*c_xfrm = NULL;
> +	*a_xfrm = NULL;
> +
> +	if (conf->crypto_xfrm == NULL)
> +		return -EINVAL;
> +
> +	if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> +		*c_xfrm = conf->crypto_xfrm;
> +		*a_xfrm = conf->crypto_xfrm->next;
> +	} else if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +		*a_xfrm = conf->crypto_xfrm;
> +		*c_xfrm = conf->crypto_xfrm->next;
> +	} else {
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +pdcp_iv_gen_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
> +{
> +	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
> +	enum rte_security_pdcp_direction direction;
> +	enum pdcp_cipher_algo ciph_algo;
> +	enum pdcp_auth_algo auth_algo;
> +	struct entity_priv *en_priv;
> +	int ret;
> +
> +	en_priv = entity_priv_get(entity);
> +
> +	direction = conf->pdcp_xfrm.pkt_dir;
> +	if (conf->reverse_iv_direction)
> +		direction = !direction;
> +
> +	ret = pdcp_crypto_xfrm_get(conf, &c_xfrm, &a_xfrm);
> +	if (ret)
> +		return ret;
> +
> +	if (c_xfrm == NULL)
> +		return -EINVAL;
> +
> +	memset(&en_priv->auth_iv_part, 0, sizeof(en_priv->auth_iv_part));
> +	memset(&en_priv->cipher_iv_part, 0, sizeof(en_priv->cipher_iv_part));
> +
> +	switch (c_xfrm->cipher.algo) {
> +	case RTE_CRYPTO_CIPHER_NULL:
> +		ciph_algo = PDCP_CIPHER_ALGO_NULL;
> +		break;
> +	case RTE_CRYPTO_CIPHER_AES_CTR:
> +		ciph_algo = PDCP_CIPHER_ALGO_AES;
> +		en_priv->cipher_iv_part.aes_ctr.bearer = conf->pdcp_xfrm.bearer;
> +		en_priv->cipher_iv_part.aes_ctr.direction = direction;
> +		break;
> +	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
> +		ciph_algo = PDCP_CIPHER_ALGO_SNOW3G;
> +		en_priv->cipher_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
> +		en_priv->cipher_iv_part.zs.direction = direction;
> +		break;
> +	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
> +		ciph_algo = PDCP_CIPHER_ALGO_ZUC;
> +		en_priv->cipher_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
> +		en_priv->cipher_iv_part.zs.direction = direction;
> +		break;
> +	default:
> +		return -ENOTSUP;
> +	}
> +
> +	if (a_xfrm != NULL) {
> +		switch (a_xfrm->auth.algo) {
> +		case RTE_CRYPTO_AUTH_NULL:
> +			auth_algo = PDCP_AUTH_ALGO_NULL;
> +			break;
> +		case RTE_CRYPTO_AUTH_AES_CMAC:
> +			auth_algo = PDCP_AUTH_ALGO_AES;
> +			en_priv->auth_iv_part.aes_cmac.bearer = conf->pdcp_xfrm.bearer;
> +			en_priv->auth_iv_part.aes_cmac.direction = direction;
> +			break;
> +		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
> +			auth_algo = PDCP_AUTH_ALGO_SNOW3G;
> +			en_priv->auth_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
> +			en_priv->auth_iv_part.zs.direction_64 = direction;
> +			en_priv->auth_iv_part.zs.direction_112 = direction;
> +			break;
> +		case RTE_CRYPTO_AUTH_ZUC_EIA3:
> +			auth_algo = PDCP_AUTH_ALGO_ZUC;
> +			en_priv->auth_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
> +			en_priv->auth_iv_part.zs.direction_64 = direction;
> +			en_priv->auth_iv_part.zs.direction_112 = direction;
> +			break;
> +		default:
> +			return -ENOTSUP;
> +		}
> +	} else {
> +		auth_algo = PDCP_AUTH_ALGO_NULL;
> +	}
> +
> +	static const iv_gen_t iv_gen_map[PDCP_CIPHER_ALGO_MAX][PDCP_AUTH_ALGO_MAX] = {
> +		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_null_null,
> +		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_null_aes_cmac,
> +		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_null_zs,
> +		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_null_zs,
> +
> +		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_aes_ctr_null,
> +		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_aes_ctr_aes_cmac,
> +		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_aes_ctr_zs,
> +		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_aes_ctr_zs,
> +
> +		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_zs_null,
> +		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_zs_aes_cmac,
> +		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_zs_zs,
> +		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_zs_zs,
> +
> +		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_zs_null,
> +		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_zs_aes_cmac,
> +		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_zs_zs,
> +		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_zs_zs,
> +	};
> +
> +	en_priv->iv_gen = iv_gen_map[ciph_algo][auth_algo];
> +
> +	return 0;
> +}
> +
> +static inline void
> +cop_prepare(const struct entity_priv *en_priv, struct rte_mbuf *mb, struct rte_crypto_op *cop,
> +	    uint8_t data_offset, uint32_t count, const bool is_auth)
> +{
> +	const struct rte_crypto_op cop_init = {
> +		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
> +		.sess_type = RTE_CRYPTO_OP_WITH_SESSION,
> +	};
> +	struct rte_crypto_sym_op *op;
> +	uint32_t pkt_len;
> +
> +	const uint8_t ciph_shift = 3 * en_priv->flags.is_ciph_in_bits;
> +	const uint8_t auth_shift = 3 * en_priv->flags.is_auth_in_bits;
> +
> +	op = cop->sym;
> +	cop->raw = cop_init.raw;
> +	op->m_src = mb;
> +	op->m_dst = mb;
> +
> +	/* Set IV */
> +	en_priv->iv_gen(cop, en_priv, count);
> +
> +	/* Prepare op */
> +	pkt_len = rte_pktmbuf_pkt_len(mb);
> +	op->cipher.data.offset = data_offset << ciph_shift;
> +	op->cipher.data.length = (pkt_len - data_offset) << ciph_shift;
> +
> +	if (is_auth) {
> +		op->auth.data.offset = 0;
> +		op->auth.data.length = (pkt_len - PDCP_MAC_I_LEN) << auth_shift;
> +		op->auth.digest.data = rte_pktmbuf_mtod_offset(mb, uint8_t *,
> +							       (pkt_len - PDCP_MAC_I_LEN));
> +	}
> +
> +	__rte_crypto_sym_op_attach_sym_session(op, en_priv->crypto_sess);
> +}
> +
> +static inline bool
> +pdcp_pre_process_uplane_sn_12_ul_set_sn(struct entity_priv *en_priv, struct rte_mbuf *mb,
> +					uint32_t *count)
> +{
> +	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
> +	const uint8_t hdr_sz = en_priv->hdr_sz;
> +	uint32_t sn;
> +
> +	/* Prepend PDU header */
> +	pdu_hdr = (struct rte_pdcp_up_data_pdu_sn_12_hdr *)rte_pktmbuf_prepend(mb, hdr_sz);
> +	if (unlikely(pdu_hdr == NULL))
> +		return false;
> +
> +	/* Update sequence num in the PDU header */
> +	*count = __atomic_fetch_add(&en_priv->state.tx_next, 1, __ATOMIC_RELAXED);
> +	sn = PDCP_GET_SN_12_FROM_COUNT(*count);
> +
> +	pdu_hdr->d_c = PDCP_PDU_TYPE_DATA;
> +	pdu_hdr->sn_11_8 = ((sn & 0xf00) >> 8);
> +	pdu_hdr->sn_7_0 = (sn & 0xff);
> +	pdu_hdr->r = 0;
> +	return true;
> +}
> +
> +static uint16_t
> +pdcp_pre_process_uplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	uint16_t nb_cop;
> +	uint32_t count;
> +	int i;
> +
> +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> +
> +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> +					  num);
> +
> +	if (en_priv->flags.is_authenticated) {
> +		for (i = 0; i < nb_cop; i++) {
> +			if (unlikely(rte_pktmbuf_append(mb[i], PDCP_MAC_I_LEN) == NULL))
> +				goto cop_free;
> +			if (unlikely(!pdcp_pre_process_uplane_sn_12_ul_set_sn(en_priv, mb[i],
> +									      &count)))
> +				goto cop_free;
> +			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
> +		}
> +	} else {
> +		for (i = 0; i < nb_cop; i++) {
> +			if (unlikely(!pdcp_pre_process_uplane_sn_12_ul_set_sn(en_priv, mb[i],
> +									      &count)))
> +				goto cop_free;
> +			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, false);
> +		}
> +	}
> +
> +	*nb_err = num - nb_cop;
> +	return nb_cop;
> +cop_free:
> +	/* Using mempool API since crypto API is not providing bulk free */
> +	rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[i], nb_cop - i);
> +	*nb_err = num - i;
> +	return i;
> +}
> +
> +static inline bool
> +pdcp_pre_process_uplane_sn_18_ul_set_sn(struct entity_priv *en_priv, struct rte_mbuf *mb,
> +					uint32_t *count)
> +{
> +	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
> +	const uint8_t hdr_sz = en_priv->hdr_sz;
> +	uint32_t sn;
> +
> +	/* Prepend PDU header */
> +	pdu_hdr = (struct rte_pdcp_up_data_pdu_sn_18_hdr *)rte_pktmbuf_prepend(mb, hdr_sz);
> +	if (unlikely(pdu_hdr == NULL))
> +		return false;
> +
> +	/* Update sequence num in the PDU header */
> +	*count = __atomic_fetch_add(&en_priv->state.tx_next, 1, __ATOMIC_RELAXED);
> +	sn = PDCP_GET_SN_18_FROM_COUNT(*count);
> +
> +	pdu_hdr->d_c = PDCP_PDU_TYPE_DATA;
> +	pdu_hdr->sn_17_16 = ((sn & 0x30000) >> 16);
> +	pdu_hdr->sn_15_8 = ((sn & 0xff00) >> 8);
> +	pdu_hdr->sn_7_0 = (sn & 0xff);
> +	pdu_hdr->r = 0;
> +
> +	return true;
> +}
> +
> +static inline uint16_t
> +pdcp_pre_process_uplane_sn_18_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	uint16_t nb_cop;
> +	uint32_t count;
> +	int i;
> +
> +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> +
> +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> +					  num);
> +
> +	if (en_priv->flags.is_authenticated) {
> +		for (i = 0; i < nb_cop; i++) {
> +			if (unlikely(rte_pktmbuf_append(mb[i], PDCP_MAC_I_LEN) == NULL))
> +				goto cop_free;
> +			if (unlikely(!pdcp_pre_process_uplane_sn_18_ul_set_sn(en_priv, mb[i],
> +									      &count)))
> +				goto cop_free;
> +			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
> +		}
> +	} else {
> +		for (i = 0; i < nb_cop; i++) {
> +			if (unlikely(!pdcp_pre_process_uplane_sn_18_ul_set_sn(en_priv, mb[i],
> +									      &count)))
> +				goto cop_free;
> +			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, false);
> +		}
> +	}
> +
> +	*nb_err = num - nb_cop;
> +	return nb_cop;
> +
> +cop_free:
> +	/* Using mempool API since crypto API is not providing bulk free */
> +	rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[i], nb_cop - i);
> +	*nb_err = num - i;
> +	return i;
> +}
> +
> +static uint16_t
> +pdcp_pre_process_cplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
> +	uint32_t count, sn;
> +	uint16_t nb_cop;
> +	int i;
> +
> +	const uint8_t hdr_sz = en_priv->hdr_sz;
> +	const uint8_t data_offset = hdr_sz + en_priv->aad_sz;
> +
> +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> +					  num);
> +
> +	for (i = 0; i < nb_cop; i++) {
> +		/* Prepend PDU header */
> +		pdu_hdr = (struct rte_pdcp_cp_data_pdu_sn_12_hdr *)rte_pktmbuf_prepend(mb[i],
> +											hdr_sz);
> +		if (unlikely(pdu_hdr == NULL))
> +			goto cop_free;
> +		if (unlikely(rte_pktmbuf_append(mb[i], PDCP_MAC_I_LEN) == NULL))
> +			goto cop_free;
> +
> +		/* Update sequence number in the PDU header */
> +		count = __atomic_fetch_add(&en_priv->state.tx_next, 1, __ATOMIC_RELAXED);
> +		sn = PDCP_GET_SN_12_FROM_COUNT(count);
> +
> +		pdu_hdr->sn_11_8 = ((sn & 0xf00) >> 8);
> +		pdu_hdr->sn_7_0 = (sn & 0xff);
> +		pdu_hdr->r = 0;
> +
> +		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
> +	}
> +
> +	*nb_err = num - nb_cop;
> +	return nb_cop;
> +
> +cop_free:
> +	/* Using mempool API since crypto API is not providing bulk free */
> +	rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[i], nb_cop - i);
> +	*nb_err = num - i;
> +	return i;
> +}
> +
> +static uint16_t
> +pdcp_post_process_uplane_sn_12_ul(const struct rte_pdcp_entity *entity,
> +				  struct rte_mbuf *in_mb[],
> +				  struct rte_mbuf *out_mb[],
> +				  uint16_t num, uint16_t *nb_err_ret)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	const uint32_t hdr_trim_sz = en_priv->aad_sz;
> +	int i, nb_success = 0, nb_err = 0;
> +	struct rte_mbuf *err_mb[num];
> +	struct rte_mbuf *mb;
> +
> +	for (i = 0; i < num; i++) {
> +		mb = in_mb[i];
> +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
> +			err_mb[nb_err++] = mb;
> +			continue;
> +		}
> +
> +		if (hdr_trim_sz)
> +			rte_pktmbuf_adj(mb, hdr_trim_sz);
> +
> +		out_mb[nb_success++] = mb;
> +	}
> +
> +	if (unlikely(nb_err != 0))
> +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> +
> +	*nb_err_ret = nb_err;
> +	return nb_success;
> +}
> +
> +static uint16_t
> +pdcp_post_process_uplane_sn_18_ul(const struct rte_pdcp_entity *entity,
> +				  struct rte_mbuf *in_mb[],
> +				  struct rte_mbuf *out_mb[],
> +				  uint16_t num, uint16_t *nb_err_ret)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	const uint32_t hdr_trim_sz = en_priv->aad_sz;
> +	int i, nb_success = 0, nb_err = 0;
> +	struct rte_mbuf *err_mb[num];
> +	struct rte_mbuf *mb;
> +
> +	for (i = 0; i < num; i++) {
> +		mb = in_mb[i];
> +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
> +			err_mb[nb_err++] = mb;
> +			continue;
> +		}
> +
> +		if (hdr_trim_sz)
> +			rte_pktmbuf_adj(mb, hdr_trim_sz);
> +
> +		out_mb[nb_success++] = mb;
> +	}
> +
> +	if (unlikely(nb_err != 0))
> +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> +
> +	*nb_err_ret = nb_err;
> +	return nb_success;
> +}
> +
> +static uint16_t
> +pdcp_post_process_cplane_sn_12_ul(const struct rte_pdcp_entity *entity,
> +				  struct rte_mbuf *in_mb[],
> +				  struct rte_mbuf *out_mb[],
> +				  uint16_t num, uint16_t *nb_err_ret)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	const uint32_t hdr_trim_sz = en_priv->aad_sz;
> +	int i, nb_success = 0, nb_err = 0;
> +	struct rte_mbuf *mb, *err_mb[num];
> +
> +	for (i = 0; i < num; i++) {
> +		mb = in_mb[i];
> +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
> +			err_mb[nb_err++] = mb;
> +			continue;
> +		}
> +
> +		if (hdr_trim_sz)
> +			rte_pktmbuf_adj(mb, hdr_trim_sz);
> +
> +		out_mb[nb_success++] = mb;
> +	}
> +
> +	if (unlikely(nb_err != 0))
> +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> +
> +	*nb_err_ret = nb_err;
> +	return nb_success;
> +}
> +
> +static inline int
> +pdcp_sn_18_count_get(const struct rte_pdcp_entity *entity, int32_t rsn, uint32_t *count)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	uint32_t rhfn, rx_deliv;
> +
> +	rx_deliv = __atomic_load_n(&en_priv->state.rx_deliv, __ATOMIC_RELAXED);
> +	rhfn = PDCP_GET_HFN_SN_18_FROM_COUNT(rx_deliv);
> +
> +	if (rsn < (int32_t)(PDCP_GET_SN_18_FROM_COUNT(rx_deliv) - PDCP_SN_18_WINDOW_SZ)) {
> +		if (unlikely(rhfn == PDCP_SN_18_HFN_MAX))
> +			return -ERANGE;
> +		rhfn += 1;
> +	} else if ((uint32_t)rsn >= (PDCP_GET_SN_18_FROM_COUNT(rx_deliv) + PDCP_SN_18_WINDOW_SZ)) {
> +		if (unlikely(rhfn == PDCP_SN_18_HFN_MIN))
> +			return -ERANGE;
> +		rhfn -= 1;
> +	}
> +
> +	*count = PDCP_SET_COUNT_FROM_HFN_SN_18(rhfn, rsn);
> +
> +	return 0;
> +}
> +
> +static inline int
> +pdcp_sn_12_count_get(const struct rte_pdcp_entity *entity, int32_t rsn, uint32_t *count)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	uint32_t rhfn, rx_deliv;
> +
> +	rx_deliv = __atomic_load_n(&en_priv->state.rx_deliv, __ATOMIC_RELAXED);
> +	rhfn = PDCP_GET_HFN_SN_12_FROM_COUNT(rx_deliv);
> +
> +	if (rsn < (int32_t)(PDCP_GET_SN_12_FROM_COUNT(rx_deliv) - PDCP_SN_12_WINDOW_SZ)) {
> +		if (unlikely(rhfn == PDCP_SN_12_HFN_MAX))
> +			return -ERANGE;
> +		rhfn += 1;
> +	} else if ((uint32_t)rsn >= (PDCP_GET_SN_12_FROM_COUNT(rx_deliv) + PDCP_SN_12_WINDOW_SZ)) {
> +		if (unlikely(rhfn == PDCP_SN_12_HFN_MIN))
> +			return -ERANGE;
> +		rhfn -= 1;
> +	}
> +
> +	*count = PDCP_SET_COUNT_FROM_HFN_SN_12(rhfn, rsn);
> +
> +	return 0;
> +}
> +
> +static inline uint16_t
> +pdcp_pre_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				       struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err,
> +				       const bool is_integ_protected)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
> +	uint16_t nb_cop;
> +	int32_t rsn = 0;
> +	uint32_t count;
> +	int i;
> +
> +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> +
> +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> +					  num);
> +
> +	for (i = 0; i < nb_cop; i++) {
> +
> +		pdu_hdr = rte_pktmbuf_mtod(mb[i], struct rte_pdcp_up_data_pdu_sn_12_hdr *);
> +
> +		/* Check for PDU type */
> +		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
> +			rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
> +		else
> +			rte_panic("TODO: Control PDU not handled");
> +
> +		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
> +			break;
> +		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, is_integ_protected);
> +	}
> +
> +	*nb_err = num - nb_cop;
> +
> +	return nb_cop;
> +}
> +
> +static uint16_t
> +pdcp_pre_process_uplane_sn_12_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				    struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> +{
> +	return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, true);
> +}
> +
> +static uint16_t
> +pdcp_pre_process_uplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> +{
> +	return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, false);
> +}
> +
> +static inline uint16_t
> +pdcp_pre_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				       struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err,
> +				       const bool is_integ_protected)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
> +	uint16_t nb_cop;
> +	int32_t rsn = 0;
> +	uint32_t count;
> +	int i;
> +
> +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> +					  num);
> +
> +	for (i = 0; i < nb_cop; i++) {
> +		pdu_hdr = rte_pktmbuf_mtod(mb[i], struct rte_pdcp_up_data_pdu_sn_18_hdr *);
> +
> +		/* Check for PDU type */
> +		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
> +			rsn = ((pdu_hdr->sn_17_16 << 16) | (pdu_hdr->sn_15_8 << 8) |
> +			       (pdu_hdr->sn_7_0));
> +		else
> +			rte_panic("TODO: Control PDU not handled");
> +
> +		if (unlikely(pdcp_sn_18_count_get(entity, rsn, &count)))
> +			break;
> +		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, is_integ_protected);
> +	}
> +
> +	*nb_err = num - nb_cop;
> +
> +	return nb_cop;
> +}
> +
> +static uint16_t
> +pdcp_pre_process_uplane_sn_18_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				    struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> +{
> +	return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, true);
> +}
> +
> +static uint16_t
> +pdcp_pre_process_uplane_sn_18_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> +{
> +	return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, false);
> +}
> +
> +static uint16_t
> +pdcp_pre_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
> +	uint16_t nb_cop;
> +	uint32_t count;
> +	int32_t rsn;
> +	int i;
> +
> +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> +
> +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> +					  num);
> +
> +	for (i = 0; i < nb_cop; i++) {
> +		pdu_hdr = rte_pktmbuf_mtod(mb[i], struct rte_pdcp_cp_data_pdu_sn_12_hdr *);
> +		rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
> +		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
> +			break;
> +		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
> +	}
> +
> +	*nb_err = num - nb_cop;
> +	return nb_cop;
> +}
> +
> +static inline bool
> +pdcp_post_process_update_entity_state(const struct rte_pdcp_entity *entity,
> +				      const uint32_t count)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +
> +	if (count < __atomic_load_n(&en_priv->state.rx_deliv, __ATOMIC_RELAXED))
> +		return false;
> +
> +	/* t-Reordering timer is not supported - SDU will be delivered immediately.
> +	 * Update RX_DELIV to the COUNT value of the first PDCP SDU which has not
> +	 * been delivered to upper layers
> +	 */
> +	__atomic_store_n(&en_priv->state.rx_deliv, (count + 1), __ATOMIC_RELAXED);
> +
> +	if (count >= __atomic_load_n(&en_priv->state.rx_next, __ATOMIC_RELAXED))
> +		__atomic_store_n(&en_priv->state.rx_next, (count + 1), __ATOMIC_RELAXED);
> +
> +	return true;
> +}
> +
> +static inline uint16_t
> +pdcp_post_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity,
> +					struct rte_mbuf *in_mb[],
> +					struct rte_mbuf *out_mb[],
> +					uint16_t num, uint16_t *nb_err_ret,
> +					const bool is_integ_protected)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
> +	int i, nb_success = 0, nb_err = 0, rsn = 0;
> +	const uint32_t aad_sz = en_priv->aad_sz;
> +	struct rte_mbuf *err_mb[num];
> +	struct rte_mbuf *mb;
> +	uint32_t count;
> +
> +	const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
> +
> +	for (i = 0; i < num; i++) {
> +		mb = in_mb[i];
> +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
> +			goto error;
> +		pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_up_data_pdu_sn_12_hdr *,
> +						  aad_sz);
> +
> +		/* Check for PDU type */
> +		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
> +			rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
> +		else
> +			rte_panic("Control PDU should not be received");
> +
> +		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
> +			goto error;
> +
> +		if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
> +			goto error;
> +
> +		rte_pktmbuf_adj(mb, hdr_trim_sz);
> +		if (is_integ_protected)
> +			rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
> +		out_mb[nb_success++] = mb;
> +		continue;
> +
> +error:
> +		err_mb[nb_err++] = mb;
> +	}
> +
> +	if (unlikely(nb_err != 0))
> +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> +
> +	*nb_err_ret = nb_err;
> +	return nb_success;
> +}
> +
> +static uint16_t
> +pdcp_post_process_uplane_sn_12_dl_ip(const struct rte_pdcp_entity *entity,
> +				     struct rte_mbuf *in_mb[],
> +				     struct rte_mbuf *out_mb[],
> +				     uint16_t num, uint16_t *nb_err)
> +{
> +	return pdcp_post_process_uplane_sn_12_dl_flags(entity, in_mb, out_mb, num, nb_err, true);
> +}
> +
> +static uint16_t
> +pdcp_post_process_uplane_sn_12_dl(const struct rte_pdcp_entity *entity,
> +				  struct rte_mbuf *in_mb[],
> +				  struct rte_mbuf *out_mb[],
> +				  uint16_t num, uint16_t *nb_err)
> +{
> +	return pdcp_post_process_uplane_sn_12_dl_flags(entity, in_mb, out_mb, num, nb_err, false);
> +}
> +
> +static inline uint16_t
> +pdcp_post_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity,
> +					struct rte_mbuf *in_mb[],
> +					struct rte_mbuf *out_mb[],
> +					uint16_t num, uint16_t *nb_err_ret,
> +					const bool is_integ_protected)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
> +	const uint32_t aad_sz = en_priv->aad_sz;
> +	int i, nb_success = 0, nb_err = 0;
> +	struct rte_mbuf *mb, *err_mb[num];
> +	int32_t rsn = 0;
> +	uint32_t count;
> +
> +	const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
> +
> +	for (i = 0; i < num; i++) {
> +		mb = in_mb[i];
> +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
> +			goto error;
> +
> +		pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_up_data_pdu_sn_18_hdr *,
> +						  aad_sz);
> +
> +		/* Check for PDU type */
> +		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
> +			rsn = ((pdu_hdr->sn_17_16 << 16) | (pdu_hdr->sn_15_8 << 8) |
> +			       (pdu_hdr->sn_7_0));
> +		else
> +			rte_panic("Control PDU should not be received");
> +
> +		if (unlikely(pdcp_sn_18_count_get(entity, rsn, &count)))
> +			goto error;
> +
> +		if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
> +			goto error;
> +
> +		rte_pktmbuf_adj(mb, hdr_trim_sz);
> +		if (is_integ_protected)
> +			rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
> +		out_mb[nb_success++] = mb;
> +		continue;
> +
> +error:
> +		err_mb[nb_err++] = mb;
> +	}
> +
> +	if (unlikely(nb_err != 0))
> +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> +
> +	*nb_err_ret = nb_err;
> +	return nb_success;
> +}
> +
> +static uint16_t
> +pdcp_post_process_uplane_sn_18_dl_ip(const struct rte_pdcp_entity *entity,
> +				     struct rte_mbuf *in_mb[],
> +				     struct rte_mbuf *out_mb[],
> +				     uint16_t num, uint16_t *nb_err)
> +{
> +	return pdcp_post_process_uplane_sn_18_dl_flags(entity, in_mb, out_mb, num, nb_err, true);
> +}
> +
> +static uint16_t
> +pdcp_post_process_uplane_sn_18_dl(const struct rte_pdcp_entity *entity,
> +				  struct rte_mbuf *in_mb[],
> +				  struct rte_mbuf *out_mb[],
> +				  uint16_t num, uint16_t *nb_err)
> +{
> +	return pdcp_post_process_uplane_sn_18_dl_flags(entity, in_mb, out_mb, num, nb_err, false);
> +}
> +
> +static uint16_t
> +pdcp_post_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity,
> +				  struct rte_mbuf *in_mb[],
> +				  struct rte_mbuf *out_mb[],
> +				  uint16_t num, uint16_t *nb_err_ret)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
> +	const uint32_t aad_sz = en_priv->aad_sz;
> +	int i, nb_success = 0, nb_err = 0;
> +	struct rte_mbuf *err_mb[num];
> +	struct rte_mbuf *mb;
> +	uint32_t count;
> +	int32_t rsn;
> +
> +	const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
> +
> +	for (i = 0; i < num; i++) {
> +		mb = in_mb[i];
> +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
> +			goto error;
> +
> +		pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_cp_data_pdu_sn_12_hdr *,
> +						  aad_sz);
> +		rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
> +
> +		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
> +			goto error;
> +
> +		if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
> +			goto error;
> +
> +		rte_pktmbuf_adj(mb, hdr_trim_sz);
> +		rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
> +		out_mb[nb_success++] = mb;
> +		continue;
> +
> +error:
> +		err_mb[nb_err++] = mb;
> +	}
> +
> +	if (unlikely(nb_err != 0))
> +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> +
> +	*nb_err_ret = nb_err;
> +	return nb_success;
> +}
> +
> +static int
> +pdcp_pre_process_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
> +{
> +	struct entity_priv *en_priv = entity_priv_get(entity);
> +
> +	entity->pre_process = NULL;
> +	entity->post_process = NULL;
> +
> +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) &&
> +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
> +		entity->pre_process = pdcp_pre_process_cplane_sn_12_ul;
> +		entity->post_process = pdcp_post_process_cplane_sn_12_ul;
> +	}
> +
> +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) &&
> +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK)) {
> +		entity->pre_process = pdcp_pre_process_cplane_sn_12_dl;
> +		entity->post_process = pdcp_post_process_cplane_sn_12_dl;
> +	}
> +
> +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
> +		entity->pre_process = pdcp_pre_process_uplane_sn_12_ul;
> +		entity->post_process = pdcp_post_process_uplane_sn_12_ul;
> +	}
> +
> +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
> +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
> +		entity->pre_process = pdcp_pre_process_uplane_sn_18_ul;
> +		entity->post_process = pdcp_post_process_uplane_sn_18_ul;
> +	}
> +
> +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
> +	    (en_priv->flags.is_authenticated)) {
> +		entity->pre_process = pdcp_pre_process_uplane_sn_12_dl_ip;
> +		entity->post_process = pdcp_post_process_uplane_sn_12_dl_ip;
> +	}
> +
> +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
> +	    (!en_priv->flags.is_authenticated)) {
> +		entity->pre_process = pdcp_pre_process_uplane_sn_12_dl;
> +		entity->post_process = pdcp_post_process_uplane_sn_12_dl;
> +	}
> +
> +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
> +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
> +	    (en_priv->flags.is_authenticated)) {
> +		entity->pre_process = pdcp_pre_process_uplane_sn_18_dl_ip;
> +		entity->post_process = pdcp_post_process_uplane_sn_18_dl_ip;
> +	}
> +
> +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
> +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
> +	    (!en_priv->flags.is_authenticated)) {
> +		entity->pre_process = pdcp_pre_process_uplane_sn_18_dl;
> +		entity->post_process = pdcp_post_process_uplane_sn_18_dl;
> +	}
> +
> +	if (entity->pre_process == NULL || entity->post_process == NULL)
> +		return -ENOTSUP;
> +
> +	return 0;
> +}
> +
> +static int
> +pdcp_entity_priv_populate(struct entity_priv *en_priv, const struct rte_pdcp_entity_conf *conf)
> +{
> +	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
> +	int ret;
> +
> +	/**
> +	 * flags.is_authenticated
> +	 *
> +	 * MAC-I would be added in case of control plane packets and when authentication
> +	 * transform is not NULL.
> +	 */
> +
> +	if (conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL)
> +		en_priv->flags.is_authenticated = 1;
> +
> +	ret = pdcp_crypto_xfrm_get(conf, &c_xfrm, &a_xfrm);
> +	if (ret)
> +		return ret;
> +
> +	if (a_xfrm != NULL)
> +		en_priv->flags.is_authenticated = 1;
> +
> +	/**
> +	 * flags.is_ciph_in_bits
> +	 *
> +	 * For ZUC & SNOW3G cipher algos, offset & length need to be provided in bits.
> +	 */
> +
> +	if ((c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2) ||
> +	    (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_ZUC_EEA3))
> +		en_priv->flags.is_ciph_in_bits = 1;
> +
> +	/**
> +	 * flags.is_auth_in_bits
> +	 *
> +	 * For ZUC & SNOW3G authentication algos, offset & length need to be provided in bits.
> +	 */
> +
> +	if (a_xfrm != NULL) {
> +		if ((a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) ||
> +		    (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3))
> +			en_priv->flags.is_auth_in_bits = 1;
> +	}
> +
> +	/**
> +	 * flags.is_ul_entity
> +	 *
> +	 * Indicate whether the entity is UL/transmitting PDCP entity.
> +	 */
> +	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)
> +		en_priv->flags.is_ul_entity = 1;
> +
> +	/**
> +	 * hdr_sz
> +	 *
> +	 * PDCP header size of the entity
> +	 */
> +	en_priv->hdr_sz = pdcp_hdr_size_get(conf->pdcp_xfrm.sn_size);
> +
> +	/**
> +	 * aad_sz
> +	 *
> +	 * For AES-CMAC, additional message is prepended for processing. Need to be trimmed after
> +	 * crypto processing is done.
> +	 */
> +	if (a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC)
> +		en_priv->aad_sz = 8;
> +	else
> +		en_priv->aad_sz = 0;
> +
> +	return 0;
> +}
> +
> +int
> +pdcp_process_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
> +{
> +	struct entity_priv *en_priv;
> +	int ret;
> +
> +	if (entity == NULL || conf == NULL)
> +		return -EINVAL;
> +
> +	en_priv = entity_priv_get(entity);
> +
> +	ret = pdcp_iv_gen_func_set(entity, conf);
> +	if (ret)
> +		return ret;
> +
> +	ret = pdcp_entity_priv_populate(en_priv, conf);
> +	if (ret)
> +		return ret;
> +
> +	ret = pdcp_pre_process_func_set(entity, conf);
> +	if (ret)
> +		return ret;
> +
> +	return 0;
> +}
> diff --git a/lib/pdcp/pdcp_process.h b/lib/pdcp/pdcp_process.h
> new file mode 100644
> index 0000000000..c92ab34c40
> --- /dev/null
> +++ b/lib/pdcp/pdcp_process.h
> @@ -0,0 +1,13 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#ifndef _PDCP_PROCESS_H_
> +#define _PDCP_PROCESS_H_
> +
> +#include <rte_pdcp.h>
> +
> +int
> +pdcp_process_func_set(struct rte_pdcp_entity *entity, const struct
> rte_pdcp_entity_conf *conf);
> +
> +#endif /* _PDCP_PROCESS_H_ */
> diff --git a/lib/pdcp/rte_pdcp.c b/lib/pdcp/rte_pdcp.c
> new file mode 100644
> index 0000000000..b1533971c2
> --- /dev/null
> +++ b/lib/pdcp/rte_pdcp.c
> @@ -0,0 +1,136 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#include <rte_pdcp.h>
> +#include <rte_malloc.h>
> +
> +#include "pdcp_crypto.h"
> +#include "pdcp_entity.h"
> +#include "pdcp_process.h"
> +
> +static int
> +pdcp_entity_size_get(const struct rte_pdcp_entity_conf *conf)
> +{
> +	int size;
> +
> +	size = sizeof(struct rte_pdcp_entity) + sizeof(struct entity_priv);
> +
> +	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK)
> +		size += sizeof(struct entity_priv_dl_part);
> +	else if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)
> +		size += sizeof(struct entity_priv_ul_part);
> +	else
> +		return -EINVAL;
> +
> +	return RTE_ALIGN_CEIL(size, RTE_CACHE_LINE_SIZE);
> +}
> +
> +struct rte_pdcp_entity *
> +rte_pdcp_entity_establish(const struct rte_pdcp_entity_conf *conf)
> +{
> +	struct rte_pdcp_entity *entity = NULL;
> +	struct entity_priv *en_priv;
> +	int ret;
> +
> +	if (conf == NULL || conf->cop_pool == NULL) {
> +		rte_errno = -EINVAL;
> +		return NULL;
> +	}
> +
> +	if (conf->pdcp_xfrm.en_ordering || conf->pdcp_xfrm.remove_duplicates || conf->is_slrb ||
> +	    conf->en_sec_offload) {
> +		rte_errno = -ENOTSUP;
> +		return NULL;
> +	}
> +
> +	/*
> +	 * 6.3.2 PDCP SN
> +	 * Length: 12 or 18 bits as indicated in table 6.3.2-1. The length of the PDCP SN is
> +	 * configured by upper layers (pdcp-SN-SizeUL, pdcp-SN-SizeDL, or sl-PDCP-SN-Size in
> +	 * TS 38.331 [3])
> +	 */
> +	if ((conf->pdcp_xfrm.sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) &&
> +	    (conf->pdcp_xfrm.sn_size != RTE_SECURITY_PDCP_SN_SIZE_18)) {
> +		rte_errno = -ENOTSUP;
> +		return NULL;
> +	}
> +
> +	if (conf->pdcp_xfrm.hfn || conf->pdcp_xfrm.hfn_threshold) {
> +		rte_errno = -EINVAL;
> +		return NULL;
> +	}
> +
> +	entity = rte_zmalloc_socket("pdcp_entity", pdcp_entity_size_get(conf),
> +				    RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
> +	if (entity == NULL) {
> +		rte_errno = -ENOMEM;
> +		return NULL;
> +	}
> +
> +	en_priv = entity_priv_get(entity);
> +
> +	en_priv->state.rx_deliv = conf->count;
> +	en_priv->state.tx_next = conf->count;
> +	en_priv->cop_pool = conf->cop_pool;
> +
> +	/* Setup crypto session */
> +	ret = pdcp_crypto_sess_create(entity, conf);
> +	if (ret)
> +		goto entity_free;
> +
> +	ret = pdcp_process_func_set(entity, conf);
> +	if (ret)
> +		goto crypto_sess_destroy;
> +
> +	return entity;
> +
> +crypto_sess_destroy:
> +	pdcp_crypto_sess_destroy(entity);
> +entity_free:
> +	rte_free(entity);
> +	rte_errno = ret;
> +	return NULL;
> +}
> +
> +int
> +rte_pdcp_entity_release(struct rte_pdcp_entity *pdcp_entity, struct rte_mbuf *out_mb[])
> +{
> +	int ret;
> +
> +	if (pdcp_entity == NULL)
> +		return -EINVAL;
> +
> +	/* Teardown crypto sessions */
> +	ret = pdcp_crypto_sess_destroy(pdcp_entity);
> +	if (ret)
> +		return ret;
> +
> +	rte_free(pdcp_entity);
> +
> +	RTE_SET_USED(out_mb);
> +	return 0;
> +}
> +
> +int
> +rte_pdcp_entity_suspend(struct rte_pdcp_entity *pdcp_entity,
> +			struct rte_mbuf *out_mb[])
> +{
> +	struct entity_priv *en_priv;
> +
> +	if (pdcp_entity == NULL)
> +		return -EINVAL;
> +
> +	en_priv = entity_priv_get(pdcp_entity);
> +
> +	if (en_priv->flags.is_ul_entity) {
> +		en_priv->state.tx_next = 0;
> +	} else {
> +		en_priv->state.rx_next = 0;
> +		en_priv->state.rx_deliv = 0;
> +	}
> +
> +	RTE_SET_USED(out_mb);
> +
> +	return 0;
> +}
> diff --git a/lib/pdcp/rte_pdcp.h b/lib/pdcp/rte_pdcp.h
> new file mode 100644
> index 0000000000..b6c7f32c05
> --- /dev/null
> +++ b/lib/pdcp/rte_pdcp.h
> @@ -0,0 +1,263 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#ifndef _RTE_PDCP_H_
> +#define _RTE_PDCP_H_
> +
> +/**
> + * @file rte_pdcp.h
> + *
> + * RTE PDCP support.
> + *
> + * librte_pdcp provides a framework for PDCP protocol processing.
> + */
> +
> +#include <rte_compat.h>
> +#include <rte_common.h>
> +#include <rte_errno.h>
> +#include <rte_mempool.h>
> +#include <rte_security.h>

Remove the header files which are not needed.
I do not see any use of rte_errno.h.
I believe rte_common.h and rte_compat.h are also not needed.

> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/* Forward declarations */
> +struct rte_pdcp_entity;
> +
> +/* PDCP pre-process function based on entity configuration */
> +typedef uint16_t (*rte_pdcp_pre_p_t)(const struct rte_pdcp_entity *entity,
> +				     struct rte_mbuf *mb[],
> +				     struct rte_crypto_op *cop[],
> +				     uint16_t num, uint16_t *nb_err);
> +
> +/* PDCP post-process function based on entity configuration */
> +typedef uint16_t (*rte_pdcp_post_p_t)(const struct rte_pdcp_entity *entity,
> +				      struct rte_mbuf *in_mb[],
> +				      struct rte_mbuf *out_mb[],
> +				      uint16_t num, uint16_t *nb_err);
> +
> +/**
> + * PDCP entity.
> + */
> +struct rte_pdcp_entity {
> +	/** Entity specific pre-process handle. */
> +	rte_pdcp_pre_p_t pre_process;
> +	/** Entity specific post-process handle. */
> +	rte_pdcp_post_p_t post_process;
> +	/**
> +	 * PDCP entities may hold packets for purposes of in-order delivery (in
> +	 * case of receiving PDCP entity) and re-transmission (in case of
> +	 * transmitting PDCP entity).
> +	 *
> +	 * For receiving PDCP entity, it may hold packets when in-order
> +	 * delivery is enabled. The packets would be cached until either a
> +	 * packet that completes the sequence arrives or when discard timer
> +	 * expires.
> +	 *
> +	 * When post-processing of PDCP packet which completes a sequence is
> +	 * done, the API may return more packets than enqueued. Application is
> +	 * expected to provide *rte_pdcp_pkt_post_process()* with *out_mb*
> +	 * which can hold maximum number of packets which may be returned.
> +	 *
> +	 * For transmitting PDCP entity, during re-establishment (5.1.2),
> +	 * entity may be required to perform re-transmission of the buffers
> +	 * after applying new ciphering & integrity algorithms. For performing
> +	 * crypto operation, *rte_pdcp_entity_re_establish()* would return as
> +	 * many crypto_ops as the ones cached.
> +	 */
> +	uint16_t max_pkt_cache;
> +	/** User area for saving application data. */
> +	uint64_t user_area[2];
> +} __rte_cache_aligned;
> +
> +/**
> + * PDCP entity configuration to be used for establishing an entity.
> + */
> +struct rte_pdcp_entity_conf {
> +	/** PDCP transform for the entity. */
> +	struct rte_security_pdcp_xform pdcp_xfrm;
> +	/** Crypto transform applicable for the entity. */
> +	struct rte_crypto_sym_xform *crypto_xfrm;
> +	/** Mempool for crypto symmetric session. */
> +	struct rte_mempool *sess_mpool;
> +	/** Crypto op pool.*/
> +	struct rte_mempool *cop_pool;
> +	/**
> +	 * 32 bit count value (HFN + SN) to be used for the first packet.
> +	 * pdcp_xfrm.hfn would be ignored as the HFN would be derived from this value.
> +	 */
> +	uint32_t count;
> +	/** Indicate whether the PDCP entity belongs to Side Link Radio Bearer. */
> +	bool is_slrb;
> +	/** Enable security offload on the device specified. */
> +	bool en_sec_offload;
> +	/** Enable non-atomic usage of entity. */
> +	bool en_non_atomic;
> +	/** Device on which security/crypto session need to be created. */
> +	uint8_t dev_id;
> +	/** Reverse direction during IV generation. Can be used to simulate UE crypto processing. */
> +	bool reverse_iv_direction;
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * 5.1.1 PDCP entity establishment
> + *
> + * Establish PDCP entity based on provided input configuration.
> + *
> + * @param conf
> + *   Parameters to be used for initializing PDCP entity object.
> + * @return
> + *   - Valid handle if success
> + *   - NULL in case of failure. rte_errno will be set to error code
> + */
> +__rte_experimental
> +struct rte_pdcp_entity *
> +rte_pdcp_entity_establish(const struct rte_pdcp_entity_conf *conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * 5.1.3 PDCP entity release
> + *
> + * Release PDCP entity.
> + *
> + * For UL/transmitting PDCP entity, all stored PDCP SDUs would be dropped.
> + * For DL/receiving PDCP entity, the stored PDCP SDUs would be returned in
> + * *out_mb* buffer. The buffer should be large enough to hold all cached
> + * packets in the entity.
> + *
> + * @param pdcp_entity
> + *   Pointer to the PDCP entity to be released.
> + * @param[out] out_mb
> + *   The address of an array that can hold up to *rte_pdcp_entity.max_pkt_cache*
> + *   pointers to *rte_mbuf* structures.
> + * @return
> + *   -  0: Success and no cached packets to return
> + *   - >0: Success and the number of packets returned in out_mb
> + *   - <0: Error code in case of failures
> + */
> +__rte_experimental
> +int
> +rte_pdcp_entity_release(struct rte_pdcp_entity *pdcp_entity,
> +			struct rte_mbuf *out_mb[]);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * 5.1.4 PDCP entity suspend
> + *
> + * Suspend PDCP entity.
> + *
> + * For DL/receiving PDCP entity, the stored PDCP SDUs would be returned in
> + * *out_mb* buffer. The buffer should be large enough to hold all cached
> + * packets in the entity.
> + *
> + * For UL/transmitting PDCP entity, *out_mb* buffer would be unused.
> + *
> + * @param pdcp_entity
> + *   Pointer to the PDCP entity to be suspended.
> + * @param[out] out_mb
> + *   The address of an array that can hold up to *rte_pdcp_entity.max_pkt_cache*
> + *   pointers to *rte_mbuf* structures.
> + * @return
> + *   -  0: Success and no cached packets to return
> + *   - >0: Success and the number of packets returned in out_mb
> + *   - <0: Error code in case of failures
> + */
> +__rte_experimental
> +int
> +rte_pdcp_entity_suspend(struct rte_pdcp_entity *pdcp_entity,
> +			struct rte_mbuf *out_mb[]);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * For input mbufs and given PDCP entity pre-process the mbufs and prepare
> + * crypto ops that can be enqueued to the cryptodev associated with given
> + * session. Only error packets would be returned in the input buffer,
> + * *mb*, and it is the responsibility of the application to free the same.
> + *
> + * @param entity
> + *   Pointer to the *rte_pdcp_entity* object the packets belong to.
> + * @param[in, out] mb
> + *   The address of an array of *num* pointers to *rte_mbuf* structures
> + *   which contain the input packets. Any error packets would be returned in the
> + *   same buffer.
> + * @param[out] cop
> + *   The address of an array that can hold up to *num* pointers to
> + *   *rte_crypto_op* structures. Crypto ops would be allocated by
> + *   ``rte_pdcp_pkt_pre_process`` API.
> + * @param num
> + *   The maximum number of packets to process.
> + * @param[out] nb_err
> + *   Pointer to return the number of error packets returned in *mb*
> + * @return
> + *   Count of crypto_ops prepared
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_pdcp_pkt_pre_process(const struct rte_pdcp_entity *entity,
> +			 struct rte_mbuf *mb[], struct rte_crypto_op *cop[],
> +			 uint16_t num, uint16_t *nb_err)
> +{
> +	return entity->pre_process(entity, mb, cop, num, nb_err);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * For input mbufs and given PDCP entity, perform PDCP post-processing of the
> + * mbufs.
> + *
> + * Input mbufs are the ones retrieved from crypto_ops dequeued from cryptodev
> + * and grouped by *rte_pdcp_pkt_crypto_group()*.
> + *
> + * The post-processed packets would be returned in the *out_mb* buffer.
> + * The resultant mbufs would be grouped into success packets and error packets.
> + * Error packets would be grouped at the end of the array and it is the
> + * responsibility of the application to handle the same.
> + *
> + * When in-order delivery is enabled, PDCP entity may buffer packets and would
> + * deliver packets only when all prior packets have been post-processed. That
> + * would result in returning more/less packets than enqueued.
> + *
> + * @param entity
> + *   Pointer to the *rte_pdcp_entity* object the packets belong to.
> + * @param in_mb
> + *   The address of an array of *num* pointers to *rte_mbuf* structures.
> + * @param[out] out_mb
> + *   The address of an array of *num* pointers to *rte_mbuf* structures
> + *   to output packets after PDCP post-processing.
> + * @param num
> + *   The maximum number of packets to process.
> + * @param[out] nb_err
> + *   The number of error packets returned in *out_mb* buffer.
> + * @return
> + *   Count of packets returned in *out_mb* buffer.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_pdcp_pkt_post_process(const struct rte_pdcp_entity *entity,
> +			  struct rte_mbuf *in_mb[],
> +			  struct rte_mbuf *out_mb[],
> +			  uint16_t num, uint16_t *nb_err)
> +{
> +	return entity->post_process(entity, in_mb, out_mb, num, nb_err);
> +}
> +
> +#include <rte_pdcp_group.h>
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_PDCP_H_ */
> diff --git a/lib/pdcp/rte_pdcp_group.h b/lib/pdcp/rte_pdcp_group.h
> new file mode 100644
> index 0000000000..2c01c19d4e
> --- /dev/null
> +++ b/lib/pdcp/rte_pdcp_group.h
> @@ -0,0 +1,133 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#ifndef _RTE_PDCP_GROUP_H_
> +#define _RTE_PDCP_GROUP_H_
> +
> +/**
> + * @file rte_pdcp_group.h
> + *
> + * RTE PDCP grouping support.
> + * It is not recommended to include this file directly, include <rte_pdcp.h>
> + * instead.
> + * Provides helper functions to process completed crypto-ops and group related
> + * packets by sessions they belong to.
> + */
> +
> +#include <rte_common.h>
> +#include <rte_crypto.h>
> +#include <rte_cryptodev.h>
> +#include <rte_security.h>

Remove header files which are not needed.

> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * Group packets belonging to same PDCP entity.
> + */
> +struct rte_pdcp_group {
> +	union {
> +		uint64_t val;
> +		void *ptr;
> +	} id; /**< Grouped by value */
> +	struct rte_mbuf **m;  /**< Start of the group */
> +	uint32_t cnt;         /**< Number of entries in the group */
> +	int32_t rc;           /**< Status code associated with the group */
> +};
> +
> +/**
> + * Take crypto-op as an input and extract pointer to related PDCP entity.
> + * @param cop
> + *   The address of an input *rte_crypto_op* structure.
> + * @return
> + *   The pointer to the related *rte_pdcp_entity* structure.
> + */
> +static inline struct rte_pdcp_entity *
> +rte_pdcp_en_from_cop(const struct rte_crypto_op *cop)
> +{
> +	void *sess = cop->sym[0].session;
> +
> +	if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
> +		return (struct rte_pdcp_entity *)(uintptr_t)
> +			rte_security_session_opaque_data_get(sess);
> +	} else if (cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> +		return (struct rte_pdcp_entity *)(uintptr_t)
> +			rte_cryptodev_sym_session_opaque_data_get(sess);
> +	}

This patchset does not support security sessions, so it would be better to return NULL in that case.
Moreover, we can directly call rte_cryptodev_sym_session_opaque_data_get(cop->sym[0].session)
from rte_pdcp_pkt_crypto_group. No need to have a wrapper.
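For instance, something along these lines (an untested sketch, inside rte_pdcp_pkt_crypto_group()):

	/* Entity pointer was stored as session opaque data at session creation,
	 * via rte_cryptodev_sym_session_opaque_data_set(). */
	grp[n].id.ptr = (struct rte_pdcp_entity *)(uintptr_t)
		rte_cryptodev_sym_session_opaque_data_get(cop[i]->sym[0].session);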

> +
> +	return NULL;
> +}
> +
> +/**
> + * Take as input completed crypto ops, extract related mbufs and group them by
> + * *rte_pdcp_entity* they belong to. Mbufs for which the crypto operation has
> + * failed would be flagged using *RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED* flag
> + * in rte_mbuf.ol_flags. The crypto_ops would be freed after the grouping.
> + *
> + * Note that the application must ensure only crypto-ops prepared by lib_pdcp are
> + * provided back to @see rte_pdcp_pkt_crypto_group().
> + *
> + * @param cop
> + *   The address of an array of *num* pointers to the input *rte_crypto_op*
> + *   structures.
> + * @param[out] mb
> + *   The address of an array of *num* pointers to output *rte_mbuf* structures.
> + * @param[out] grp
> + *   The address of an array of *num* output *rte_pdcp_group* structures.
> + * @param num
> + *   The maximum number of crypto-ops to process.
> + * @return
> + *   Number of filled elements in *grp* array.
> + *
> + */
> +static inline uint16_t
> +rte_pdcp_pkt_crypto_group(struct rte_crypto_op *cop[], struct rte_mbuf *mb[],
> +			  struct rte_pdcp_group grp[], uint16_t num)
> +{
> +	uint32_t i, j = 0, n = 0;
> +	void *ns, *ps = NULL;
> +	struct rte_mbuf *m;
> +
> +	for (i = 0; i != num; i++) {
> +		m = cop[i]->sym[0].m_src;
> +		ns = cop[i]->sym[0].session;
> +
> +		m->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
> +		if (cop[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
> +			m->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
> +
> +		/* Different entity */
> +		if (ps != ns) {
> +
> +			/* Finalize open group and start a new one */
> +			if (ps != NULL) {
> +				grp[n].cnt = mb + j - grp[n].m;
> +				n++;
> +			}
> +
> +			/* Start new group */
> +			grp[n].m = mb + j;
> +			ps = ns;
> +			grp[n].id.ptr = rte_pdcp_en_from_cop(cop[i]);
> +		}
> +
> +		mb[j++] = m;
> +		rte_crypto_op_free(cop[i]);
> +	}
> +
> +	/* Finalize last group */
> +	if (ps != NULL) {
> +		grp[n].cnt = mb + j - grp[n].m;
> +		n++;
> +	}
> +
> +	return n;
> +}
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_PDCP_GROUP_H_ */
> diff --git a/lib/pdcp/version.map b/lib/pdcp/version.map
> new file mode 100644
> index 0000000000..8fa9d5d7cc
> --- /dev/null
> +++ b/lib/pdcp/version.map
> @@ -0,0 +1,13 @@
> +EXPERIMENTAL {
> +	global:
> +
> +	# added in 22.11

Change to 23.03

> +	rte_pdcp_entity_establish;
> +	rte_pdcp_entity_release;
> +	rte_pdcp_entity_suspend;
> +
> +	rte_pdcp_pkt_post_process;
> +	rte_pdcp_pkt_pre_process;
> +
> +	local: *;
> +};
> --
> 2.25.1
  
Anoob Joseph Feb. 13, 2023, 10:59 a.m. UTC | #2
Hi Akhil,

Thanks for the review. Please see inline.

Thanks,
Anoob

> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Wednesday, January 18, 2023 9:57 PM
> To: Anoob Joseph <anoobj@marvell.com>; Thomas Monjalon
> <thomas@monjalon.net>; Jerin Jacob Kollanukkaran <jerinj@marvell.com>;
> Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>; Bernard
> Iremonger <bernard.iremonger@intel.com>
> Cc: Hemant Agrawal <hemant.agrawal@nxp.com>; Mattias Rönnblom
> <mattias.ronnblom@ericsson.com>; Kiran Kumar Kokkilagadda
> <kirankumark@marvell.com>; Volodymyr Fialko <vfialko@marvell.com>;
> dev@dpdk.org; Olivier Matz <olivier.matz@6wind.com>
> Subject: RE: [PATCH 2/5] lib: add pdcp protocol
> 
> Hi Anoob,
> 
> Please see inline comments.
> > Subject: [PATCH 2/5] lib: add pdcp protocol
> >
> > Add Packet Data Convergence Protocol (PDCP) processing library.
> >
> > The library is similar to lib_ipsec which provides IPsec processing
> > capabilities in DPDK.
> >
> > PDCP would involve roughly the following options,
> > 1. Transfer of user plane data
> > 2. Transfer of control plane data
> > 3. Header compression
> > 4. Uplink data compression
> > 5. Ciphering and integrity protection
> >
> > PDCP library provides following control path APIs that is used to
> > configure various PDCP entities,
> > 1. rte_pdcp_entity_establish()
> > 2. rte_pdcp_entity_suspend()
> > 3. rte_pdcp_entity_release()
> >
> > PDCP process is split into 2 parts. One before crypto processing
> > (rte_pdcp_pkt_pre_process()) and one after crypto processing
> > (rte_pdcp_pkt_post_process()). Since cryptodev dequeue can return
> crypto
> > operations belonging to multiple entities, rte_pdcp_pkt_crypto_group()
> > is added to help grouping crypto operations belonging to same entity.
> >
> > Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> > Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
> > Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> > ---
> >  doc/api/doxy-api-index.md |    3 +-
> >  doc/api/doxy-api.conf.in  |    1 +
> >  lib/meson.build           |    1 +
> >  lib/pdcp/meson.build      |    8 +
> >  lib/pdcp/pdcp_crypto.c    |  240 ++++++++
> >  lib/pdcp/pdcp_crypto.h    |   20 +
> >  lib/pdcp/pdcp_entity.h    |  218 +++++++
> >  lib/pdcp/pdcp_process.c   | 1195
> +++++++++++++++++++++++++++++++++++++
> >  lib/pdcp/pdcp_process.h   |   13 +
> >  lib/pdcp/rte_pdcp.c       |  136 +++++
> >  lib/pdcp/rte_pdcp.h       |  263 ++++++++
> >  lib/pdcp/rte_pdcp_group.h |  133 +++++
> >  lib/pdcp/version.map      |   13 +
> >  13 files changed, 2243 insertions(+), 1 deletion(-)
> >  create mode 100644 lib/pdcp/meson.build
> >  create mode 100644 lib/pdcp/pdcp_crypto.c
> >  create mode 100644 lib/pdcp/pdcp_crypto.h
> >  create mode 100644 lib/pdcp/pdcp_entity.h
> >  create mode 100644 lib/pdcp/pdcp_process.c
> >  create mode 100644 lib/pdcp/pdcp_process.h
> >  create mode 100644 lib/pdcp/rte_pdcp.c
> >  create mode 100644 lib/pdcp/rte_pdcp.h
> >  create mode 100644 lib/pdcp/rte_pdcp_group.h
> >  create mode 100644 lib/pdcp/version.map
> >
> > diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
> > index ae4b107240..6014bee079 100644
> > --- a/doc/api/doxy-api-index.md
> > +++ b/doc/api/doxy-api-index.md
> > @@ -126,7 +126,8 @@ The public API headers are grouped by topics:
> >    [eCPRI](@ref rte_ecpri.h),
> >    [L2TPv2](@ref rte_l2tpv2.h),
> >    [PPP](@ref rte_ppp.h),
> > -  [PDCP hdr](@ref rte_pdcp_hdr.h)
> > +  [PDCP hdr](@ref rte_pdcp_hdr.h),
> > +  [PDCP](@ref rte_pdcp.h),
> >
> >  - **QoS**:
> >    [metering](@ref rte_meter.h),
> > diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
> > index f0886c3bd1..01314b087e 100644
> > --- a/doc/api/doxy-api.conf.in
> > +++ b/doc/api/doxy-api.conf.in
> > @@ -61,6 +61,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
> >                            @TOPDIR@/lib/net \
> >                            @TOPDIR@/lib/pcapng \
> >                            @TOPDIR@/lib/pci \
> > +                          @TOPDIR@/lib/pdcp \
> >                            @TOPDIR@/lib/pdump \
> >                            @TOPDIR@/lib/pipeline \
> >                            @TOPDIR@/lib/port \
> > diff --git a/lib/meson.build b/lib/meson.build
> > index fd55925340..a827006d29 100644
> > --- a/lib/meson.build
> > +++ b/lib/meson.build
> > @@ -63,6 +63,7 @@ libraries = [
> >          'flow_classify', # flow_classify lib depends on pkt framework table lib
> >          'graph',
> >          'node',
> > +        'pdcp', # pdcp lib depends on crypto and security
> >  ]
> >
> >  optional_libs = [
> > diff --git a/lib/pdcp/meson.build b/lib/pdcp/meson.build
> > new file mode 100644
> > index 0000000000..a7f5a408cf
> > --- /dev/null
> > +++ b/lib/pdcp/meson.build
> > @@ -0,0 +1,8 @@
> > +# SPDX-License-Identifier: BSD-3-Clause
> > +# Copyright(C) 2022 Marvell.
> > +#
> Extra # here.
> 
> Do we support compilation on Windows as well?
> Check missing here.
> 

[Anoob] Will add checks in next version.

> > +
> > +sources = files('pdcp_crypto.c', 'pdcp_process.c', 'rte_pdcp.c')
> > +headers = files('rte_pdcp.h')
> 
> Do we need to add the indirect header as well for lib/pdcp/rte_pdcp_group.h?

[Anoob] Yes. Will update in next version.

> 
> > +
> > +deps += ['security']
> 
> Crypto not needed as dependency?

[Anoob] Yes. Will include other dependencies as well.

> 
> > diff --git a/lib/pdcp/pdcp_crypto.c b/lib/pdcp/pdcp_crypto.c
> > new file mode 100644
> > index 0000000000..7ffb8a07a7
> > --- /dev/null
> > +++ b/lib/pdcp/pdcp_crypto.c
> > @@ -0,0 +1,240 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2022 Marvell.
> > + */
> > +
> > +#include <rte_crypto.h>
> > +#include <rte_crypto_sym.h>
> > +#include <rte_cryptodev.h>
> > +#include <rte_pdcp.h>
> > +
> > +#include "pdcp_crypto.h"
> > +#include "pdcp_entity.h"
> > +
> > +static int
> > +pdcp_crypto_caps_cipher_verify(uint8_t dev_id, const struct rte_crypto_sym_xform *c_xfrm)
> > +{
> > +	const struct rte_cryptodev_symmetric_capability *cap;
> > +	struct rte_cryptodev_sym_capability_idx cap_idx;
> > +	int ret;
> > +
> > +	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
> > +	cap_idx.algo.cipher = c_xfrm->cipher.algo;
> > +
> > +	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
> > +	if (cap == NULL)
> > +		return -1;
> > +
> > +	ret = rte_cryptodev_sym_capability_check_cipher(cap, c_xfrm->cipher.key.length,
> > +							c_xfrm->cipher.iv.length);
> > +
> > +	return ret;
> > +}
> > +
> > +static int
> > +pdcp_crypto_caps_auth_verify(uint8_t dev_id, const struct rte_crypto_sym_xform *a_xfrm)
> > +{
> > +	const struct rte_cryptodev_symmetric_capability *cap;
> > +	struct rte_cryptodev_sym_capability_idx cap_idx;
> > +	int ret;
> > +
> > +	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
> > +	cap_idx.algo.auth = a_xfrm->auth.algo;
> > +
> > +	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
> > +	if (cap == NULL)
> > +		return -1;
> > +
> > +	ret = rte_cryptodev_sym_capability_check_auth(cap, a_xfrm->auth.key.length,
> > +						      a_xfrm->auth.digest_length,
> > +						      a_xfrm->auth.iv.length);
> > +
> > +	return ret;
> > +}
> > +
> > +static int
> > +pdcp_crypto_xfrm_validate(const struct rte_pdcp_entity_conf *conf,
> > +				 const struct rte_crypto_sym_xform *c_xfrm,
> > +				 const struct rte_crypto_sym_xform *a_xfrm,
> > +				 bool is_auth_then_cipher)
> > +{
> > +	uint16_t ciph_iv_len, auth_digest_len, auth_iv_len;
> > +	int ret;
> > +
> > +	/*
> > +	 * Uplink means PDCP entity is configured for transmit. Downlink means PDCP entity is
> > +	 * configured for receive. When integrity protection is enabled, PDCP always performs
> > +	 * digest-encrypted or auth-gen-encrypt for uplink (and decrypt-auth-verify for downlink).
> > +	 * So for uplink, crypto chain would be auth-cipher while for downlink it would be
> > +	 * cipher-auth.
> > +	 *
> > +	 * When integrity protection is not required, xform would be cipher only.
> > +	 */
> > +
> > +	if (c_xfrm == NULL)
> > +		return -EINVAL;
> > +
> > +	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK) {
> > +
> > +		/* With UPLINK, if auth is enabled, it should be before cipher */
> > +		if (a_xfrm != NULL && !is_auth_then_cipher)
> > +			return -EINVAL;
> > +
> > +		/* With UPLINK, cipher operation must be encrypt */
> > +		if (c_xfrm->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT)
> > +			return -EINVAL;
> > +
> > +		/* With UPLINK, auth operation (if present) must be generate */
> > +		if (a_xfrm != NULL && a_xfrm->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE)
> > +			return -EINVAL;
> > +
> > +	} else if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) {
> > +
> > +		/* With DOWNLINK, if auth is enabled, it should be after cipher */
> > +		if (a_xfrm != NULL && is_auth_then_cipher)
> > +			return -EINVAL;
> > +
> > +		/* With DOWNLINK, cipher operation must be decrypt */
> > +		if (c_xfrm->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT)
> > +			return -EINVAL;
> > +
> > +		/* With DOWNLINK, auth operation (if present) must be verify */
> > +		if (a_xfrm != NULL && a_xfrm->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY)
> > +			return -EINVAL;
> > +
> > +	} else {
> > +		return -EINVAL;
> > +	}
> > +
> > +	if ((c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_NULL) &&
> > +	    (c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_AES_CTR) &&
> > +	    (c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3) &&
> > +	    (c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2))
> > +		return -EINVAL;
> > +
> > +	if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
> > +		ciph_iv_len = 0;
> > +	else
> > +		ciph_iv_len = PDCP_IV_LENGTH;
> > +
> > +	if (ciph_iv_len != c_xfrm->cipher.iv.length)
> > +		return -EINVAL;
> > +
> > +	if (a_xfrm != NULL) {
> > +		if ((a_xfrm->auth.algo != RTE_CRYPTO_AUTH_NULL) &&
> > +		    (a_xfrm->auth.algo != RTE_CRYPTO_AUTH_AES_CMAC) &&
> > +		    (a_xfrm->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3) &&
> > +		    (a_xfrm->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2))
> > +			return -EINVAL;
> > +
> > +		if (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)
> > +			auth_digest_len = 0;
> > +		else
> > +			auth_digest_len = 4;
> 
> If we have a macro for IV length, why not for digest also?

[Anoob] Will add macro for digest length.

> Moreover, for NULL integrity, digest length is also 4 with all 0s.
> Refer Annex D.1 in
> https://www.etsi.org/deliver/etsi_ts/133500_133599/133501/15.04.00_60/ts_133501v150400p.pdf
> 
> Digest len would be 0 only in case of a_xfrm == NULL

[Anoob] I agree that PDCP requires AUTH NULL to add all 0s, and we satisfy that by having lib PDCP add the zeros itself. Most of the crypto drivers in DPDK do not allow a non-zero digest length for NULL auth. The above change was added to make sure the library works well with existing drivers.
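The intent is roughly the following (a sketch only, not the final code; pdcp_zero_mac_i_append() is a hypothetical helper):

	/* With RTE_CRYPTO_AUTH_NULL, the library itself appends the 4-byte
	 * all-zero MAC-I, since most drivers reject digest_length != 0 for
	 * NULL auth. */
	static void
	pdcp_zero_mac_i_append(struct rte_mbuf *mb)
	{
		uint8_t *mac_i = (uint8_t *)rte_pktmbuf_append(mb, PDCP_MAC_I_LEN);

		if (mac_i != NULL)
			memset(mac_i, 0, PDCP_MAC_I_LEN);
	}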

> 
> > +
> > +		if (auth_digest_len != a_xfrm->auth.digest_length)
> > +			return -EINVAL;
> > +
> > +		if ((a_xfrm->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) ||
> > +		    (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2))
> > +			auth_iv_len = PDCP_IV_LENGTH;
> > +		else
> > +			auth_iv_len = 0;
> > +
> > +		if (a_xfrm->auth.iv.length != auth_iv_len)
> > +			return -EINVAL;
> > +	}
> > +
> > +	if (!rte_cryptodev_is_valid_dev(conf->dev_id))
> > +		return -EINVAL;
> > +
> > +	ret = pdcp_crypto_caps_cipher_verify(conf->dev_id, c_xfrm);
> > +	if (ret)
> > +		return -ENOTSUP;
> > +
> > +	if (a_xfrm != NULL) {
> > +		ret = pdcp_crypto_caps_auth_verify(conf->dev_id, a_xfrm);
> > +		if (ret)
> > +			return -ENOTSUP;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +int
> > +pdcp_crypto_sess_create(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
> > +{
> > +	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
> > +	struct entity_priv *en_priv;
> > +	bool is_auth_then_cipher;
> > +	int ret;
> > +
> > +	if (entity == NULL || conf == NULL || conf->crypto_xfrm == NULL)
> > +		return -EINVAL;
> > +
> > +	en_priv = entity_priv_get(entity);
> > +
> > +	en_priv->dev_id = conf->dev_id;
> > +
> > +	if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> > +		c_xfrm = conf->crypto_xfrm;
> > +		a_xfrm = conf->crypto_xfrm->next;
> > +		is_auth_then_cipher = false;
> > +	} else if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> > +		a_xfrm = conf->crypto_xfrm;
> > +		c_xfrm = conf->crypto_xfrm->next;
> > +		is_auth_then_cipher = true;
> > +	} else {
> > +		return -EINVAL;
> > +	}
> > +
> > +	ret = pdcp_crypto_xfrm_validate(conf, c_xfrm, a_xfrm, is_auth_then_cipher);
> > +	if (ret)
> > +		return ret;
> > +
> > +	if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
> > +		c_xfrm->cipher.iv.offset = 0;
> > +	else
> > +		c_xfrm->cipher.iv.offset = PDCP_IV_OFFSET;
> > +
> > +	if (a_xfrm != NULL) {
> > +		if (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)
> > +			a_xfrm->auth.iv.offset = 0;
> > +		else
> > +			if (c_xfrm->cipher.iv.offset)
> > +				a_xfrm->auth.iv.offset = PDCP_IV_OFFSET + PDCP_IV_LENGTH;
> > +			else
> > +				a_xfrm->auth.iv.offset = PDCP_IV_OFFSET;
> > +	}
> > +
> > +	if (conf->sess_mpool == NULL)
> > +		return -EINVAL;
> > +
> > +	en_priv->crypto_sess = rte_cryptodev_sym_session_create(conf->dev_id, conf->crypto_xfrm,
> > +								 conf->sess_mpool);
> > +	if (en_priv->crypto_sess == NULL) {
> > +		/* API returns positive values as error codes */
> > +		return -rte_errno;
> > +	}
> > +
> > +	rte_cryptodev_sym_session_opaque_data_set(en_priv->crypto_sess, (uint64_t)entity);
> > +
> > +	return 0;
> > +}
> > +
> > +int
> > +pdcp_crypto_sess_destroy(struct rte_pdcp_entity *entity)
> > +{
> > +	struct entity_priv *en_priv;
> > +
> > +	if (entity == NULL)
> > +		return -EINVAL;
> > +
> > +	en_priv = entity_priv_get(entity);
> > +
> > +	if (en_priv->crypto_sess != NULL) {
> > +		rte_cryptodev_sym_session_free(en_priv->dev_id, en_priv->crypto_sess);
> > +		en_priv->crypto_sess = NULL;
> > +	}
> > +
> > +	return 0;
> > +}
> > diff --git a/lib/pdcp/pdcp_crypto.h b/lib/pdcp/pdcp_crypto.h
> > new file mode 100644
> > index 0000000000..dc625b35d0
> > --- /dev/null
> > +++ b/lib/pdcp/pdcp_crypto.h
> > @@ -0,0 +1,20 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2022 Marvell.
> > + */
> > +
> > +#ifndef _PDCP_CRYPTO_H_
> > +#define _PDCP_CRYPTO_H_
> > +
> > +#include <rte_crypto.h>
> > +#include <rte_crypto_sym.h>
> > +#include <rte_pdcp.h>
> > +
> > +#define PDCP_IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
> > +#define PDCP_IV_LENGTH 16
> > +
> > +int pdcp_crypto_sess_create(struct rte_pdcp_entity *entity,
> > +			    const struct rte_pdcp_entity_conf *conf);
> > +
> > +int pdcp_crypto_sess_destroy(struct rte_pdcp_entity *entity);
> > +
> > +#endif /* _PDCP_CRYPTO_H_ */
> > diff --git a/lib/pdcp/pdcp_entity.h b/lib/pdcp/pdcp_entity.h
> > new file mode 100644
> > index 0000000000..e312fd4a8c
> > --- /dev/null
> > +++ b/lib/pdcp/pdcp_entity.h
> > @@ -0,0 +1,218 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2022 Marvell.
> > + */
> > +
> > +#ifndef _PDCP_ENTITY_H_
> > +#define _PDCP_ENTITY_H_
> > +
> > +#include <rte_common.h>
> > +#include <rte_crypto_sym.h>
> > +#include <rte_mempool.h>
> > +#include <rte_pdcp.h>
> > +#include <rte_security.h>
> > +
> > +struct entity_priv;
> > +
> > +#define PDCP_PDU_HDR_SIZE_SN_12 (RTE_ALIGN_MUL_CEIL(12, 8) / 8)
> > +#define PDCP_PDU_HDR_SIZE_SN_18 (RTE_ALIGN_MUL_CEIL(18, 8) / 8)
> > +
> > +#define PDCP_GET_SN_12_FROM_COUNT(c) ((c) & 0xfff)
> > +#define PDCP_GET_SN_18_FROM_COUNT(c) ((c) & 0x3ffff)
> > +
> > +#define PDCP_GET_HFN_SN_12_FROM_COUNT(c) (((c) >> 12) & 0xfffff)
> > +#define PDCP_GET_HFN_SN_18_FROM_COUNT(c) (((c) >> 18) & 0x3fff)
> > +
> > +#define PDCP_SET_COUNT_FROM_HFN_SN_12(h, s) ((((h) & 0xfffff) << 12) | ((s) & 0xfff))
> > +#define PDCP_SET_COUNT_FROM_HFN_SN_18(h, s) ((((h) & 0x3fff) << 18) | ((s) & 0x3ffff))
> > +
> > +#define PDCP_SN_12_WINDOW_SZ 0x800
> > +#define PDCP_SN_18_WINDOW_SZ 0x20000
> > +
> > +#define PDCP_SN_12_HFN_MAX ((1 << (32 - 12)) - 1)
> > +#define PDCP_SN_12_HFN_MIN 0
> > +#define PDCP_SN_18_HFN_MAX ((1 << (32 - 18)) - 1)
> > +#define PDCP_SN_18_HFN_MIN 0
> > +
> 
> Can we have common defines for SN-12 and SN-18 and take SN as
> parameter?
> We can have something like this.
> 
> #define PDCP_PDU_HDR_SIZE(sn_size) (RTE_ALIGN_MUL_CEIL((sn_size), 8) / 8)
> #define PDCP_GET_SN_FROM_COUNT(c, sn_size) ((c) & ((1<<sn_size)-1))
> #define PDCP_GET_HFN_FROM_COUNT(c, sn_size) (((c) >> sn_size) & ((1 << (32 - sn_size)) - 1))
> #define PDCP_SET_COUNT_FROM_HFN_SN(h, s, sn_size) ((((h) & ((1 << (32 - sn_size)) - 1)) << sn_size) | ((s) & ((1<<sn_size)-1)))
> #define PDCP_HFN_MAX(sn_size) ((1 << (32 - (sn_size))) - 1)
> #define PDCP_HFN_MIN 0
> 

[Anoob] Agreed. Will replace these macros with generic static inlines. 
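Something along these lines (a sketch; final names may differ, and it relies on the enum rte_security_pdcp_sn_size values matching the SN bit widths):

	static inline uint32_t
	pdcp_sn_from_count_get(uint32_t count, enum rte_security_pdcp_sn_size sn_size)
	{
		return count & ((1U << sn_size) - 1);
	}

	static inline uint32_t
	pdcp_hfn_from_count_get(uint32_t count, enum rte_security_pdcp_sn_size sn_size)
	{
		return count >> sn_size;
	}

	static inline uint32_t
	pdcp_count_from_hfn_sn_get(uint32_t hfn, uint32_t sn, enum rte_security_pdcp_sn_size sn_size)
	{
		return (hfn << sn_size) | (sn & ((1U << sn_size) - 1));
	}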

> > +/* IV generation function based on the entity configuration */
> > +typedef void (*iv_gen_t)(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> > +			 uint32_t count);
> > +
> > +enum pdcp_pdu_type {
> > +	PDCP_PDU_TYPE_CTRL = 0,
> > +	PDCP_PDU_TYPE_DATA = 1,
> > +};
> > +
> > +enum pdcp_up_ctrl_pdu_type {
> > +	PDCP_UP_CTRL_PDU_TYPE_STATUS_REPORT,
> > +	PDCP_UP_CTRL_PDU_TYPE_ROHC_FEEDBACK,
> > +	PDCP_UP_CTRL_PDU_TYPE_EHC_FEEDBACK,
> > +	PDCP_UP_CRTL_PDU_TYPE_UDC_FEEDBACK
> > +};
> > +
> > +struct entity_state {
> > +	uint32_t rx_next;
> > +	uint32_t tx_next;
> > +	uint32_t rx_deliv;
> > +	uint32_t rx_reord;
> > +};
> > +
> > +union auth_iv_partial {
> > +	/* For AES-CMAC, there is no IV, but message gets prepended */
> > +	struct {
> > +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
> > +		uint64_t count : 32;
> > +		uint64_t zero_38_39 : 2;
> > +		uint64_t direction : 1;
> > +		uint64_t bearer : 5;
> > +		uint64_t zero_40_63 : 24;
> > +#else
> > +		uint64_t count : 32;
> > +		uint64_t bearer : 5;
> > +		uint64_t direction : 1;
> > +		uint64_t zero_38_39 : 2;
> > +		uint64_t zero_40_63 : 24;
> > +#endif
> > +	} aes_cmac;
> > +	struct {
> > +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
> > +		uint64_t count : 32;
> > +		uint64_t zero_37_39 : 3;
> > +		uint64_t bearer : 5;
> > +		uint64_t zero_40_63 : 24;
> > +
> > +		uint64_t rsvd_65_71 : 7;
> > +		uint64_t direction_64 : 1;
> > +		uint64_t rsvd_72_111 : 40;
> > +		uint64_t rsvd_113_119 : 7;
> > +		uint64_t direction_112 : 1;
> > +		uint64_t rsvd_120_127 : 8;
> > +#else
> > +		uint64_t count : 32;
> > +		uint64_t bearer : 5;
> > +		uint64_t zero_37_39 : 3;
> > +		uint64_t zero_40_63 : 24;
> > +
> > +		uint64_t direction_64 : 1;
> > +		uint64_t rsvd_65_71 : 7;
> > +		uint64_t rsvd_72_111 : 40;
> > +		uint64_t direction_112 : 1;
> > +		uint64_t rsvd_113_119 : 7;
> > +		uint64_t rsvd_120_127 : 8;
> > +#endif
> > +	} zs;
> > +	uint64_t u64[2];
> > +};
> > +
> > +union cipher_iv_partial {
> > +	struct {
> > +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
> > +		uint64_t count : 32;
> > +		uint64_t zero_38_39 : 2;
> > +		uint64_t direction : 1;
> > +		uint64_t bearer : 5;
> > +		uint64_t zero_40_63 : 24;
> > +
> > +		uint64_t zero_64_127;
> > +#else
> > +		uint64_t count : 32;
> > +		uint64_t bearer : 5;
> > +		uint64_t direction : 1;
> > +		uint64_t zero_38_39 : 2;
> > +		uint64_t zero_40_63 : 24;
> > +
> > +		uint64_t zero_64_127;
> 
> Can we take zero_64_127 out of the #if-else?

[Anoob] Agreed. Will address in the next version.
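i.e., something like the below for the aes_ctr member (sketch; the zs member would get the same treatment):

	struct {
	#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		uint64_t count : 32;
		uint64_t zero_38_39 : 2;
		uint64_t direction : 1;
		uint64_t bearer : 5;
		uint64_t zero_40_63 : 24;
	#else
		uint64_t count : 32;
		uint64_t bearer : 5;
		uint64_t direction : 1;
		uint64_t zero_38_39 : 2;
		uint64_t zero_40_63 : 24;
	#endif
		/* Common to both byte orders */
		uint64_t zero_64_127;
	} aes_ctr;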

> 
> > +#endif
> > +	} aes_ctr;
> > +	struct {
> > +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
> > +		uint64_t count : 32;
> > +		uint64_t zero_38_39 : 2;
> > +		uint64_t direction : 1;
> > +		uint64_t bearer : 5;
> > +		uint64_t zero_40_63 : 24;
> > +
> > +		uint64_t rsvd_64_127;
> > +#else
> > +		uint64_t count : 32;
> > +		uint64_t bearer : 5;
> > +		uint64_t direction : 1;
> > +		uint64_t zero_38_39 : 2;
> > +		uint64_t zero_40_63 : 24;
> > +
> > +		uint64_t rsvd_64_127;
> > +#endif
> 
> rsvd_64_127 can also be moved out of the #if-else.
> 
> > +	} zs;
> > +	uint64_t u64[2];
> > +};
> > +
> > +/*
> > + * Layout of PDCP entity: [rte_pdcp_entity] [entity_priv] [entity_dl/ul]
> 
> If the layout is fixed, can we have a 0 length array in rte_pdcp_entity for
> entity_priv and in entity_priv for entity_dl/ul?
> I see that entity_dl/ul are not completely defined. You can define them later
> when they are supported.

[Anoob] The implementation will be part of the next version.
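For reference, the direction being considered is roughly the below (sketch only; the entity_dl/ul parts are still to be worked out):

	/* Private data as a zero-length trailing array in the public struct */
	struct rte_pdcp_entity {
		rte_pdcp_pre_p_t pre_process;
		rte_pdcp_post_p_t post_process;
		uint16_t max_pkt_cache;
		uint64_t user_area[2];
		uint8_t priv[0]; /* entity_priv, followed by entity_dl/ul part */
	} __rte_cache_aligned;

	static inline struct entity_priv *
	entity_priv_get(const struct rte_pdcp_entity *entity)
	{
		return (struct entity_priv *)(uintptr_t)entity->priv;
	}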

> 
> > + */
> > +
> 
> Extra line

[Anoob] The description was not just for the following struct, but for the whole section (three structures, basically). Hence the blank line in between.

> 
> > +struct entity_priv {
> > +	/** Crypto sym session. */
> > +	struct rte_cryptodev_sym_session *crypto_sess;
> > +	/** Entity specific IV generation function. */
> > +	iv_gen_t iv_gen;
> > +	/** Pre-prepared auth IV. */
> > +	union auth_iv_partial auth_iv_part;
> > +	/** Pre-prepared cipher IV. */
> > +	union cipher_iv_partial cipher_iv_part;
> > +	/** Entity state variables. */
> > +	struct entity_state state;
> > +	/** Flags. */
> > +	struct {
> > +		/** PDCP PDU has 4 byte MAC-I. */
> > +		uint64_t is_authenticated : 1;
> > +		/** Cipher offset & length in bits. */
> > +		uint64_t is_ciph_in_bits : 1;
> > +		/** Auth offset & length in bits. */
> > +		uint64_t is_auth_in_bits : 1;
> > +		/** Is UL/transmitting PDCP entity */
> > +		uint64_t is_ul_entity : 1;
> > +	} flags;
> > +	/** Crypto op pool. */
> > +	struct rte_mempool *cop_pool;
> > +	/** PDCP header size. */
> > +	uint8_t hdr_sz;
> > +	/** PDCP AAD size. For AES-CMAC, additional message is prepended for the operation. */
> > +	uint8_t aad_sz;
> > +	/** Device ID of the device to be used for offload. */
> > +	uint8_t dev_id;
> > +};
> > +
> > +struct entity_priv_dl_part {
> > +	/* TODO - when in-order-delivery is supported, post PDCP packets
> > would need to cached. */
> > +	uint8_t dummy;
> > +};
> > +
> > +struct entity_priv_ul_part {
> > +	/*
> > +	 * TODO - when re-establish is supported, both plain & post PDCP packets would need to be
> > +	 * cached.
> > +	 */
> > +	uint8_t dummy;
> > +};
> > +
> > +static inline struct entity_priv *
> > +entity_priv_get(const struct rte_pdcp_entity *entity) {
> > +	return RTE_PTR_ADD(entity, sizeof(struct rte_pdcp_entity));
> > +}
> > +
> > +static inline struct entity_priv_dl_part *
> > +entity_dl_part_get(const struct rte_pdcp_entity *entity) {
> > +	return RTE_PTR_ADD(entity, sizeof(struct rte_pdcp_entity) + sizeof(struct entity_priv));
> > +}
> > +
> > +static inline struct entity_priv_ul_part *
> > +entity_ul_part_get(const struct rte_pdcp_entity *entity) {
> > +	return RTE_PTR_ADD(entity, sizeof(struct rte_pdcp_entity) + sizeof(struct entity_priv));
> > +}
> 
> The above inline functions may also not be needed if we have 0 len arrays.
> 
> > +
> > +static inline int
> > +pdcp_hdr_size_get(enum rte_security_pdcp_sn_size sn_size)
> > +{
> > +	return RTE_ALIGN_MUL_CEIL(sn_size, 8) / 8;
> > +}
> 
> PDCP_PDU_HDR_SIZE is the same as this inline function.
> Can we keep just one of them?

[Anoob] Removed the macro.

> 
> > +
> > +#endif /* _PDCP_ENTITY_H_ */
> > diff --git a/lib/pdcp/pdcp_process.c b/lib/pdcp/pdcp_process.c
> > new file mode 100644
> > index 0000000000..282cf38ec4
> > --- /dev/null
> > +++ b/lib/pdcp/pdcp_process.c
> > @@ -0,0 +1,1195 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2022 Marvell.
> > + */
> > +
> > +#include <rte_crypto.h>
> > +#include <rte_crypto_sym.h>
> > +#include <rte_cryptodev.h>
> > +#include <rte_memcpy.h>
> > +#include <rte_pdcp.h>
> > +#include <rte_pdcp_hdr.h>
> > +
> > +#include "pdcp_crypto.h"
> > +#include "pdcp_entity.h"
> > +#include "pdcp_process.h"
> > +
> > +#define PDCP_MAC_I_LEN 4
> 
> Can you define it at the same place where PDCP_IV_LENGTH is defined and use it in the xform validation?

[Anoob] Moved to pdcp_crypto.h.

> 
> > +
> > +/* Enum of supported algorithms for ciphering */
> > +enum pdcp_cipher_algo {
> > +	PDCP_CIPHER_ALGO_NULL,
> > +	PDCP_CIPHER_ALGO_AES,
> > +	PDCP_CIPHER_ALGO_ZUC,
> > +	PDCP_CIPHER_ALGO_SNOW3G,
> > +	PDCP_CIPHER_ALGO_MAX
> > +};
> > +
> > +/* Enum of supported algorithms for integrity */
> > +enum pdcp_auth_algo {
> > +	PDCP_AUTH_ALGO_NULL,
> > +	PDCP_AUTH_ALGO_AES,
> > +	PDCP_AUTH_ALGO_ZUC,
> > +	PDCP_AUTH_ALGO_SNOW3G,
> > +	PDCP_AUTH_ALGO_MAX
> > +};
> > +
> > +/* IV generation functions based on type of operation (cipher - auth) */
> > +
> > +static void
> > +pdcp_iv_gen_null_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> > +{
> > +	/* No IV required for NULL cipher + NULL auth */
> > +	RTE_SET_USED(cop);
> > +	RTE_SET_USED(en_priv);
> > +	RTE_SET_USED(count);
> > +}
> > +
> > +static void
> > +pdcp_iv_gen_null_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> > +			  uint32_t count)
> > +{
> > +	struct rte_crypto_sym_op *op = cop->sym;
> > +	struct rte_mbuf *mb = op->m_src;
> > +	uint8_t *m_ptr;
> > +	uint64_t m;
> > +
> > +	/* AES-CMAC requires message to be prepended with info on count etc */
> > +
> > +	/* Prepend by 8 bytes to add custom message */
> > +	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
> > +
> > +	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +
> > +	rte_memcpy(m_ptr, &m, 8);
> > +}
> > +
> > +static void
> > +pdcp_iv_gen_null_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> > +{
> > +	uint64_t iv_u64[2];
> > +	uint8_t *iv;
> > +
> > +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> > +
> > +	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	rte_memcpy(iv, &iv_u64[0], 8);
> > +
> > +	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
> > +	rte_memcpy(iv + 8, &iv_u64[1], 8);
> > +}
> > +
> > +static void
> > +pdcp_iv_gen_aes_ctr_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> > +			 uint32_t count)
> > +{
> > +	uint64_t iv_u64[2];
> > +	uint8_t *iv;
> > +
> > +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> > +
> > +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	iv_u64[1] = 0;
> > +	rte_memcpy(iv, iv_u64, 16);
> > +}
> > +
> > +static void
> > +pdcp_iv_gen_zs_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> > +{
> > +	uint64_t iv_u64;
> > +	uint8_t *iv;
> > +
> > +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> > +
> > +	iv_u64 = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	rte_memcpy(iv, &iv_u64, 8);
> > +	rte_memcpy(iv + 8, &iv_u64, 8);
> > +}
> > +
> > +static void
> > +pdcp_iv_gen_zs_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> > +{
> > +	uint64_t iv_u64[2];
> > +	uint8_t *iv;
> > +
> > +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> > +
> > +	/* Generating cipher IV */
> > +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	rte_memcpy(iv, &iv_u64[0], 8);
> > +	rte_memcpy(iv + 8, &iv_u64[0], 8);
> > +
> > +	iv += PDCP_IV_LENGTH;
> > +
> > +	/* Generating auth IV */
> > +	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	rte_memcpy(iv, &iv_u64[0], 8);
> > +
> > +	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
> > +	rte_memcpy(iv + 8, &iv_u64[1], 8);
> > +}
> > +
> > +static void
> > +pdcp_iv_gen_zs_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> > +			uint32_t count)
> > +{
> > +	struct rte_crypto_sym_op *op = cop->sym;
> > +	struct rte_mbuf *mb = op->m_src;
> > +	uint8_t *m_ptr, *iv;
> > +	uint64_t iv_u64[2];
> > +	uint64_t m;
> > +
> > +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> > +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	rte_memcpy(iv, &iv_u64[0], 8);
> > +	rte_memcpy(iv + 8, &iv_u64[0], 8);
> > +
> > +	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
> > +	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	rte_memcpy(m_ptr, &m, 8);
> > +}
> > +
> > +static void
> > +pdcp_iv_gen_aes_ctr_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
> > +			    uint32_t count)
> > +{
> > +	struct rte_crypto_sym_op *op = cop->sym;
> > +	struct rte_mbuf *mb = op->m_src;
> > +	uint8_t *m_ptr, *iv;
> > +	uint64_t iv_u64[2];
> > +	uint64_t m;
> > +
> > +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> > +
> > +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	iv_u64[1] = 0;
> > +	rte_memcpy(iv, iv_u64, PDCP_IV_LENGTH);
> > +
> > +	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
> > +	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	rte_memcpy(m_ptr, &m, 8);
> > +}
> > +
> > +static void
> > +pdcp_iv_gen_aes_ctr_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
> > +{
> > +	uint64_t iv_u64[2];
> > +	uint8_t *iv;
> > +
> > +	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
> > +
> > +	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	iv_u64[1] = 0;
> > +	rte_memcpy(iv, iv_u64, PDCP_IV_LENGTH);
> > +
> > +	iv += PDCP_IV_LENGTH;
> > +
> > +	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
> > +	rte_memcpy(iv, &iv_u64[0], 8);
> > +
> > +	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
> > +	rte_memcpy(iv + 8, &iv_u64[1], 8);
> > +}
> > +
> > +static int
> > +pdcp_crypto_xfrm_get(const struct rte_pdcp_entity_conf *conf, struct rte_crypto_sym_xform **c_xfrm,
> > +		     struct rte_crypto_sym_xform **a_xfrm)
> > +{
> > +	*c_xfrm = NULL;
> > +	*a_xfrm = NULL;
> > +
> > +	if (conf->crypto_xfrm == NULL)
> > +		return -EINVAL;
> > +
> > +	if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> > +		*c_xfrm = conf->crypto_xfrm;
> > +		*a_xfrm = conf->crypto_xfrm->next;
> > +	} else if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> > +		*a_xfrm = conf->crypto_xfrm;
> > +		*c_xfrm = conf->crypto_xfrm->next;
> > +	} else {
> > +		return -EINVAL;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +pdcp_iv_gen_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
> > +{
> > +	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
> > +	enum rte_security_pdcp_direction direction;
> > +	enum pdcp_cipher_algo ciph_algo;
> > +	enum pdcp_auth_algo auth_algo;
> > +	struct entity_priv *en_priv;
> > +	int ret;
> > +
> > +	en_priv = entity_priv_get(entity);
> > +
> > +	direction = conf->pdcp_xfrm.pkt_dir;
> > +	if (conf->reverse_iv_direction)
> > +		direction = !direction;
> > +
> > +	ret = pdcp_crypto_xfrm_get(conf, &c_xfrm, &a_xfrm);
> > +	if (ret)
> > +		return ret;
> > +
> > +	if (c_xfrm == NULL)
> > +		return -EINVAL;
> > +
> > +	memset(&en_priv->auth_iv_part, 0, sizeof(en_priv->auth_iv_part));
> > +	memset(&en_priv->cipher_iv_part, 0, sizeof(en_priv->cipher_iv_part));
> > +
> > +	switch (c_xfrm->cipher.algo) {
> > +	case RTE_CRYPTO_CIPHER_NULL:
> > +		ciph_algo = PDCP_CIPHER_ALGO_NULL;
> > +		break;
> > +	case RTE_CRYPTO_CIPHER_AES_CTR:
> > +		ciph_algo = PDCP_CIPHER_ALGO_AES;
> > +		en_priv->cipher_iv_part.aes_ctr.bearer = conf->pdcp_xfrm.bearer;
> > +		en_priv->cipher_iv_part.aes_ctr.direction = direction;
> > +		break;
> > +	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
> > +		ciph_algo = PDCP_CIPHER_ALGO_SNOW3G;
> > +		en_priv->cipher_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
> > +		en_priv->cipher_iv_part.zs.direction = direction;
> > +		break;
> > +	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
> > +		ciph_algo = PDCP_CIPHER_ALGO_ZUC;
> > +		en_priv->cipher_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
> > +		en_priv->cipher_iv_part.zs.direction = direction;
> > +		break;
> > +	default:
> > +		return -ENOTSUP;
> > +	}
> > +
> > +	if (a_xfrm != NULL) {
> > +		switch (a_xfrm->auth.algo) {
> > +		case RTE_CRYPTO_AUTH_NULL:
> > +			auth_algo = PDCP_AUTH_ALGO_NULL;
> > +			break;
> > +		case RTE_CRYPTO_AUTH_AES_CMAC:
> > +			auth_algo = PDCP_AUTH_ALGO_AES;
> > +			en_priv->auth_iv_part.aes_cmac.bearer = conf->pdcp_xfrm.bearer;
> > +			en_priv->auth_iv_part.aes_cmac.direction = direction;
> > +			break;
> > +		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
> > +			auth_algo = PDCP_AUTH_ALGO_SNOW3G;
> > +			en_priv->auth_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
> > +			en_priv->auth_iv_part.zs.direction_64 = direction;
> > +			en_priv->auth_iv_part.zs.direction_112 = direction;
> > +			break;
> > +		case RTE_CRYPTO_AUTH_ZUC_EIA3:
> > +			auth_algo = PDCP_AUTH_ALGO_ZUC;
> > +			en_priv->auth_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
> > +			en_priv->auth_iv_part.zs.direction_64 = direction;
> > +			en_priv->auth_iv_part.zs.direction_112 = direction;
> > +			break;
> > +		default:
> > +			return -ENOTSUP;
> > +		}
> > +	} else {
> > +		auth_algo = PDCP_AUTH_ALGO_NULL;
> > +	}
> > +
> > +	static const iv_gen_t iv_gen_map[PDCP_CIPHER_ALGO_MAX][PDCP_AUTH_ALGO_MAX] = {
> > +		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_null_null,
> > +		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_null_aes_cmac,
> > +		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_null_zs,
> > +		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_null_zs,
> > +
> > +		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_aes_ctr_null,
> > +		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_aes_ctr_aes_cmac,
> > +		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_aes_ctr_zs,
> > +		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_aes_ctr_zs,
> > +
> > +		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_zs_null,
> > +		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_zs_aes_cmac,
> > +		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_zs_zs,
> > +		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_zs_zs,
> > +
> > +		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_zs_null,
> > +		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_zs_aes_cmac,
> > +		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_zs_zs,
> > +		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_zs_zs,
> > +	};
> > +
> > +	en_priv->iv_gen = iv_gen_map[ciph_algo][auth_algo];
> > +
> > +	return 0;
> > +}
> > +
> > +static inline void
> > +cop_prepare(const struct entity_priv *en_priv, struct rte_mbuf *mb, struct rte_crypto_op *cop,
> > +	    uint8_t data_offset, uint32_t count, const bool is_auth)
> > +{
> > +	const struct rte_crypto_op cop_init = {
> > +		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> > +		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
> > +		.sess_type = RTE_CRYPTO_OP_WITH_SESSION,
> > +	};
> > +	struct rte_crypto_sym_op *op;
> > +	uint32_t pkt_len;
> > +
> > +	const uint8_t ciph_shift = 3 * en_priv->flags.is_ciph_in_bits;
> > +	const uint8_t auth_shift = 3 * en_priv->flags.is_auth_in_bits;
> > +
> > +	op = cop->sym;
> > +	cop->raw = cop_init.raw;
> > +	op->m_src = mb;
> > +	op->m_dst = mb;
> > +
> > +	/* Set IV */
> > +	en_priv->iv_gen(cop, en_priv, count);
> > +
> > +	/* Prepare op */
> > +	pkt_len = rte_pktmbuf_pkt_len(mb);
> > +	op->cipher.data.offset = data_offset << ciph_shift;
> > +	op->cipher.data.length = (pkt_len - data_offset) << ciph_shift;
> > +
> > +	if (is_auth) {
> > +		op->auth.data.offset = 0;
> > +		op->auth.data.length = (pkt_len - PDCP_MAC_I_LEN) << auth_shift;
> > +		op->auth.digest.data = rte_pktmbuf_mtod_offset(mb, uint8_t *,
> > +							       (pkt_len - PDCP_MAC_I_LEN));
> > +	}
> > +
> > +	__rte_crypto_sym_op_attach_sym_session(op, en_priv->crypto_sess);
> > +}
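
The `3 * is_*_in_bits` shift above is a branch-free way to express offsets and
lengths in the units the algorithm expects: SNOW3G/ZUC take cipher/auth offset
and length in bits, while AES and NULL take them in bytes. A minimal
standalone illustration of that conversion (names are illustrative, not part
of the patch):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t
	to_algo_units(uint32_t bytes, int in_bits)
	{
		/* Shift left by 3 (multiply by 8) only when in_bits is 1 */
		return bytes << (3 * in_bits);
	}

	int main(void)
	{
		assert(to_algo_units(16, 0) == 16);  /* AES-CTR: byte units */
		assert(to_algo_units(16, 1) == 128); /* SNOW3G/ZUC: bit units */
		return 0;
	}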
> > +
> > +static inline bool
> > +pdcp_pre_process_uplane_sn_12_ul_set_sn(struct entity_priv *en_priv, struct rte_mbuf *mb,
> > +					uint32_t *count)
> > +{
> > +	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
> > +	const uint8_t hdr_sz = en_priv->hdr_sz;
> > +	uint32_t sn;
> > +
> > +	/* Prepend PDU header */
> > +	pdu_hdr = (struct rte_pdcp_up_data_pdu_sn_12_hdr *)rte_pktmbuf_prepend(mb, hdr_sz);
> > +	if (unlikely(pdu_hdr == NULL))
> > +		return false;
> > +
> > +	/* Update sequence num in the PDU header */
> > +	*count = __atomic_fetch_add(&en_priv->state.tx_next, 1, __ATOMIC_RELAXED);
> > +	sn = PDCP_GET_SN_12_FROM_COUNT(*count);
> > +
> > +	pdu_hdr->d_c = PDCP_PDU_TYPE_DATA;
> > +	pdu_hdr->sn_11_8 = ((sn & 0xf00) >> 8);
> > +	pdu_hdr->sn_7_0 = (sn & 0xff);
> > +	pdu_hdr->r = 0;
> > +	return true;
> > +}
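
The SN packing above splits the 12-bit sequence number across the two header
bytes defined in rte_pdcp_hdr.h. A small self-contained check of the split and
re-assembly arithmetic:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t sn = 0xabc;                 /* 12-bit SN */
		uint8_t sn_11_8 = (sn & 0xf00) >> 8; /* high nibble */
		uint8_t sn_7_0 = sn & 0xff;          /* low byte */

		assert(sn_11_8 == 0xa && sn_7_0 == 0xbc);
		/* Receiver re-assembles the SN from the two fields */
		assert((((uint32_t)sn_11_8 << 8) | sn_7_0) == sn);
		return 0;
	}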
> > +
> > +static uint16_t
> > +pdcp_pre_process_uplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	uint16_t nb_cop;
> > +	uint32_t count;
> > +	int i;
> > +
> > +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> > +
> > +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> > +					  num);
> > +
> > +	if (en_priv->flags.is_authenticated) {
> > +		for (i = 0; i < nb_cop; i++) {
> > +			if (unlikely(rte_pktmbuf_append(mb[i], PDCP_MAC_I_LEN) == NULL))
> > +				goto cop_free;
> > +			if (unlikely(!pdcp_pre_process_uplane_sn_12_ul_set_sn(en_priv, mb[i],
> > +									      &count)))
> > +				goto cop_free;
> > +			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
> > +		}
> > +	} else {
> > +		for (i = 0; i < nb_cop; i++) {
> > +			if (unlikely(!pdcp_pre_process_uplane_sn_12_ul_set_sn(en_priv, mb[i],
> > +									      &count)))
> > +				goto cop_free;
> > +			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, false);
> > +		}
> > +	}
> > +
> > +	*nb_err = num - nb_cop;
> > +	return nb_cop;
> > +cop_free:
> > +	/* Using mempool API since crypto API is not providing bulk free */
> > +	rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[i], nb_cop - i);
> > +	*nb_err = num - i;
> > +	return i;
> > +}
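
On the bulk-free comment in the error path: ops taken via
rte_crypto_op_bulk_alloc() all come from one mempool, so they can be returned
with a single rte_mempool_put_bulk() instead of per-op rte_crypto_op_free()
calls. A hedged sketch of a helper capturing the pattern (the helper name is
hypothetical, not part of the patch):

	#include <rte_crypto.h>
	#include <rte_mempool.h>

	static inline void
	crypto_op_bulk_free(struct rte_mempool *mp, struct rte_crypto_op *cop[],
			    unsigned int n)
	{
		/* All ops came from the same mempool, so one bulk put is
		 * enough; rte_crypto_op_free() would cost n round trips. */
		if (n != 0)
			rte_mempool_put_bulk(mp, (void **)cop, n);
	}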
> > +
> > +static inline bool
> > +pdcp_pre_process_uplane_sn_18_ul_set_sn(struct entity_priv *en_priv, struct rte_mbuf *mb,
> > +					uint32_t *count)
> > +{
> > +	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
> > +	const uint8_t hdr_sz = en_priv->hdr_sz;
> > +	uint32_t sn;
> > +
> > +	/* Prepend PDU header */
> > +	pdu_hdr = (struct rte_pdcp_up_data_pdu_sn_18_hdr *)rte_pktmbuf_prepend(mb, hdr_sz);
> > +	if (unlikely(pdu_hdr == NULL))
> > +		return false;
> > +
> > +	/* Update sequence num in the PDU header */
> > +	*count = __atomic_fetch_add(&en_priv->state.tx_next, 1, __ATOMIC_RELAXED);
> > +	sn = PDCP_GET_SN_18_FROM_COUNT(*count);
> > +
> > +	pdu_hdr->d_c = PDCP_PDU_TYPE_DATA;
> > +	pdu_hdr->sn_17_16 = ((sn & 0x30000) >> 16);
> > +	pdu_hdr->sn_15_8 = ((sn & 0xff00) >> 8);
> > +	pdu_hdr->sn_7_0 = (sn & 0xff);
> > +	pdu_hdr->r = 0;
> > +
> > +	return true;
> > +}
> > +
> > +static inline uint16_t
> > +pdcp_pre_process_uplane_sn_18_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	uint16_t nb_cop;
> > +	uint32_t count;
> > +	int i;
> > +
> > +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> > +
> > +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> > +					  num);
> > +
> > +	if (en_priv->flags.is_authenticated) {
> > +		for (i = 0; i < nb_cop; i++) {
> > +			if (unlikely(rte_pktmbuf_append(mb[i], PDCP_MAC_I_LEN) == NULL))
> > +				goto cop_free;
> > +			if (unlikely(!pdcp_pre_process_uplane_sn_18_ul_set_sn(en_priv, mb[i],
> > +									      &count)))
> > +				goto cop_free;
> > +			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
> > +		}
> > +	} else {
> > +		for (i = 0; i < nb_cop; i++) {
> > +			if (unlikely(!pdcp_pre_process_uplane_sn_18_ul_set_sn(en_priv, mb[i],
> > +									      &count)))
> > +				goto cop_free;
> > +			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, false);
> > +		}
> > +	}
> > +
> > +	*nb_err = num - nb_cop;
> > +	return nb_cop;
> > +
> > +cop_free:
> > +	/* Using mempool API since crypto API is not providing bulk free */
> > +	rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[i], nb_cop - i);
> > +	*nb_err = num - i;
> > +	return i;
> > +}
> > +
> > +static uint16_t
> > +pdcp_pre_process_cplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
> > +	uint32_t count, sn;
> > +	uint16_t nb_cop;
> > +	int i;
> > +
> > +	const uint8_t hdr_sz = en_priv->hdr_sz;
> > +	const uint8_t data_offset = hdr_sz + en_priv->aad_sz;
> > +
> > +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> > +					  num);
> > +
> > +	for (i = 0; i < nb_cop; i++) {
> > +		/* Prepend PDU header */
> > +		pdu_hdr = (struct rte_pdcp_cp_data_pdu_sn_12_hdr *)rte_pktmbuf_prepend(mb[i],
> > +											hdr_sz);
> > +		if (unlikely(pdu_hdr == NULL))
> > +			goto cop_free;
> > +		if (unlikely(rte_pktmbuf_append(mb[i], PDCP_MAC_I_LEN) == NULL))
> > +			goto cop_free;
> > +
> > +		/* Update sequence number in the PDU header */
> > +		count = __atomic_fetch_add(&en_priv->state.tx_next, 1, __ATOMIC_RELAXED);
> > +		sn = PDCP_GET_SN_12_FROM_COUNT(count);
> > +
> > +		pdu_hdr->sn_11_8 = ((sn & 0xf00) >> 8);
> > +		pdu_hdr->sn_7_0 = (sn & 0xff);
> > +		pdu_hdr->r = 0;
> > +
> > +		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
> > +	}
> > +
> > +	*nb_err = num - nb_cop;
> > +	return nb_cop;
> > +
> > +cop_free:
> > +	/* Using mempool API since crypto API is not providing bulk free */
> > +	rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[i], nb_cop - i);
> > +	*nb_err = num - i;
> > +	return i;
> > +}
> > +
> > +static uint16_t
> > +pdcp_post_process_uplane_sn_12_ul(const struct rte_pdcp_entity *entity,
> > +				  struct rte_mbuf *in_mb[],
> > +				  struct rte_mbuf *out_mb[],
> > +				  uint16_t num, uint16_t *nb_err_ret)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	const uint32_t hdr_trim_sz = en_priv->aad_sz;
> > +	int i, nb_success = 0, nb_err = 0;
> > +	struct rte_mbuf *err_mb[num];
> > +	struct rte_mbuf *mb;
> > +
> > +	for (i = 0; i < num; i++) {
> > +		mb = in_mb[i];
> > +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
> > +			err_mb[nb_err++] = mb;
> > +			continue;
> > +		}
> > +
> > +		if (hdr_trim_sz)
> > +			rte_pktmbuf_adj(mb, hdr_trim_sz);
> > +
> > +		out_mb[nb_success++] = mb;
> > +	}
> > +
> > +	if (unlikely(nb_err != 0))
> > +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> > +
> > +	*nb_err_ret = nb_err;
> > +	return nb_success;
> > +}
> > +
> > +static uint16_t
> > +pdcp_post_process_uplane_sn_18_ul(const struct rte_pdcp_entity *entity,
> > +				  struct rte_mbuf *in_mb[],
> > +				  struct rte_mbuf *out_mb[],
> > +				  uint16_t num, uint16_t *nb_err_ret)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	const uint32_t hdr_trim_sz = en_priv->aad_sz;
> > +	int i, nb_success = 0, nb_err = 0;
> > +	struct rte_mbuf *err_mb[num];
> > +	struct rte_mbuf *mb;
> > +
> > +	for (i = 0; i < num; i++) {
> > +		mb = in_mb[i];
> > +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
> > +			err_mb[nb_err++] = mb;
> > +			continue;
> > +		}
> > +
> > +		if (hdr_trim_sz)
> > +			rte_pktmbuf_adj(mb, hdr_trim_sz);
> > +
> > +		out_mb[nb_success++] = mb;
> > +	}
> > +
> > +	if (unlikely(nb_err != 0))
> > +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> > +
> > +	*nb_err_ret = nb_err;
> > +	return nb_success;
> > +}
> > +
> > +static uint16_t
> > +pdcp_post_process_cplane_sn_12_ul(const struct rte_pdcp_entity *entity,
> > +				  struct rte_mbuf *in_mb[],
> > +				  struct rte_mbuf *out_mb[],
> > +				  uint16_t num, uint16_t *nb_err_ret)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	const uint32_t hdr_trim_sz = en_priv->aad_sz;
> > +	int i, nb_success = 0, nb_err = 0;
> > +	struct rte_mbuf *mb, *err_mb[num];
> > +
> > +	for (i = 0; i < num; i++) {
> > +		mb = in_mb[i];
> > +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
> > +			err_mb[nb_err++] = mb;
> > +			continue;
> > +		}
> > +
> > +		if (hdr_trim_sz)
> > +			rte_pktmbuf_adj(mb, hdr_trim_sz);
> > +
> > +		out_mb[nb_success++] = mb;
> > +	}
> > +
> > +	if (unlikely(nb_err != 0))
> > +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> > +
> > +	*nb_err_ret = nb_err;
> > +	return nb_success;
> > +}
> > +
> > +static inline int
> > +pdcp_sn_18_count_get(const struct rte_pdcp_entity *entity, int32_t rsn, uint32_t *count)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	uint32_t rhfn, rx_deliv;
> > +
> > +	rx_deliv = __atomic_load_n(&en_priv->state.rx_deliv, __ATOMIC_RELAXED);
> > +	rhfn = PDCP_GET_HFN_SN_18_FROM_COUNT(rx_deliv);
> > +
> > +	if (rsn < (int32_t)(PDCP_GET_SN_18_FROM_COUNT(rx_deliv) - PDCP_SN_18_WINDOW_SZ)) {
> > +		if (unlikely(rhfn == PDCP_SN_18_HFN_MAX))
> > +			return -ERANGE;
> > +		rhfn += 1;
> > +	} else if ((uint32_t)rsn >= (PDCP_GET_SN_18_FROM_COUNT(rx_deliv) + PDCP_SN_18_WINDOW_SZ)) {
> > +		if (unlikely(rhfn == PDCP_SN_18_HFN_MIN))
> > +			return -ERANGE;
> > +		rhfn -= 1;
> > +	}
> > +
> > +	*count = PDCP_SET_COUNT_FROM_HFN_SN_18(rhfn, rsn);
> > +
> > +	return 0;
> > +}
> > +
> > +static inline int
> > +pdcp_sn_12_count_get(const struct rte_pdcp_entity *entity, int32_t rsn, uint32_t *count)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	uint32_t rhfn, rx_deliv;
> > +
> > +	rx_deliv = __atomic_load_n(&en_priv->state.rx_deliv, __ATOMIC_RELAXED);
> > +	rhfn = PDCP_GET_HFN_SN_12_FROM_COUNT(rx_deliv);
> > +
> > +	if (rsn < (int32_t)(PDCP_GET_SN_12_FROM_COUNT(rx_deliv) - PDCP_SN_12_WINDOW_SZ)) {
> > +		if (unlikely(rhfn == PDCP_SN_12_HFN_MAX))
> > +			return -ERANGE;
> > +		rhfn += 1;
> > +	} else if ((uint32_t)rsn >= (PDCP_GET_SN_12_FROM_COUNT(rx_deliv) + PDCP_SN_12_WINDOW_SZ)) {
> > +		if (unlikely(rhfn == PDCP_SN_12_HFN_MIN))
> > +			return -ERANGE;
> > +		rhfn -= 1;
> > +	}
> > +
> > +	*count = PDCP_SET_COUNT_FROM_HFN_SN_12(rhfn, rsn);
> > +
> > +	return 0;
> > +}
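
Both helpers implement the receive-side COUNT determination of TS 38.323: the
received SN is placed in the HFN window centred on RX_DELIV, bumping the HFN
up or down when the SN has wrapped. A self-contained worked example for the
12-bit case (window = 2^11 = 2048), with the macros expanded inline for
illustration:

	#include <assert.h>
	#include <stdint.h>

	#define SN_12_MASK  0xfffu
	#define WINDOW_12   (1u << 11) /* 2048 */

	int main(void)
	{
		/* RX_DELIV: HFN = 0x1000, SN = 0xffe (near top of SN space) */
		uint32_t rx_deliv = (0x1000u << 12) | 0xffe;
		uint32_t rhfn = rx_deliv >> 12;
		int32_t rsn = 0x002; /* received SN has wrapped around */

		/* 0x002 < SN(RX_DELIV) - window (4094 - 2048): next HFN */
		if (rsn < (int32_t)((rx_deliv & SN_12_MASK) - WINDOW_12))
			rhfn += 1;

		assert(((rhfn << 12) | rsn) == ((0x1001u << 12) | 0x002));
		return 0;
	}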
> > +
> > +static inline uint16_t
> > +pdcp_pre_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				       struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err,
> > +				       const bool is_integ_protected)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
> > +	uint16_t nb_cop;
> > +	int32_t rsn = 0;
> > +	uint32_t count;
> > +	int i;
> > +
> > +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> > +
> > +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> > +					  num);
> > +
> > +	for (i = 0; i < nb_cop; i++) {
> > +
> > +		pdu_hdr = rte_pktmbuf_mtod(mb[i], struct rte_pdcp_up_data_pdu_sn_12_hdr *);
> > +
> > +		/* Check for PDU type */
> > +		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
> > +			rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
> > +		else
> > +			rte_panic("TODO: Control PDU not handled");
> > +
> > +		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
> > +			break;
> > +		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, is_integ_protected);
> > +	}
> > +
> > +	*nb_err = num - nb_cop;
> > +
> > +	return nb_cop;
> > +}
> > +
> > +static uint16_t
> > +pdcp_pre_process_uplane_sn_12_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				    struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> > +{
> > +	return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, true);
> > +}
> > +
> > +static uint16_t
> > +pdcp_pre_process_uplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> > +{
> > +	return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, false);
> > +}
> > +
> > +static inline uint16_t
> > +pdcp_pre_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				       struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err,
> > +				       const bool is_integ_protected)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
> > +	uint16_t nb_cop;
> > +	int32_t rsn = 0;
> > +	uint32_t count;
> > +	int i;
> > +
> > +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> > +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> > +					  num);
> > +
> > +	for (i = 0; i < nb_cop; i++) {
> > +		pdu_hdr = rte_pktmbuf_mtod(mb[i], struct rte_pdcp_up_data_pdu_sn_18_hdr *);
> > +
> > +		/* Check for PDU type */
> > +		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
> > +			rsn = ((pdu_hdr->sn_17_16 << 16) | (pdu_hdr->sn_15_8 << 8) |
> > +			       (pdu_hdr->sn_7_0));
> > +		else
> > +			rte_panic("TODO: Control PDU not handled");
> > +
> > +		if (unlikely(pdcp_sn_18_count_get(entity, rsn, &count)))
> > +			break;
> > +		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, is_integ_protected);
> > +	}
> > +
> > +	*nb_err = num - nb_cop;
> > +
> > +	return nb_cop;
> > +}
> > +
> > +static uint16_t
> > +pdcp_pre_process_uplane_sn_18_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				    struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> > +{
> > +	return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, true);
> > +}
> > +
> > +static uint16_t
> > +pdcp_pre_process_uplane_sn_18_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> > +{
> > +	return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, false);
> > +}
> > +
> > +static uint16_t
> > +pdcp_pre_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
> > +				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
> > +	uint16_t nb_cop;
> > +	uint32_t count;
> > +	int32_t rsn;
> > +	int i;
> > +
> > +	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
> > +
> > +	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
> > +					  num);
> > +
> > +	for (i = 0; i < nb_cop; i++) {
> > +		pdu_hdr = rte_pktmbuf_mtod(mb[i], struct rte_pdcp_cp_data_pdu_sn_12_hdr *);
> > +		rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
> > +		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
> > +			break;
> > +		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
> > +	}
> > +
> > +	*nb_err = num - nb_cop;
> > +	return nb_cop;
> > +}
> > +
> > +static inline bool
> > +pdcp_post_process_update_entity_state(const struct rte_pdcp_entity *entity,
> > +				      const uint32_t count)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +
> > +	if (count < __atomic_load_n(&en_priv->state.rx_deliv, __ATOMIC_RELAXED))
> > +		return false;
> > +
> > +	/* t-Reordering timer is not supported - SDU will be delivered immediately.
> > +	 * Update RX_DELIV to the COUNT value of the first PDCP SDU which has not
> > +	 * been delivered to upper layers
> > +	 */
> > +	__atomic_store_n(&en_priv->state.rx_deliv, (count + 1), __ATOMIC_RELAXED);
> > +
> > +	if (count >= __atomic_load_n(&en_priv->state.rx_next, __ATOMIC_RELAXED))
> > +		__atomic_store_n(&en_priv->state.rx_next, (count + 1), __ATOMIC_RELAXED);
> > +
> > +	return true;
> > +}
> > +
> > +static inline uint16_t
> > +pdcp_post_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity,
> > +					struct rte_mbuf *in_mb[],
> > +					struct rte_mbuf *out_mb[],
> > +					uint16_t num, uint16_t *nb_err_ret,
> > +					const bool is_integ_protected)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
> > +	int i, nb_success = 0, nb_err = 0, rsn = 0;
> > +	const uint32_t aad_sz = en_priv->aad_sz;
> > +	struct rte_mbuf *err_mb[num];
> > +	struct rte_mbuf *mb;
> > +	uint32_t count;
> > +
> > +	const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
> > +
> > +	for (i = 0; i < num; i++) {
> > +		mb = in_mb[i];
> > +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
> > +			goto error;
> > +		pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_up_data_pdu_sn_12_hdr *,
> > +						  aad_sz);
> > +
> > +		/* Check for PDU type */
> > +		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
> > +			rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
> > +		else
> > +			rte_panic("Control PDU should not be received");
> > +
> > +		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
> > +			goto error;
> > +
> > +		if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
> > +			goto error;
> > +
> > +		rte_pktmbuf_adj(mb, hdr_trim_sz);
> > +		if (is_integ_protected)
> > +			rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
> > +		out_mb[nb_success++] = mb;
> > +		continue;
> > +
> > +error:
> > +		err_mb[nb_err++] = mb;
> > +	}
> > +
> > +	if (unlikely(nb_err != 0))
> > +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> > +
> > +	*nb_err_ret = nb_err;
> > +	return nb_success;
> > +}
> > +
> > +static uint16_t
> > +pdcp_post_process_uplane_sn_12_dl_ip(const struct rte_pdcp_entity *entity,
> > +				     struct rte_mbuf *in_mb[],
> > +				     struct rte_mbuf *out_mb[],
> > +				     uint16_t num, uint16_t *nb_err)
> > +{
> > +	return pdcp_post_process_uplane_sn_12_dl_flags(entity, in_mb, out_mb, num, nb_err, true);
> > +}
> > +
> > +static uint16_t
> > +pdcp_post_process_uplane_sn_12_dl(const struct rte_pdcp_entity *entity,
> > +				  struct rte_mbuf *in_mb[],
> > +				  struct rte_mbuf *out_mb[],
> > +				  uint16_t num, uint16_t *nb_err)
> > +{
> > +	return pdcp_post_process_uplane_sn_12_dl_flags(entity, in_mb, out_mb, num, nb_err, false);
> > +}
> > +
> > +static inline uint16_t
> > +pdcp_post_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity,
> > +					struct rte_mbuf *in_mb[],
> > +					struct rte_mbuf *out_mb[],
> > +					uint16_t num, uint16_t *nb_err_ret,
> > +					const bool is_integ_protected)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
> > +	const uint32_t aad_sz = en_priv->aad_sz;
> > +	int i, nb_success = 0, nb_err = 0;
> > +	struct rte_mbuf *mb, *err_mb[num];
> > +	int32_t rsn = 0;
> > +	uint32_t count;
> > +
> > +	const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
> > +
> > +	for (i = 0; i < num; i++) {
> > +		mb = in_mb[i];
> > +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
> > +			goto error;
> > +
> > +		pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_up_data_pdu_sn_18_hdr *,
> > +						  aad_sz);
> > +
> > +		/* Check for PDU type */
> > +		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
> > +			rsn = ((pdu_hdr->sn_17_16 << 16) | (pdu_hdr->sn_15_8 << 8) |
> > +			       (pdu_hdr->sn_7_0));
> > +		else
> > +			rte_panic("Control PDU should not be received");
> > +
> > +		if (unlikely(pdcp_sn_18_count_get(entity, rsn, &count)))
> > +			goto error;
> > +
> > +		if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
> > +			goto error;
> > +
> > +		rte_pktmbuf_adj(mb, hdr_trim_sz);
> > +		if (is_integ_protected)
> > +			rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
> > +		out_mb[nb_success++] = mb;
> > +		continue;
> > +
> > +error:
> > +		err_mb[nb_err++] = mb;
> > +	}
> > +
> > +	if (unlikely(nb_err != 0))
> > +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> > +
> > +	*nb_err_ret = nb_err;
> > +	return nb_success;
> > +}
> > +
> > +static uint16_t
> > +pdcp_post_process_uplane_sn_18_dl_ip(const struct rte_pdcp_entity *entity,
> > +				     struct rte_mbuf *in_mb[],
> > +				     struct rte_mbuf *out_mb[],
> > +				     uint16_t num, uint16_t *nb_err)
> > +{
> > +	return pdcp_post_process_uplane_sn_18_dl_flags(entity, in_mb, out_mb, num, nb_err, true);
> > +}
> > +
> > +static uint16_t
> > +pdcp_post_process_uplane_sn_18_dl(const struct rte_pdcp_entity *entity,
> > +				  struct rte_mbuf *in_mb[],
> > +				  struct rte_mbuf *out_mb[],
> > +				  uint16_t num, uint16_t *nb_err)
> > +{
> > +	return pdcp_post_process_uplane_sn_18_dl_flags(entity, in_mb, out_mb, num, nb_err, false);
> > +}
> > +
> > +static uint16_t
> > +pdcp_post_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity,
> > +				  struct rte_mbuf *in_mb[],
> > +				  struct rte_mbuf *out_mb[],
> > +				  uint16_t num, uint16_t *nb_err_ret)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
> > +	const uint32_t aad_sz = en_priv->aad_sz;
> > +	int i, nb_success = 0, nb_err = 0;
> > +	struct rte_mbuf *err_mb[num];
> > +	struct rte_mbuf *mb;
> > +	uint32_t count;
> > +	int32_t rsn;
> > +
> > +	const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
> > +
> > +	for (i = 0; i < num; i++) {
> > +		mb = in_mb[i];
> > +		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
> > +			goto error;
> > +
> > +		pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_cp_data_pdu_sn_12_hdr *,
> > +						  aad_sz);
> > +		rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
> > +
> > +		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
> > +			goto error;
> > +
> > +		if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
> > +			goto error;
> > +
> > +		rte_pktmbuf_adj(mb, hdr_trim_sz);
> > +		rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
> > +		out_mb[nb_success++] = mb;
> > +		continue;
> > +
> > +error:
> > +		err_mb[nb_err++] = mb;
> > +	}
> > +
> > +	if (unlikely(nb_err != 0))
> > +		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
> > +
> > +	*nb_err_ret = nb_err;
> > +	return nb_success;
> > +}
> > +
> > +static int
> > +pdcp_pre_process_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
> > +{
> > +	struct entity_priv *en_priv = entity_priv_get(entity);
> > +
> > +	entity->pre_process = NULL;
> > +	entity->post_process = NULL;
> > +
> > +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) &&
> > +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> > +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
> > +		entity->pre_process = pdcp_pre_process_cplane_sn_12_ul;
> > +		entity->post_process = pdcp_post_process_cplane_sn_12_ul;
> > +	}
> > +
> > +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) &&
> > +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> > +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK)) {
> > +		entity->pre_process = pdcp_pre_process_cplane_sn_12_dl;
> > +		entity->post_process = pdcp_post_process_cplane_sn_12_dl;
> > +	}
> > +
> > +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> > +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> > +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
> > +		entity->pre_process = pdcp_pre_process_uplane_sn_12_ul;
> > +		entity->post_process = pdcp_post_process_uplane_sn_12_ul;
> > +	}
> > +
> > +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> > +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
> > +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
> > +		entity->pre_process = pdcp_pre_process_uplane_sn_18_ul;
> > +		entity->post_process = pdcp_post_process_uplane_sn_18_ul;
> > +	}
> > +
> > +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> > +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> > +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
> > +	    (en_priv->flags.is_authenticated)) {
> > +		entity->pre_process = pdcp_pre_process_uplane_sn_12_dl_ip;
> > +		entity->post_process = pdcp_post_process_uplane_sn_12_dl_ip;
> > +	}
> > +
> > +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> > +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
> > +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
> > +	    (!en_priv->flags.is_authenticated)) {
> > +		entity->pre_process = pdcp_pre_process_uplane_sn_12_dl;
> > +		entity->post_process = pdcp_post_process_uplane_sn_12_dl;
> > +	}
> > +
> > +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> > +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
> > +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
> > +	    (en_priv->flags.is_authenticated)) {
> > +		entity->pre_process = pdcp_pre_process_uplane_sn_18_dl_ip;
> > +		entity->post_process = pdcp_post_process_uplane_sn_18_dl_ip;
> > +	}
> > +
> > +	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
> > +	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
> > +	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
> > +	    (!en_priv->flags.is_authenticated)) {
> > +		entity->pre_process = pdcp_pre_process_uplane_sn_18_dl;
> > +		entity->post_process = pdcp_post_process_uplane_sn_18_dl;
> > +	}
> > +
> > +	if (entity->pre_process == NULL || entity->post_process == NULL)
> > +		return -ENOTSUP;
> > +
> > +	return 0;
> > +}
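
The eight condition blocks above pick exactly one (pre_process, post_process)
pair keyed on domain, SN size, direction and integrity protection. For
illustration only (this is not how the patch is written), the same selection
could be collapsed into an indexed lookup; the names below are simplified
stand-ins:

	/* Illustrative dispatch table keyed on (domain, sn_size, dir) */
	typedef void (*proc_fn_t)(void);

	static void cp_sn12_ul(void) { }
	static void up_sn18_dl(void) { }

	enum { DOM_CTRL, DOM_DATA, DOM_MAX };
	enum { SN12, SN18, SN_MAX };
	enum { UL, DL, DIR_MAX };

	static const proc_fn_t pre_fn[DOM_MAX][SN_MAX][DIR_MAX] = {
		[DOM_CTRL][SN12][UL] = cp_sn12_ul,
		[DOM_DATA][SN18][DL] = up_sn18_dl,
		/* ... remaining supported combinations ... */
	};

	/* Selection then reduces to a table read:
	 * fn = pre_fn[dom][sn][dir]; if (fn == NULL) return -ENOTSUP; */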
> > +
> > +static int
> > +pdcp_entity_priv_populate(struct entity_priv *en_priv, const struct rte_pdcp_entity_conf *conf)
> > +{
> > +	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
> > +	int ret;
> > +
> > +	/**
> > +	 * flags.is_authenticated
> > +	 *
> > +	 * MAC-I would be added in case of control plane packets and when authentication
> > +	 * transform is not NULL.
> > +	 */
> > +
> > +	if (conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL)
> > +		en_priv->flags.is_authenticated = 1;
> > +
> > +	ret = pdcp_crypto_xfrm_get(conf, &c_xfrm, &a_xfrm);
> > +	if (ret)
> > +		return ret;
> > +
> > +	if (a_xfrm != NULL)
> > +		en_priv->flags.is_authenticated = 1;
> > +
> > +	/**
> > +	 * flags.is_ciph_in_bits
> > +	 *
> > +	 * For ZUC & SNOW3G cipher algos, offset & length need to be provided in bits.
> > +	 */
> > +
> > +	if ((c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2) ||
> > +	    (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_ZUC_EEA3))
> > +		en_priv->flags.is_ciph_in_bits = 1;
> > +
> > +	/**
> > +	 * flags.is_auth_in_bits
> > +	 *
> > +	 * For ZUC & SNOW3G authentication algos, offset & length need to be provided in bits.
> > +	 */
> > +
> > +	if (a_xfrm != NULL) {
> > +		if ((a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) ||
> > +		    (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3))
> > +			en_priv->flags.is_auth_in_bits = 1;
> > +	}
> > +
> > +	/**
> > +	 * flags.is_ul_entity
> > +	 *
> > +	 * Indicate whether the entity is UL/transmitting PDCP entity.
> > +	 */
> > +	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)
> > +		en_priv->flags.is_ul_entity = 1;
> > +
> > +	/**
> > +	 * hdr_sz
> > +	 *
> > +	 * PDCP header size of the entity
> > +	 */
> > +	en_priv->hdr_sz = pdcp_hdr_size_get(conf->pdcp_xfrm.sn_size);
> > +
> > +	/**
> > +	 * aad_sz
> > +	 *
> > +	 * For AES-CMAC, additional message is prepended for processing. Need to be trimmed after
> > +	 * crypto processing is done.
> > +	 */
> > +	if (a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC)
> > +		en_priv->aad_sz = 8;
> > +	else
> > +		en_priv->aad_sz = 0;
> > +
> > +	return 0;
> > +}
> > +
> > +int
> > +pdcp_process_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
> > +{
> > +	struct entity_priv *en_priv;
> > +	int ret;
> > +
> > +	if (entity == NULL || conf == NULL)
> > +		return -EINVAL;
> > +
> > +	en_priv = entity_priv_get(entity);
> > +
> > +	ret = pdcp_iv_gen_func_set(entity, conf);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = pdcp_entity_priv_populate(en_priv, conf);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = pdcp_pre_process_func_set(entity, conf);
> > +	if (ret)
> > +		return ret;
> > +
> > +	return 0;
> > +}
> > diff --git a/lib/pdcp/pdcp_process.h b/lib/pdcp/pdcp_process.h
> > new file mode 100644
> > index 0000000000..c92ab34c40
> > --- /dev/null
> > +++ b/lib/pdcp/pdcp_process.h
> > @@ -0,0 +1,13 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2022 Marvell.
> > + */
> > +
> > +#ifndef _PDCP_PROCESS_H_
> > +#define _PDCP_PROCESS_H_
> > +
> > +#include <rte_pdcp.h>
> > +
> > +int
> > +pdcp_process_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf);
> > +
> > +#endif /* _PDCP_PROCESS_H_ */
> > diff --git a/lib/pdcp/rte_pdcp.c b/lib/pdcp/rte_pdcp.c
> > new file mode 100644
> > index 0000000000..b1533971c2
> > --- /dev/null
> > +++ b/lib/pdcp/rte_pdcp.c
> > @@ -0,0 +1,136 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2022 Marvell.
> > + */
> > +
> > +#include <rte_pdcp.h>
> > +#include <rte_malloc.h>
> > +
> > +#include "pdcp_crypto.h"
> > +#include "pdcp_entity.h"
> > +#include "pdcp_process.h"
> > +
> > +static int
> > +pdcp_entity_size_get(const struct rte_pdcp_entity_conf *conf)
> > +{
> > +	int size;
> > +
> > +	size = sizeof(struct rte_pdcp_entity) + sizeof(struct entity_priv);
> > +
> > +	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK)
> > +		size += sizeof(struct entity_priv_dl_part);
> > +	else if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)
> > +		size += sizeof(struct entity_priv_ul_part);
> > +	else
> > +		return -EINVAL;
> > +
> > +	return RTE_ALIGN_CEIL(size, RTE_CACHE_LINE_SIZE);
> > +}
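
The variable-size allocation above lays out the public entity, the common
private area and the direction-specific part in one cache-aligned block. A
minimal sketch of that layout, assuming entity_priv_get() simply returns the
area immediately after the public struct (types below are illustrative
stand-ins for the ones in pdcp_entity.h):

	#include <stdlib.h>

	struct pub { int x; };  /* stands in for rte_pdcp_entity */
	struct priv { int y; }; /* stands in for entity_priv */

	int main(void)
	{
		/* One allocation; private part trails the public one */
		struct pub *e = calloc(1, sizeof(struct pub) + sizeof(struct priv));
		if (e == NULL)
			return 1;
		struct priv *p = (struct priv *)(e + 1);

		p->y = 1; /* private state lives right behind the handle */
		free(e);
		return 0;
	}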
> > +
> > +struct rte_pdcp_entity *
> > +rte_pdcp_entity_establish(const struct rte_pdcp_entity_conf *conf)
> > +{
> > +	struct rte_pdcp_entity *entity = NULL;
> > +	struct entity_priv *en_priv;
> > +	int ret;
> > +
> > +	if (conf == NULL || conf->cop_pool == NULL) {
> > +		rte_errno = -EINVAL;
> > +		return NULL;
> > +	}
> > +
> > +	if (conf->pdcp_xfrm.en_ordering || conf->pdcp_xfrm.remove_duplicates || conf->is_slrb ||
> > +	    conf->en_sec_offload) {
> > +		rte_errno = -ENOTSUP;
> > +		return NULL;
> > +	}
> > +
> > +	/*
> > +	 * 6.3.2 PDCP SN
> > +	 * Length: 12 or 18 bits as indicated in table 6.3.2-1. The length of the PDCP SN is
> > +	 * configured by upper layers (pdcp-SN-SizeUL, pdcp-SN-SizeDL, or sl-PDCP-SN-Size in
> > +	 * TS 38.331 [3])
> > +	 */
> > +	if ((conf->pdcp_xfrm.sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) &&
> > +	    (conf->pdcp_xfrm.sn_size != RTE_SECURITY_PDCP_SN_SIZE_18)) {
> > +		rte_errno = -ENOTSUP;
> > +		return NULL;
> > +	}
> > +
> > +	if (conf->pdcp_xfrm.hfn || conf->pdcp_xfrm.hfn_threshold) {
> > +		rte_errno = -EINVAL;
> > +		return NULL;
> > +	}
> > +
> > +	entity = rte_zmalloc_socket("pdcp_entity", pdcp_entity_size_get(conf),
> > +				    RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
> > +	if (entity == NULL) {
> > +		rte_errno = -ENOMEM;
> > +		return NULL;
> > +	}
> > +
> > +	en_priv = entity_priv_get(entity);
> > +
> > +	en_priv->state.rx_deliv = conf->count;
> > +	en_priv->state.tx_next = conf->count;
> > +	en_priv->cop_pool = conf->cop_pool;
> > +
> > +	/* Setup crypto session */
> > +	ret = pdcp_crypto_sess_create(entity, conf);
> > +	if (ret)
> > +		goto entity_free;
> > +
> > +	ret = pdcp_process_func_set(entity, conf);
> > +	if (ret)
> > +		goto crypto_sess_destroy;
> > +
> > +	return entity;
> > +
> > +crypto_sess_destroy:
> > +	pdcp_crypto_sess_destroy(entity);
> > +entity_free:
> > +	rte_free(entity);
> > +	rte_errno = ret;
> > +	return NULL;
> > +}
> > +
> > +int
> > +rte_pdcp_entity_release(struct rte_pdcp_entity *pdcp_entity, struct rte_mbuf *out_mb[])
> > +{
> > +	int ret;
> > +
> > +	if (pdcp_entity == NULL)
> > +		return -EINVAL;
> > +
> > +	/* Teardown crypto sessions */
> > +	ret = pdcp_crypto_sess_destroy(pdcp_entity);
> > +	if (ret)
> > +		return ret;
> > +
> > +	rte_free(pdcp_entity);
> > +
> > +	RTE_SET_USED(out_mb);
> > +	return 0;
> > +}
> > +
> > +int
> > +rte_pdcp_entity_suspend(struct rte_pdcp_entity *pdcp_entity,
> > +			struct rte_mbuf *out_mb[])
> > +{
> > +	struct entity_priv *en_priv;
> > +
> > +	if (pdcp_entity == NULL)
> > +		return -EINVAL;
> > +
> > +	en_priv = entity_priv_get(pdcp_entity);
> > +
> > +	if (en_priv->flags.is_ul_entity) {
> > +		en_priv->state.tx_next = 0;
> > +	} else {
> > +		en_priv->state.rx_next = 0;
> > +		en_priv->state.rx_deliv = 0;
> > +	}
> > +
> > +	RTE_SET_USED(out_mb);
> > +
> > +	return 0;
> > +}
> > diff --git a/lib/pdcp/rte_pdcp.h b/lib/pdcp/rte_pdcp.h
> > new file mode 100644
> > index 0000000000..b6c7f32c05
> > --- /dev/null
> > +++ b/lib/pdcp/rte_pdcp.h
> > @@ -0,0 +1,263 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2022 Marvell.
> > + */
> > +
> > +#ifndef _RTE_PDCP_H_
> > +#define _RTE_PDCP_H_
> > +
> > +/**
> > + * @file rte_pdcp.h
> > + *
> > + * RTE PDCP support.
> > + *
> > + * librte_pdcp provides a framework for PDCP protocol processing.
> > + */
> > +
> > +#include <rte_compat.h>
> > +#include <rte_common.h>
> > +#include <rte_errno.h>
> > +#include <rte_mempool.h>
> > +#include <rte_security.h>
> 
> Remove header files which are not needed.
> I do not see any use of rte_errno.h.
> I believe rte_common.h and rte_compat.h are also not needed.
> 
[Anoob] Generally following the philosophy of "include-what-you-own".

rte_errno.h - The API would set rte_errno in certain cases. Also, certain APIs return error numbers in case of failures. Since this is a grey area, I'll follow what you suggest. Please advise.
rte_common.h - For generic & basic defines. Included only to address a build failure that we had early on. There are other similar rte_ headers which include rte_common.h.
rte_compat.h - For 'experimental' markings etc. Also please check this patch:

commit 1094dd940ec0cc4e3ce2c5cd94807350855a17f9
Author: David Marchand <david.marchand@redhat.com>
Date:   Fri Oct 28 14:13:39 2022 +0200

    cleanup compat header inclusions
    
    With symbols going though experimental/stable stages, we accumulated
    a lot of discrepancies about inclusion of the rte_compat.h header.
    
    Some headers are including it where unneeded, while others rely on
    implicit inclusion.

> 
> > +
> > +#ifdef __cplusplus
> > +extern "C" {
> > +#endif
> > +
> > +/* Forward declarations */
> > +struct rte_pdcp_entity;
> > +
> > +/* PDCP pre-process function based on entity configuration */
> > +typedef uint16_t (*rte_pdcp_pre_p_t)(const struct rte_pdcp_entity *entity,
> > +				     struct rte_mbuf *mb[],
> > +				     struct rte_crypto_op *cop[],
> > +				     uint16_t num, uint16_t *nb_err);
> > +
> > +/* PDCP post-process function based on entity configuration */
> > +typedef uint16_t (*rte_pdcp_post_p_t)(const struct rte_pdcp_entity *entity,
> > +				      struct rte_mbuf *in_mb[],
> > +				      struct rte_mbuf *out_mb[],
> > +				      uint16_t num, uint16_t *nb_err);
> > +
> > +/**
> > + * PDCP entity.
> > + */
> > +struct rte_pdcp_entity {
> > +	/** Entity specific pre-process handle. */
> > +	rte_pdcp_pre_p_t pre_process;
> > +	/** Entity specific post-process handle. */
> > +	rte_pdcp_post_p_t post_process;
> > +	/**
> > +	 * PDCP entities may hold packets for purposes of in-order delivery (in
> > +	 * case of receiving PDCP entity) and re-transmission (in case of
> > +	 * transmitting PDCP entity).
> > +	 *
> > +	 * For receiving PDCP entity, it may hold packets when in-order
> > +	 * delivery is enabled. The packets would be cached until either a
> > +	 * packet that completes the sequence arrives or when discard timer
> > +	 * expires.
> > +	 *
> > +	 * When post-processing of PDCP packet which completes a sequence is
> > +	 * done, the API may return more packets than enqueued. Application is
> > +	 * expected to provide *rte_pdcp_pkt_post_process()* with *out_mb*
> > +	 * which can hold maximum number of packets which may be returned.
> > +	 *
> > +	 * For transmitting PDCP entity, during re-establishment (5.1.2),
> > +	 * entity may be required to perform re-transmission of the buffers
> > +	 * after applying new ciphering & integrity algorithms. For performing
> > +	 * crypto operation, *rte_pdcp_entity_re_establish()* would return as
> > +	 * many crypto_ops as the ones cached.
> > +	 */
> > +	uint16_t max_pkt_cache;
> > +	/** User area for saving application data. */
> > +	uint64_t user_area[2];
> > +} __rte_cache_aligned;
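
Since *max_pkt_cache* bounds how many buffered packets an entity can hand
back, the release/suspend calls below are typically paired with an out_mb
array sized from it. A minimal usage sketch per the documented behaviour
(error handling trimmed):

	struct rte_mbuf *out_mb[entity->max_pkt_cache];
	int nb;

	nb = rte_pdcp_entity_release(entity, out_mb);
	if (nb > 0)
		/* Drop (or consume) whatever the entity was still holding */
		rte_pktmbuf_free_bulk(out_mb, nb);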
> > +
> > +/**
> > + * PDCP entity configuration to be used for establishing an entity.
> > + */
> > +struct rte_pdcp_entity_conf {
> > +	/** PDCP transform for the entity. */
> > +	struct rte_security_pdcp_xform pdcp_xfrm;
> > +	/** Crypto transform applicable for the entity. */
> > +	struct rte_crypto_sym_xform *crypto_xfrm;
> > +	/** Mempool for crypto symmetric session. */
> > +	struct rte_mempool *sess_mpool;
> > +	/** Crypto op pool.*/
> > +	struct rte_mempool *cop_pool;
> > +	/**
> > +	 * 32 bit count value (HFN + SN) to be used for the first packet.
> > +	 * pdcp_xfrm.hfn would be ignored as the HFN would be derived from this value.
> > +	 */
> > +	uint32_t count;
> > +	/** Indicate whether the PDCP entity belongs to Side Link Radio Bearer. */
> > +	bool is_slrb;
> > +	/** Enable security offload on the device specified. */
> > +	bool en_sec_offload;
> > +	/** Enable non-atomic usage of entity. */
> > +	bool en_non_atomic;
> > +	/** Device on which security/crypto session need to be created. */
> > +	uint8_t dev_id;
> > +	/** Reverse direction during IV generation. Can be used to simulate UE crypto processing. */
> > +	bool reverse_iv_direction;
> > +};
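
Filling this structure and establishing an entity might look as follows; the
crypto transform chain follows the ordering the library validates
(auth-then-cipher for uplink, cipher-then-auth for downlink). The xform,
mempool and device variables are assumed to be set up elsewhere:

	struct rte_crypto_sym_xform auth_xfrm, cipher_xfrm;
	/* ... algo/key/IV fields of both xforms filled in ... */
	auth_xfrm.next = &cipher_xfrm; /* UL: integrity first, then cipher */
	cipher_xfrm.next = NULL;

	struct rte_pdcp_entity_conf conf = {
		.pdcp_xfrm = {
			.domain = RTE_SECURITY_PDCP_MODE_DATA,
			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_18,
			.bearer = 0,
		},
		.crypto_xfrm = &auth_xfrm,
		.sess_mpool = sess_mp, /* session mempool, created earlier */
		.cop_pool = cop_mp,    /* crypto op mempool, created earlier */
		.count = 0,            /* start from COUNT 0 */
		.dev_id = dev_id,
	};

	struct rte_pdcp_entity *entity = rte_pdcp_entity_establish(&conf);
	if (entity == NULL)
		printf("establish failed: %d\n", rte_errno);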
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice
> > + *
> > + * 5.1.1 PDCP entity establishment
> > + *
> > + * Establish PDCP entity based on provided input configuration.
> > + *
> > + * @param conf
> > + *   Parameters to be used for initializing PDCP entity object.
> > + * @return
> > + *   - Valid handle if success
> > + *   - NULL in case of failure. rte_errno will be set to error code
> > + */
> > +__rte_experimental
> > +struct rte_pdcp_entity *
> > +rte_pdcp_entity_establish(const struct rte_pdcp_entity_conf *conf);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice
> > + *
> > + * 5.1.3 PDCP entity release
> > + *
> > + * Release PDCP entity.
> > + *
> > + * For UL/transmitting PDCP entity, all stored PDCP SDUs would be dropped.
> > + * For DL/receiving PDCP entity, the stored PDCP SDUs would be returned in
> > + * *out_mb* buffer. The buffer should be large enough to hold all cached
> > + * packets in the entity.
> > + *
> > + * @param pdcp_entity
> > + *   Pointer to the PDCP entity to be released.
> > + * @param[out] out_mb
> > + *   The address of an array that can hold up to *rte_pdcp_entity.max_pkt_cache*
> > + *   pointers to *rte_mbuf* structures.
> > + * @return
> > + *   -  0: Success and no cached packets to return
> > + *   - >0: Success and the number of packets returned in out_mb
> > + *   - <0: Error code in case of failures
> > + */
> > +__rte_experimental
> > +int
> > +rte_pdcp_entity_release(struct rte_pdcp_entity *pdcp_entity,
> > +			struct rte_mbuf *out_mb[]);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice
> > + *
> > + * 5.1.4 PDCP entity suspend
> > + *
> > + * Suspend PDCP entity.
> > + *
> > + * For DL/receiving PDCP entity, the stored PDCP SDUs would be returned in
> > + * *out_mb* buffer. The buffer should be large enough to hold all cached
> > + * packets in the entity.
> > + *
> > + * For UL/transmitting PDCP entity, *out_mb* buffer would be unused.
> > + *
> > + * @param pdcp_entity
> > + *   Pointer to the PDCP entity to be suspended.
> > + * @param[out] out_mb
> > + *   The address of an array that can hold up to *rte_pdcp_entity.max_pkt_cache*
> > + *   pointers to *rte_mbuf* structures.
> > + * @return
> > + *   -  0: Success and no cached packets to return
> > + *   - >0: Success and the number of packets returned in out_mb
> > + *   - <0: Error code in case of failures
> > + */
> > +__rte_experimental
> > +int
> > +rte_pdcp_entity_suspend(struct rte_pdcp_entity *pdcp_entity,
> > +			struct rte_mbuf *out_mb[]);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice
> > + *
> > + * For input mbufs and given PDCP entity, pre-process the mbufs and prepare
> > + * crypto ops that can be enqueued to the cryptodev associated with given
> > + * session. Only error packets would be returned in the input buffer,
> > + * *mb*, and it is the responsibility of the application to free the same.
> > + *
> > + * @param entity
> > + *   Pointer to the *rte_pdcp_entity* object the packets belong to.
> > + * @param[in, out] mb
> > + *   The address of an array of *num* pointers to *rte_mbuf* structures
> > + *   which contain the input packets. Any error packets would be returned in the
> > + *   same buffer.
> > + * @param[out] cop
> > + *   The address of an array that can hold up to *num* pointers to
> > + *   *rte_crypto_op* structures. Crypto ops would be allocated by
> > + *   ``rte_pdcp_pkt_pre_process`` API.
> > + * @param num
> > + *   The maximum number of packets to process.
> > + * @param[out] nb_err
> > + *   Pointer to return the number of error packets returned in *mb*
> > + * @return
> > + *   Count of crypto_ops prepared
> > + */
> > +__rte_experimental
> > +static inline uint16_t
> > +rte_pdcp_pkt_pre_process(const struct rte_pdcp_entity *entity,
> > +			 struct rte_mbuf *mb[], struct rte_crypto_op *cop[],
> > +			 uint16_t num, uint16_t *nb_err)
> > +{
> > +	return entity->pre_process(entity, mb, cop, num, nb_err);
> > +}
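
A minimal TX-side usage sketch (assuming *entity*, *dev_id* and *qp_id* are
set up, *mb* holds nb_mb input packets, and error packets trail the
successful ones in *mb* as described above):

	uint16_t nb_err = 0;
	uint16_t nb_cop, nb_enq;

	nb_cop = rte_pdcp_pkt_pre_process(entity, mb, cop, nb_mb, &nb_err);
	if (nb_err != 0)
		rte_pktmbuf_free_bulk(&mb[nb_cop], nb_err);

	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, nb_cop);
	/* nb_enq < nb_cop means the queue pair is full; retry or drop */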
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice
> > + *
> > + * For input mbufs and given PDCP entity, perform PDCP post-processing of the
> > + * mbufs.
> > + *
> > + * Input mbufs are the ones retrieved from crypto_ops dequeued from cryptodev
> > + * and grouped by *rte_pdcp_pkt_crypto_group()*.
> > + *
> > + * The post-processed packets would be returned in the *out_mb* buffer.
> > + * The resultant mbufs would be grouped into success packets and error packets.
> > + * Error packets would be grouped in the end of the array and it is the
> > + * responsibility of the application to handle the same.
> > + *
> > + * When in-order delivery is enabled, PDCP entity may buffer packets and would
> > + * deliver packets only when all prior packets have been post-processed. That
> > + * would result in returning more/less packets than enqueued.
> > + *
> > + * @param entity
> > + *   Pointer to the *rte_pdcp_entity* object the packets belong to.
> > + * @param in_mb
> > + *   The address of an array of *num* pointers to *rte_mbuf* structures.
> > + * @param[out] out_mb
> > + *   The address of an array of *num* pointers to *rte_mbuf* structures
> > + *   to output packets after PDCP post-processing.
> > + * @param num
> > + *   The maximum number of packets to process.
> > + * @param[out] nb_err
> > + *   The number of error packets returned in *out_mb* buffer.
> > + * @return
> > + *   Count of packets returned in *out_mb* buffer.
> > + */
> > +__rte_experimental
> > +static inline uint16_t
> > +rte_pdcp_pkt_post_process(const struct rte_pdcp_entity *entity,
> > +			  struct rte_mbuf *in_mb[],
> > +			  struct rte_mbuf *out_mb[],
> > +			  uint16_t num, uint16_t *nb_err)
> > +{
> > +	return entity->post_process(entity, in_mb, out_mb, num, nb_err);
> > +}
> > +
> > +#include <rte_pdcp_group.h>
> > +
> > +#ifdef __cplusplus
> > +}
> > +#endif
> > +
> > +#endif /* _RTE_PDCP_H_ */
> > diff --git a/lib/pdcp/rte_pdcp_group.h b/lib/pdcp/rte_pdcp_group.h
> > new file mode 100644
> > index 0000000000..2c01c19d4e
> > --- /dev/null
> > +++ b/lib/pdcp/rte_pdcp_group.h
> > @@ -0,0 +1,133 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2022 Marvell.
> > + */
> > +
> > +#ifndef _RTE_PDCP_GROUP_H_
> > +#define _RTE_PDCP_GROUP_H_
> > +
> > +/**
> > + * @file rte_pdcp_group.h
> > + *
> > + * RTE PDCP grouping support.
> > + * It is not recommended to include this file directly, include <rte_pdcp.h>
> > + * instead.
> > + * Provides helper functions to process completed crypto-ops and group related
> > + * packets by sessions they belong to.
> > + */
> > +
> > +#include <rte_common.h>
> > +#include <rte_crypto.h>
> > +#include <rte_cryptodev.h>
> > +#include <rte_security.h>
> 
> Remove header files which are not needed.
> 
> > +
> > +#ifdef __cplusplus
> > +extern "C" {
> > +#endif
> > +
> > +/**
> > + * Group packets belonging to same PDCP entity.
> > + */
> > +struct rte_pdcp_group {
> > +	union {
> > +		uint64_t val;
> > +		void *ptr;
> > +	} id; /**< Grouped by value */
> > +	struct rte_mbuf **m;  /**< Start of the group */
> > +	uint32_t cnt;         /**< Number of entries in the group */
> > +	int32_t rc;           /**< Status code associated with the group */
> > +};
> > +
> > +/**
> > + * Take crypto-op as an input and extract pointer to related PDCP entity.
> > + * @param cop
> > + *   The address of an input *rte_crypto_op* structure.
> > + * @return
> > + *   The pointer to the related *rte_pdcp_entity* structure.
> > + */
> > +static inline struct rte_pdcp_entity *
> > +rte_pdcp_en_from_cop(const struct rte_crypto_op *cop)
> > +{
> > +	void *sess = cop->sym[0].session;
> > +
> > +	if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
> > +		return (struct rte_pdcp_entity *)(uintptr_t)
> > +			rte_security_session_opaque_data_get(sess);
> > +	} else if (cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
> > +		return (struct rte_pdcp_entity *)(uintptr_t)
> > +			rte_cryptodev_sym_session_opaque_data_get(sess);
> > +	}
> 
> This patchset does not support security sessions, so it would be better to
> return NULL for that case.
> Moreover, we can directly call
> rte_cryptodev_sym_session_opaque_data_get(cop->sym[0].session)
> from rte_pdcp_pkt_crypto_group. No need to have a wrapper.

[Anoob] Agreed. Will address in next version.
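
A sketch of the suggested simplification inside the grouping loop below
(crypto sessions only, since that is all this patchset supports; `ps`, `ns`,
`grp`, `n` and `cop` refer to the variables in rte_pdcp_pkt_crypto_group()):

	/* Different entity */
	if (ps != ns) {
		/* ... finalize open group as before ... */

		/* Start new group; the entity handle was stored as session
		 * opaque data at session creation time. */
		grp[n].id.val = rte_cryptodev_sym_session_opaque_data_get(
						cop[i]->sym[0].session);
	}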

> 
> > +
> > +	return NULL;
> > +}
> > +
> > +/**
> > + * Take as input completed crypto ops, extract related mbufs and group them by
> > + * *rte_pdcp_entity* they belong to. Mbufs for which the crypto operation has
> > + * failed would be flagged using *RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED* flag
> > + * in rte_mbuf.ol_flags. The crypto_ops would be freed after the grouping.
> > + *
> > + * Note that the application must ensure only crypto-ops prepared by lib_pdcp are
> > + * provided back to @see rte_pdcp_pkt_crypto_group().
> > + *
> > + * @param cop
> > + *   The address of an array of *num* pointers to the input *rte_crypto_op*
> > + *   structures.
> > + * @param[out] mb
> > + *   The address of an array of *num* pointers to output *rte_mbuf* structures.
> > + * @param[out] grp
> > + *   The address of an array of *num* output *rte_pdcp_group* structures.
> > + * @param num
> > + *   The maximum number of crypto-ops to process.
> > + * @return
> > + *   Number of filled elements in *grp* array.
> > + *
> > + */
> > +static inline uint16_t
> > +rte_pdcp_pkt_crypto_group(struct rte_crypto_op *cop[], struct rte_mbuf *mb[],
> > +			  struct rte_pdcp_group grp[], uint16_t num)
> > +{
> > +	uint32_t i, j = 0, n = 0;
> > +	void *ns, *ps = NULL;
> > +	struct rte_mbuf *m;
> > +
> > +	for (i = 0; i != num; i++) {
> > +		m = cop[i]->sym[0].m_src;
> > +		ns = cop[i]->sym[0].session;
> > +
> > +		m->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
> > +		if (cop[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
> > +			m->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
> > +
> > +		/* Different entity */
> > +		if (ps != ns) {
> > +
> > +			/* Finalize open group and start a new one */
> > +			if (ps != NULL) {
> > +				grp[n].cnt = mb + j - grp[n].m;
> > +				n++;
> > +			}
> > +
> > +			/* Start new group */
> > +			grp[n].m = mb + j;
> > +			ps = ns;
> > +			grp[n].id.ptr = rte_pdcp_en_from_cop(cop[i]);
> > +		}
> > +
> > +		mb[j++] = m;
> > +		rte_crypto_op_free(cop[i]);
> > +	}
> > +
> > +	/* Finalize last group */
> > +	if (ps != NULL) {
> > +		grp[n].cnt = mb + j - grp[n].m;
> > +		n++;
> > +	}
> > +
> > +	return n;
> > +}
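
Putting the RX path together, a minimal dequeue-and-post-process sketch
(BURST, dev_id and qp_id assumed defined by the application):

	struct rte_crypto_op *cop[BURST];
	struct rte_mbuf *mb[BURST], *out_mb[BURST];
	struct rte_pdcp_group grp[BURST];
	uint16_t nb_deq, nb_grp, nb_ok, nb_err;
	uint32_t i;

	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, BURST);
	nb_grp = rte_pdcp_pkt_crypto_group(cop, mb, grp, nb_deq);

	for (i = 0; i < nb_grp; i++) {
		nb_ok = rte_pdcp_pkt_post_process(grp[i].id.ptr, grp[i].m,
						  out_mb, grp[i].cnt, &nb_err);
		/* out_mb[0..nb_ok-1] hold ready SDUs; the nb_err packets
		 * that follow them failed and must be handled/freed. */
	}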
> > +
> > +#ifdef __cplusplus
> > +}
> > +#endif
> > +
> > +#endif /* _RTE_PDCP_GROUP_H_ */
> > diff --git a/lib/pdcp/version.map b/lib/pdcp/version.map
> > new file mode 100644
> > index 0000000000..8fa9d5d7cc
> > --- /dev/null
> > +++ b/lib/pdcp/version.map
> > @@ -0,0 +1,13 @@
> > +EXPERIMENTAL {
> > +	global:
> > +
> > +	# added in 22.11
> 
> Change to 23.03
> 
> > +	rte_pdcp_entity_establish;
> > +	rte_pdcp_entity_release;
> > +	rte_pdcp_entity_suspend;
> > +
> > +	rte_pdcp_pkt_post_process;
> > +	rte_pdcp_pkt_pre_process;
> > +
> > +	local: *;
> > +};
> > --
> > 2.25.1
  

Patch

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index ae4b107240..6014bee079 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -126,7 +126,8 @@  The public API headers are grouped by topics:
   [eCPRI](@ref rte_ecpri.h),
   [L2TPv2](@ref rte_l2tpv2.h),
   [PPP](@ref rte_ppp.h),
-  [PDCP hdr](@ref rte_pdcp_hdr.h)
+  [PDCP hdr](@ref rte_pdcp_hdr.h),
+  [PDCP](@ref rte_pdcp.h),
 
 - **QoS**:
   [metering](@ref rte_meter.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index f0886c3bd1..01314b087e 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -61,6 +61,7 @@  INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/net \
                           @TOPDIR@/lib/pcapng \
                           @TOPDIR@/lib/pci \
+                          @TOPDIR@/lib/pdcp \
                           @TOPDIR@/lib/pdump \
                           @TOPDIR@/lib/pipeline \
                           @TOPDIR@/lib/port \
diff --git a/lib/meson.build b/lib/meson.build
index fd55925340..a827006d29 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -63,6 +63,7 @@  libraries = [
         'flow_classify', # flow_classify lib depends on pkt framework table lib
         'graph',
         'node',
+        'pdcp', # pdcp lib depends on crypto and security
 ]
 
 optional_libs = [
diff --git a/lib/pdcp/meson.build b/lib/pdcp/meson.build
new file mode 100644
index 0000000000..a7f5a408cf
--- /dev/null
+++ b/lib/pdcp/meson.build
@@ -0,0 +1,8 @@ 
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2022 Marvell.
+#
+
+sources = files('pdcp_crypto.c', 'pdcp_process.c', 'rte_pdcp.c')
+headers = files('rte_pdcp.h')
+
+deps += ['security']
diff --git a/lib/pdcp/pdcp_crypto.c b/lib/pdcp/pdcp_crypto.c
new file mode 100644
index 0000000000..7ffb8a07a7
--- /dev/null
+++ b/lib/pdcp/pdcp_crypto.c
@@ -0,0 +1,240 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#include <rte_crypto.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+#include <rte_pdcp.h>
+
+#include "pdcp_crypto.h"
+#include "pdcp_entity.h"
+
+static int
+pdcp_crypto_caps_cipher_verify(uint8_t dev_id, const struct rte_crypto_sym_xform *c_xfrm)
+{
+	const struct rte_cryptodev_symmetric_capability *cap;
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	int ret;
+
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	cap_idx.algo.cipher = c_xfrm->cipher.algo;
+
+	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
+	if (cap == NULL)
+		return -1;
+
+	ret = rte_cryptodev_sym_capability_check_cipher(cap, c_xfrm->cipher.key.length,
+							c_xfrm->cipher.iv.length);
+
+	return ret;
+}
+
+static int
+pdcp_crypto_caps_auth_verify(uint8_t dev_id, const struct rte_crypto_sym_xform *a_xfrm)
+{
+	const struct rte_cryptodev_symmetric_capability *cap;
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	int ret;
+
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	cap_idx.algo.auth = a_xfrm->auth.algo;
+
+	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
+	if (cap == NULL)
+		return -1;
+
+	ret = rte_cryptodev_sym_capability_check_auth(cap, a_xfrm->auth.key.length,
+						      a_xfrm->auth.digest_length,
+						      a_xfrm->auth.iv.length);
+
+	return ret;
+}
+
+static int
+pdcp_crypto_xfrm_validate(const struct rte_pdcp_entity_conf *conf,
+				 const struct rte_crypto_sym_xform *c_xfrm,
+				 const struct rte_crypto_sym_xform *a_xfrm,
+				 bool is_auth_then_cipher)
+{
+	uint16_t ciph_iv_len, auth_digest_len, auth_iv_len;
+	int ret;
+
+	/*
+	 * Uplink means PDCP entity is configured for transmit. Downlink means PDCP entity is
+	 * configured for receive. When integrity protection is enabled, PDCP always performs
+	 * digest-encrypted or auth-gen-encrypt for uplink (and decrypt-auth-verify for downlink).
+	 * So for uplink, crypto chain would be auth-cipher while for downlink it would be
+	 * cipher-auth.
+	 *
+	 * When integrity protection is not required, xform would be cipher only.
+	 */
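+
+	/*
+	 * A minimal sketch of an uplink chain consistent with the above
+	 * (illustrative only; the variable names are assumptions, not library API):
+	 *
+	 *	a_xfrm.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	 *	a_xfrm.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+	 *	a_xfrm.next = &c_xfrm;
+	 *	c_xfrm.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	 *	c_xfrm.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+	 *	c_xfrm.next = NULL;
+	 */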
+
+	if (c_xfrm == NULL)
+		return -EINVAL;
+
+	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK) {
+
+		/* With UPLINK, if auth is enabled, it should be before cipher */
+		if (a_xfrm != NULL && !is_auth_then_cipher)
+			return -EINVAL;
+
+		/* With UPLINK, cipher operation must be encrypt */
+		if (c_xfrm->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+			return -EINVAL;
+
+		/* With UPLINK, auth operation (if present) must be generate */
+		if (a_xfrm != NULL && a_xfrm->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE)
+			return -EINVAL;
+
+	} else if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) {
+
+		/* With DOWNLINK, if auth is enabled, it should be after cipher */
+		if (a_xfrm != NULL && is_auth_then_cipher)
+			return -EINVAL;
+
+		/* With DOWNLINK, cipher operation must be decrypt */
+		if (c_xfrm->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT)
+			return -EINVAL;
+
+		/* With DOWNLINK, auth operation (if present) must be verify */
+		if (a_xfrm != NULL && a_xfrm->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY)
+			return -EINVAL;
+
+	} else {
+		return -EINVAL;
+	}
+
+	if ((c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_NULL) &&
+	    (c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_AES_CTR) &&
+	    (c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3) &&
+	    (c_xfrm->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2))
+		return -EINVAL;
+
+	if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+		ciph_iv_len = 0;
+	else
+		ciph_iv_len = PDCP_IV_LENGTH;
+
+	if (ciph_iv_len != c_xfrm->cipher.iv.length)
+		return -EINVAL;
+
+	if (a_xfrm != NULL) {
+		if ((a_xfrm->auth.algo != RTE_CRYPTO_AUTH_NULL) &&
+		    (a_xfrm->auth.algo != RTE_CRYPTO_AUTH_AES_CMAC) &&
+		    (a_xfrm->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3) &&
+		    (a_xfrm->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2))
+			return -EINVAL;
+
+		if (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)
+			auth_digest_len = 0;
+		else
+			auth_digest_len = 4;
+
+		if (auth_digest_len != a_xfrm->auth.digest_length)
+			return -EINVAL;
+
+		if ((a_xfrm->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) ||
+		    (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2))
+			auth_iv_len = PDCP_IV_LENGTH;
+		else
+			auth_iv_len = 0;
+
+		if (a_xfrm->auth.iv.length != auth_iv_len)
+			return -EINVAL;
+	}
+
+	if (!rte_cryptodev_is_valid_dev(conf->dev_id))
+		return -EINVAL;
+
+	ret = pdcp_crypto_caps_cipher_verify(conf->dev_id, c_xfrm);
+	if (ret)
+		return -ENOTSUP;
+
+	if (a_xfrm != NULL) {
+		ret = pdcp_crypto_caps_auth_verify(conf->dev_id, a_xfrm);
+		if (ret)
+			return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+int
+pdcp_crypto_sess_create(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
+{
+	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
+	struct entity_priv *en_priv;
+	bool is_auth_then_cipher;
+	int ret;
+
+	if (entity == NULL || conf == NULL || conf->crypto_xfrm == NULL)
+		return -EINVAL;
+
+	en_priv = entity_priv_get(entity);
+
+	en_priv->dev_id = conf->dev_id;
+
+	if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		c_xfrm = conf->crypto_xfrm;
+		a_xfrm = conf->crypto_xfrm->next;
+		is_auth_then_cipher = false;
+	} else if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		a_xfrm = conf->crypto_xfrm;
+		c_xfrm = conf->crypto_xfrm->next;
+		is_auth_then_cipher = true;
+	} else {
+		return -EINVAL;
+	}
+
+	ret = pdcp_crypto_xfrm_validate(conf, c_xfrm, a_xfrm, is_auth_then_cipher);
+	if (ret)
+		return ret;
+
+	if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+		c_xfrm->cipher.iv.offset = 0;
+	else
+		c_xfrm->cipher.iv.offset = PDCP_IV_OFFSET;
+
+	if (a_xfrm != NULL) {
+		if (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)
+			a_xfrm->auth.iv.offset = 0;
+		else if (c_xfrm->cipher.iv.offset)
+			a_xfrm->auth.iv.offset = PDCP_IV_OFFSET + PDCP_IV_LENGTH;
+		else
+			a_xfrm->auth.iv.offset = PDCP_IV_OFFSET;
+	}
+
+	if (conf->sess_mpool == NULL)
+		return -EINVAL;
+
+	en_priv->crypto_sess = rte_cryptodev_sym_session_create(conf->dev_id, conf->crypto_xfrm,
+								conf->sess_mpool);
+	if (en_priv->crypto_sess == NULL) {
+		/* Session create API sets rte_errno to a positive error code on failure */
+		return -rte_errno;
+	}
+
+	rte_cryptodev_sym_session_opaque_data_set(en_priv->crypto_sess, (uint64_t)entity);
+
+	return 0;
+}
+
+int
+pdcp_crypto_sess_destroy(struct rte_pdcp_entity *entity)
+{
+	struct entity_priv *en_priv;
+
+	if (entity == NULL)
+		return -EINVAL;
+
+	en_priv = entity_priv_get(entity);
+
+	if (en_priv->crypto_sess != NULL) {
+		rte_cryptodev_sym_session_free(en_priv->dev_id, en_priv->crypto_sess);
+		en_priv->crypto_sess = NULL;
+	}
+
+	return 0;
+}
diff --git a/lib/pdcp/pdcp_crypto.h b/lib/pdcp/pdcp_crypto.h
new file mode 100644
index 0000000000..dc625b35d0
--- /dev/null
+++ b/lib/pdcp/pdcp_crypto.h
@@ -0,0 +1,20 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#ifndef _PDCP_CRYPTO_H_
+#define _PDCP_CRYPTO_H_
+
+#include <rte_crypto.h>
+#include <rte_crypto_sym.h>
+#include <rte_pdcp.h>
+
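+/* Per-op IV is stored in the crypto op, immediately after the symmetric op structure. */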
+#define PDCP_IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
+#define PDCP_IV_LENGTH 16
+
+int pdcp_crypto_sess_create(struct rte_pdcp_entity *entity,
+			    const struct rte_pdcp_entity_conf *conf);
+
+int pdcp_crypto_sess_destroy(struct rte_pdcp_entity *entity);
+
+#endif /* _PDCP_CRYPTO_H_ */
diff --git a/lib/pdcp/pdcp_entity.h b/lib/pdcp/pdcp_entity.h
new file mode 100644
index 0000000000..e312fd4a8c
--- /dev/null
+++ b/lib/pdcp/pdcp_entity.h
@@ -0,0 +1,218 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#ifndef _PDCP_ENTITY_H_
+#define _PDCP_ENTITY_H_
+
+#include <rte_common.h>
+#include <rte_crypto_sym.h>
+#include <rte_mempool.h>
+#include <rte_pdcp.h>
+#include <rte_security.h>
+
+struct entity_priv;
+
+#define PDCP_PDU_HDR_SIZE_SN_12 (RTE_ALIGN_MUL_CEIL(12, 8) / 8)
+#define PDCP_PDU_HDR_SIZE_SN_18 (RTE_ALIGN_MUL_CEIL(18, 8) / 8)
+
+#define PDCP_GET_SN_12_FROM_COUNT(c) ((c) & 0xfff)
+#define PDCP_GET_SN_18_FROM_COUNT(c) ((c) & 0x3ffff)
+
+#define PDCP_GET_HFN_SN_12_FROM_COUNT(c) (((c) >> 12) & 0xfffff)
+#define PDCP_GET_HFN_SN_18_FROM_COUNT(c) (((c) >> 18) & 0x3fff)
+
+#define PDCP_SET_COUNT_FROM_HFN_SN_12(h, s) ((((h) & 0xfffff) << 12) | ((s) & 0xfff))
+#define PDCP_SET_COUNT_FROM_HFN_SN_18(h, s) ((((h) & 0x3fff) << 18) | ((s) & 0x3ffff))
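+
+/* Example: with 12 bit SN, HFN 0x1 and SN 0xabc give COUNT 0x1abc. */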
+
+#define PDCP_SN_12_WINDOW_SZ 0x800
+#define PDCP_SN_18_WINDOW_SZ 0x20000
+
+#define PDCP_SN_12_HFN_MAX ((1 << (32 - 12)) - 1)
+#define PDCP_SN_12_HFN_MIN 0
+#define PDCP_SN_18_HFN_MAX ((1 << (32 - 18)) - 1)
+#define PDCP_SN_18_HFN_MIN 0
+
+/* IV generation function based on the entity configuration */
+typedef void (*iv_gen_t)(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
+			 uint32_t count);
+
+enum pdcp_pdu_type {
+	PDCP_PDU_TYPE_CTRL = 0,
+	PDCP_PDU_TYPE_DATA = 1,
+};
+
+enum pdcp_up_ctrl_pdu_type {
+	PDCP_UP_CTRL_PDU_TYPE_STATUS_REPORT,
+	PDCP_UP_CTRL_PDU_TYPE_ROHC_FEEDBACK,
+	PDCP_UP_CTRL_PDU_TYPE_EHC_FEEDBACK,
+	PDCP_UP_CTRL_PDU_TYPE_UDC_FEEDBACK
+};
+
+struct entity_state {
+	uint32_t rx_next;
+	uint32_t tx_next;
+	uint32_t rx_deliv;
+	uint32_t rx_reord;
+};
+
+union auth_iv_partial {
+	/* For AES-CMAC, there is no IV, but message gets prepended */
+	struct {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+		uint64_t count : 32;
+		uint64_t zero_38_39 : 2;
+		uint64_t direction : 1;
+		uint64_t bearer : 5;
+		uint64_t zero_40_63 : 24;
+#else
+		uint64_t count : 32;
+		uint64_t bearer : 5;
+		uint64_t direction : 1;
+		uint64_t zero_38_39 : 2;
+		uint64_t zero_40_63 : 24;
+#endif
+	} aes_cmac;
+	struct {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+		uint64_t count : 32;
+		uint64_t zero_37_39 : 3;
+		uint64_t bearer : 5;
+		uint64_t zero_40_63 : 24;
+
+		uint64_t rsvd_65_71 : 7;
+		uint64_t direction_64 : 1;
+		uint64_t rsvd_72_111 : 40;
+		uint64_t rsvd_113_119 : 7;
+		uint64_t direction_112 : 1;
+		uint64_t rsvd_120_127 : 8;
+#else
+		uint64_t count : 32;
+		uint64_t bearer : 5;
+		uint64_t zero_37_39 : 3;
+		uint64_t zero_40_63 : 24;
+
+		uint64_t direction_64 : 1;
+		uint64_t rsvd_65_71 : 7;
+		uint64_t rsvd_72_111 : 40;
+		uint64_t direction_112 : 1;
+		uint64_t rsvd_113_119 : 7;
+		uint64_t rsvd_120_127 : 8;
+#endif
+	} zs;
+	uint64_t u64[2];
+};
+
+union cipher_iv_partial {
+	struct {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+		uint64_t count : 32;
+		uint64_t zero_38_39 : 2;
+		uint64_t direction : 1;
+		uint64_t bearer : 5;
+		uint64_t zero_40_63 : 24;
+
+		uint64_t zero_64_127;
+#else
+		uint64_t count : 32;
+		uint64_t bearer : 5;
+		uint64_t direction : 1;
+		uint64_t zero_38_39 : 2;
+		uint64_t zero_40_63 : 24;
+
+		uint64_t zero_64_127;
+#endif
+	} aes_ctr;
+	struct {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+		uint64_t count : 32;
+		uint64_t zero_38_39 : 2;
+		uint64_t direction : 1;
+		uint64_t bearer : 5;
+		uint64_t zero_40_63 : 24;
+
+		uint64_t rsvd_64_127;
+#else
+		uint64_t count : 32;
+		uint64_t bearer : 5;
+		uint64_t direction : 1;
+		uint64_t zero_38_39 : 2;
+		uint64_t zero_40_63 : 24;
+
+		uint64_t rsvd_64_127;
+#endif
+	} zs;
+	uint64_t u64[2];
+};
+
+/*
+ * Layout of PDCP entity: [rte_pdcp_entity] [entity_priv] [entity_dl/ul]
+ */
+
+struct entity_priv {
+	/** Crypto sym session. */
+	struct rte_cryptodev_sym_session *crypto_sess;
+	/** Entity specific IV generation function. */
+	iv_gen_t iv_gen;
+	/** Pre-prepared auth IV. */
+	union auth_iv_partial auth_iv_part;
+	/** Pre-prepared cipher IV. */
+	union cipher_iv_partial cipher_iv_part;
+	/** Entity state variables. */
+	struct entity_state state;
+	/** Flags. */
+	struct {
+		/** PDCP PDU has 4 byte MAC-I. */
+		uint64_t is_authenticated : 1;
+		/** Cipher offset & length in bits. */
+		uint64_t is_ciph_in_bits : 1;
+		/** Auth offset & length in bits. */
+		uint64_t is_auth_in_bits : 1;
+		/** Is UL/transmitting PDCP entity */
+		uint64_t is_ul_entity : 1;
+	} flags;
+	/** Crypto op pool. */
+	struct rte_mempool *cop_pool;
+	/** PDCP header size. */
+	uint8_t hdr_sz;
+	/** PDCP AAD size. For AES-CMAC, an additional message is prepended for the operation. */
+	uint8_t aad_sz;
+	/** Device ID of the device to be used for offload. */
+	uint8_t dev_id;
+};
+
+struct entity_priv_dl_part {
+	/* TODO - when in-order delivery is supported, post-PDCP packets would need to be cached. */
+	uint8_t dummy;
+};
+
+struct entity_priv_ul_part {
+	/*
+	 * TODO - when re-establish is supported, both plain & post PDCP packets would need to be
+	 * cached.
+	 */
+	uint8_t dummy;
+};
+
+static inline struct entity_priv *
+entity_priv_get(const struct rte_pdcp_entity *entity)
+{
+	return RTE_PTR_ADD(entity, sizeof(struct rte_pdcp_entity));
+}
+
+static inline struct entity_priv_dl_part *
+entity_dl_part_get(const struct rte_pdcp_entity *entity)
+{
+	return RTE_PTR_ADD(entity, sizeof(struct rte_pdcp_entity) + sizeof(struct entity_priv));
+}
+
+static inline struct entity_priv_ul_part *
+entity_ul_part_get(const struct rte_pdcp_entity *entity)
+{
+	return RTE_PTR_ADD(entity, sizeof(struct rte_pdcp_entity) + sizeof(struct entity_priv));
+}
+
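+/* SN is stored in whole bytes: 12 bit SN -> 2 byte header, 18 bit SN -> 3 byte header. */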
+static inline int
+pdcp_hdr_size_get(enum rte_security_pdcp_sn_size sn_size)
+{
+	return RTE_ALIGN_MUL_CEIL(sn_size, 8) / 8;
+}
+
+#endif /* _PDCP_ENTITY_H_ */
diff --git a/lib/pdcp/pdcp_process.c b/lib/pdcp/pdcp_process.c
new file mode 100644
index 0000000000..282cf38ec4
--- /dev/null
+++ b/lib/pdcp/pdcp_process.c
@@ -0,0 +1,1195 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#include <rte_crypto.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+#include <rte_memcpy.h>
+#include <rte_pdcp.h>
+#include <rte_pdcp_hdr.h>
+
+#include "pdcp_crypto.h"
+#include "pdcp_entity.h"
+#include "pdcp_process.h"
+
+#define PDCP_MAC_I_LEN 4
+
+/* Enum of supported algorithms for ciphering */
+enum pdcp_cipher_algo {
+	PDCP_CIPHER_ALGO_NULL,
+	PDCP_CIPHER_ALGO_AES,
+	PDCP_CIPHER_ALGO_ZUC,
+	PDCP_CIPHER_ALGO_SNOW3G,
+	PDCP_CIPHER_ALGO_MAX
+};
+
+/* Enum of supported algorithms for integrity */
+enum pdcp_auth_algo {
+	PDCP_AUTH_ALGO_NULL,
+	PDCP_AUTH_ALGO_AES,
+	PDCP_AUTH_ALGO_ZUC,
+	PDCP_AUTH_ALGO_SNOW3G,
+	PDCP_AUTH_ALGO_MAX
+};
+
+/* IV generation functions based on type of operation (cipher - auth) */
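+/*
+ * The constant fields (BEARER, DIRECTION) of each IV are precomputed when the
+ * entity is established; per packet, only the 32-bit COUNT (big endian) is
+ * OR-ed into the prepared words.
+ */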
+
+static void
+pdcp_iv_gen_null_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
+{
+	/* No IV required for NULL cipher + NULL auth */
+	RTE_SET_USED(cop);
+	RTE_SET_USED(en_priv);
+	RTE_SET_USED(count);
+}
+
+static void
+pdcp_iv_gen_null_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
+			  uint32_t count)
+{
+	struct rte_crypto_sym_op *op = cop->sym;
+	struct rte_mbuf *mb = op->m_src;
+	uint8_t *m_ptr;
+	uint64_t m;
+
+	/* AES-CMAC has no IV; the message is prepended with COUNT, BEARER and DIRECTION instead */
+
+	/* Prepend by 8 bytes to add custom message */
+	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
+
+	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+
+	rte_memcpy(m_ptr, &m, 8);
+}
+
+static void
+pdcp_iv_gen_null_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
+{
+	uint64_t iv_u64[2];
+	uint8_t *iv;
+
+	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
+
+	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	rte_memcpy(iv, &iv_u64[0], 8);
+
+	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
+	rte_memcpy(iv + 8, &iv_u64[1], 8);
+}
+
+static void
+pdcp_iv_gen_aes_ctr_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
+			 uint32_t count)
+{
+	uint64_t iv_u64[2];
+	uint8_t *iv;
+
+	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
+
+	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	iv_u64[1] = 0;
+	rte_memcpy(iv, iv_u64, 16);
+}
+
+static void
+pdcp_iv_gen_zs_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
+{
+	uint64_t iv_u64;
+	uint8_t *iv;
+
+	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
+
+	iv_u64 = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	rte_memcpy(iv, &iv_u64, 8);
+	rte_memcpy(iv + 8, &iv_u64, 8);
+}
+
+static void
+pdcp_iv_gen_zs_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
+{
+	uint64_t iv_u64[2];
+	uint8_t *iv;
+
+	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
+
+	/* Generating cipher IV */
+	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	rte_memcpy(iv, &iv_u64[0], 8);
+	rte_memcpy(iv + 8, &iv_u64[0], 8);
+
+	iv += PDCP_IV_LENGTH;
+
+	/* Generating auth IV */
+	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	rte_memcpy(iv, &iv_u64[0], 8);
+
+	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
+	rte_memcpy(iv + 8, &iv_u64[1], 8);
+}
+
+static void
+pdcp_iv_gen_zs_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
+			uint32_t count)
+{
+	struct rte_crypto_sym_op *op = cop->sym;
+	struct rte_mbuf *mb = op->m_src;
+	uint8_t *m_ptr, *iv;
+	uint64_t iv_u64[2];
+	uint64_t m;
+
+	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
+	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	rte_memcpy(iv, &iv_u64[0], 8);
+	rte_memcpy(iv + 8, &iv_u64[0], 8);
+
+	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
+	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	rte_memcpy(m_ptr, &m, 8);
+}
+
+static void
+pdcp_iv_gen_aes_ctr_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
+			    uint32_t count)
+{
+	struct rte_crypto_sym_op *op = cop->sym;
+	struct rte_mbuf *mb = op->m_src;
+	uint8_t *m_ptr, *iv;
+	uint64_t iv_u64[2];
+	uint64_t m;
+
+	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
+
+	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	iv_u64[1] = 0;
+	rte_memcpy(iv, iv_u64, PDCP_IV_LENGTH);
+
+	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
+	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	rte_memcpy(m_ptr, &m, 8);
+}
+
+static void
+pdcp_iv_gen_aes_ctr_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
+{
+	uint64_t iv_u64[2];
+	uint8_t *iv;
+
+	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
+
+	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	iv_u64[1] = 0;
+	rte_memcpy(iv, iv_u64, PDCP_IV_LENGTH);
+
+	iv += PDCP_IV_LENGTH;
+
+	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
+	rte_memcpy(iv, &iv_u64[0], 8);
+
+	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
+	rte_memcpy(iv + 8, &iv_u64[1], 8);
+}
+
+static int
+pdcp_crypto_xfrm_get(const struct rte_pdcp_entity_conf *conf, struct rte_crypto_sym_xform **c_xfrm,
+		     struct rte_crypto_sym_xform **a_xfrm)
+{
+	*c_xfrm = NULL;
+	*a_xfrm = NULL;
+
+	if (conf->crypto_xfrm == NULL)
+		return -EINVAL;
+
+	if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		*c_xfrm = conf->crypto_xfrm;
+		*a_xfrm = conf->crypto_xfrm->next;
+	} else if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		*a_xfrm = conf->crypto_xfrm;
+		*c_xfrm = conf->crypto_xfrm->next;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+pdcp_iv_gen_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
+{
+	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
+	enum rte_security_pdcp_direction direction;
+	enum pdcp_cipher_algo ciph_algo;
+	enum pdcp_auth_algo auth_algo;
+	struct entity_priv *en_priv;
+	int ret;
+
+	en_priv = entity_priv_get(entity);
+
+	direction = conf->pdcp_xfrm.pkt_dir;
+	if (conf->reverse_iv_direction)
+		direction = !direction;
+
+	ret = pdcp_crypto_xfrm_get(conf, &c_xfrm, &a_xfrm);
+	if (ret)
+		return ret;
+
+	if (c_xfrm == NULL)
+		return -EINVAL;
+
+	memset(&en_priv->auth_iv_part, 0, sizeof(en_priv->auth_iv_part));
+	memset(&en_priv->cipher_iv_part, 0, sizeof(en_priv->cipher_iv_part));
+
+	switch (c_xfrm->cipher.algo) {
+	case RTE_CRYPTO_CIPHER_NULL:
+		ciph_algo = PDCP_CIPHER_ALGO_NULL;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		ciph_algo = PDCP_CIPHER_ALGO_AES;
+		en_priv->cipher_iv_part.aes_ctr.bearer = conf->pdcp_xfrm.bearer;
+		en_priv->cipher_iv_part.aes_ctr.direction = direction;
+		break;
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+		ciph_algo = PDCP_CIPHER_ALGO_SNOW3G;
+		en_priv->cipher_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
+		en_priv->cipher_iv_part.zs.direction = direction;
+		break;
+	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+		ciph_algo = PDCP_CIPHER_ALGO_ZUC;
+		en_priv->cipher_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
+		en_priv->cipher_iv_part.zs.direction = direction;
+		break;
+	default:
+		return -ENOTSUP;
+	}
+
+	if (a_xfrm != NULL) {
+		switch (a_xfrm->auth.algo) {
+		case RTE_CRYPTO_AUTH_NULL:
+			auth_algo = PDCP_AUTH_ALGO_NULL;
+			break;
+		case RTE_CRYPTO_AUTH_AES_CMAC:
+			auth_algo = PDCP_AUTH_ALGO_AES;
+			en_priv->auth_iv_part.aes_cmac.bearer = conf->pdcp_xfrm.bearer;
+			en_priv->auth_iv_part.aes_cmac.direction = direction;
+			break;
+		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+			auth_algo = PDCP_AUTH_ALGO_SNOW3G;
+			en_priv->auth_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
+			en_priv->auth_iv_part.zs.direction_64 = direction;
+			en_priv->auth_iv_part.zs.direction_112 = direction;
+			break;
+		case RTE_CRYPTO_AUTH_ZUC_EIA3:
+			auth_algo = PDCP_AUTH_ALGO_ZUC;
+			en_priv->auth_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
+			en_priv->auth_iv_part.zs.direction_64 = direction;
+			en_priv->auth_iv_part.zs.direction_112 = direction;
+			break;
+		default:
+			return -ENOTSUP;
+		}
+	} else {
+		auth_algo = PDCP_AUTH_ALGO_NULL;
+	}
+
+	static const iv_gen_t iv_gen_map[PDCP_CIPHER_ALGO_MAX][PDCP_AUTH_ALGO_MAX] = {
+		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_null_null,
+		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_null_aes_cmac,
+		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_null_zs,
+		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_null_zs,
+
+		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_aes_ctr_null,
+		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_aes_ctr_aes_cmac,
+		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_aes_ctr_zs,
+		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_aes_ctr_zs,
+
+		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_zs_null,
+		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_zs_aes_cmac,
+		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_zs_zs,
+		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_zs_zs,
+
+		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_zs_null,
+		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_zs_aes_cmac,
+		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_zs_zs,
+		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_zs_zs,
+	};
+
+	en_priv->iv_gen = iv_gen_map[ciph_algo][auth_algo];
+
+	return 0;
+}
+
+static inline void
+cop_prepare(const struct entity_priv *en_priv, struct rte_mbuf *mb, struct rte_crypto_op *cop,
+	    uint8_t data_offset, uint32_t count, const bool is_auth)
+{
+	const struct rte_crypto_op cop_init = {
+		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+		.sess_type = RTE_CRYPTO_OP_WITH_SESSION,
+	};
+	struct rte_crypto_sym_op *op;
+	uint32_t pkt_len;
+
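+	/* SNOW3G/ZUC expect cipher/auth offsets & lengths in bits; <<3 converts bytes to bits. */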
+	const uint8_t ciph_shift = 3 * en_priv->flags.is_ciph_in_bits;
+	const uint8_t auth_shift = 3 * en_priv->flags.is_auth_in_bits;
+
+	op = cop->sym;
+	cop->raw = cop_init.raw;
+	op->m_src = mb;
+	op->m_dst = mb;
+
+	/* Set IV */
+	en_priv->iv_gen(cop, en_priv, count);
+
+	/* Prepare op */
+	pkt_len = rte_pktmbuf_pkt_len(mb);
+	op->cipher.data.offset = data_offset << ciph_shift;
+	op->cipher.data.length = (pkt_len - data_offset) << ciph_shift;
+
+	if (is_auth) {
+		op->auth.data.offset = 0;
+		op->auth.data.length = (pkt_len - PDCP_MAC_I_LEN) << auth_shift;
+		op->auth.digest.data = rte_pktmbuf_mtod_offset(mb, uint8_t *,
+							       (pkt_len - PDCP_MAC_I_LEN));
+	}
+
+	__rte_crypto_sym_op_attach_sym_session(op, en_priv->crypto_sess);
+}
+
+static inline bool
+pdcp_pre_process_uplane_sn_12_ul_set_sn(struct entity_priv *en_priv, struct rte_mbuf *mb,
+					uint32_t *count)
+{
+	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
+	const uint8_t hdr_sz = en_priv->hdr_sz;
+	uint32_t sn;
+
+	/* Prepend PDU header */
+	pdu_hdr = (struct rte_pdcp_up_data_pdu_sn_12_hdr *)rte_pktmbuf_prepend(mb, hdr_sz);
+	if (unlikely(pdu_hdr == NULL))
+		return false;
+
+	/* Update sequence num in the PDU header */
+	*count = __atomic_fetch_add(&en_priv->state.tx_next, 1, __ATOMIC_RELAXED);
+	sn = PDCP_GET_SN_12_FROM_COUNT(*count);
+
+	pdu_hdr->d_c = PDCP_PDU_TYPE_DATA;
+	pdu_hdr->sn_11_8 = ((sn & 0xf00) >> 8);
+	pdu_hdr->sn_7_0 = (sn & 0xff);
+	pdu_hdr->r = 0;
+	return true;
+}
+
+static uint16_t
+pdcp_pre_process_uplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	uint16_t nb_cop;
+	uint32_t count;
+	int i;
+
+	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
+
+	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
+					  num);
+
+	if (en_priv->flags.is_authenticated) {
+		for (i = 0; i < nb_cop; i++) {
+			if (unlikely(rte_pktmbuf_append(mb[i], PDCP_MAC_I_LEN) == NULL))
+				goto cop_free;
+			if (unlikely(!pdcp_pre_process_uplane_sn_12_ul_set_sn(en_priv, mb[i],
+									      &count)))
+				goto cop_free;
+			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
+		}
+	} else {
+		for (i = 0; i < nb_cop; i++) {
+			if (unlikely(!pdcp_pre_process_uplane_sn_12_ul_set_sn(en_priv, mb[i],
+									      &count)))
+				goto cop_free;
+			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, false);
+		}
+	}
+
+	*nb_err = num - nb_cop;
+	return nb_cop;
+cop_free:
+	/* Using mempool API since crypto API does not provide bulk free */
+	rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[i], nb_cop - i);
+	*nb_err = num - i;
+	return i;
+}
+
+static inline bool
+pdcp_pre_process_uplane_sn_18_ul_set_sn(struct entity_priv *en_priv, struct rte_mbuf *mb,
+					uint32_t *count)
+{
+	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
+	const uint8_t hdr_sz = en_priv->hdr_sz;
+	uint32_t sn;
+
+	/* Prepend PDU header */
+	pdu_hdr = (struct rte_pdcp_up_data_pdu_sn_18_hdr *)rte_pktmbuf_prepend(mb, hdr_sz);
+	if (unlikely(pdu_hdr == NULL))
+		return false;
+
+	/* Update sequence num in the PDU header */
+	*count = __atomic_fetch_add(&en_priv->state.tx_next, 1, __ATOMIC_RELAXED);
+	sn = PDCP_GET_SN_18_FROM_COUNT(*count);
+
+	pdu_hdr->d_c = PDCP_PDU_TYPE_DATA;
+	pdu_hdr->sn_17_16 = ((sn & 0x30000) >> 16);
+	pdu_hdr->sn_15_8 = ((sn & 0xff00) >> 8);
+	pdu_hdr->sn_7_0 = (sn & 0xff);
+	pdu_hdr->r = 0;
+
+	return true;
+}
+
+static inline uint16_t
+pdcp_pre_process_uplane_sn_18_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	uint16_t nb_cop;
+	uint32_t count;
+	int i;
+
+	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
+
+	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
+					  num);
+
+	if (en_priv->flags.is_authenticated) {
+		for (i = 0; i < nb_cop; i++) {
+			if (unlikely(rte_pktmbuf_append(mb[i], PDCP_MAC_I_LEN) == NULL))
+				goto cop_free;
+			if (unlikely(!pdcp_pre_process_uplane_sn_18_ul_set_sn(en_priv, mb[i],
+									      &count)))
+				goto cop_free;
+			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
+		}
+	} else {
+		for (i = 0; i < nb_cop; i++) {
+			if (unlikely(!pdcp_pre_process_uplane_sn_18_ul_set_sn(en_priv, mb[i],
+									      &count)))
+				goto cop_free;
+			cop_prepare(en_priv, mb[i], cop[i], data_offset, count, false);
+		}
+	}
+
+	*nb_err = num - nb_cop;
+	return nb_cop;
+
+cop_free:
+	/* Using mempool API since crypto API does not provide bulk free */
+	rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[i], nb_cop - i);
+	*nb_err = num - i;
+	return i;
+}
+
+static uint16_t
+pdcp_pre_process_cplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
+	uint32_t count, sn;
+	uint16_t nb_cop;
+	int i;
+
+	const uint8_t hdr_sz = en_priv->hdr_sz;
+	const uint8_t data_offset = hdr_sz + en_priv->aad_sz;
+
+	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
+					  num);
+
+	for (i = 0; i < nb_cop; i++) {
+		/* Prepend PDU header */
+		pdu_hdr = (struct rte_pdcp_cp_data_pdu_sn_12_hdr *)rte_pktmbuf_prepend(mb[i],
+										       hdr_sz);
+		if (unlikely(pdu_hdr == NULL))
+			goto cop_free;
+		if (unlikely(rte_pktmbuf_append(mb[i], PDCP_MAC_I_LEN) == NULL))
+			goto cop_free;
+
+		/* Update sequence number in the PDU header */
+		count = __atomic_fetch_add(&en_priv->state.tx_next, 1, __ATOMIC_RELAXED);
+		sn = PDCP_GET_SN_12_FROM_COUNT(count);
+
+		pdu_hdr->sn_11_8 = ((sn & 0xf00) >> 8);
+		pdu_hdr->sn_7_0 = (sn & 0xff);
+		pdu_hdr->r = 0;
+
+		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
+	}
+
+	*nb_err = num - nb_cop;
+	return nb_cop;
+
+cop_free:
+	/* Using mempool API since crypto API does not provide bulk free */
+	rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[i], nb_cop - i);
+	*nb_err = num - i;
+	return i;
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_12_ul(const struct rte_pdcp_entity *entity,
+				  struct rte_mbuf *in_mb[],
+				  struct rte_mbuf *out_mb[],
+				  uint16_t num, uint16_t *nb_err_ret)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	const uint32_t hdr_trim_sz = en_priv->aad_sz;
+	int i, nb_success = 0, nb_err = 0;
+	struct rte_mbuf *err_mb[num];
+	struct rte_mbuf *mb;
+
+	for (i = 0; i < num; i++) {
+		mb = in_mb[i];
+		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
+			err_mb[nb_err++] = mb;
+			continue;
+		}
+
+		if (hdr_trim_sz)
+			rte_pktmbuf_adj(mb, hdr_trim_sz);
+
+		out_mb[nb_success++] = mb;
+	}
+
+	if (unlikely(nb_err != 0))
+		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
+
+	*nb_err_ret = nb_err;
+	return nb_success;
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_18_ul(const struct rte_pdcp_entity *entity,
+				  struct rte_mbuf *in_mb[],
+				  struct rte_mbuf *out_mb[],
+				  uint16_t num, uint16_t *nb_err_ret)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	const uint32_t hdr_trim_sz = en_priv->aad_sz;
+	int i, nb_success = 0, nb_err = 0;
+	struct rte_mbuf *err_mb[num];
+	struct rte_mbuf *mb;
+
+	for (i = 0; i < num; i++) {
+		mb = in_mb[i];
+		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
+			err_mb[nb_err++] = mb;
+			continue;
+		}
+
+		if (hdr_trim_sz)
+			rte_pktmbuf_adj(mb, hdr_trim_sz);
+
+		out_mb[nb_success++] = mb;
+	}
+
+	if (unlikely(nb_err != 0))
+		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
+
+	*nb_err_ret = nb_err;
+	return nb_success;
+}
+
+static uint16_t
+pdcp_post_process_cplane_sn_12_ul(const struct rte_pdcp_entity *entity,
+				  struct rte_mbuf *in_mb[],
+				  struct rte_mbuf *out_mb[],
+				  uint16_t num, uint16_t *nb_err_ret)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	const uint32_t hdr_trim_sz = en_priv->aad_sz;
+	int i, nb_success = 0, nb_err = 0;
+	struct rte_mbuf *mb, *err_mb[num];
+
+	for (i = 0; i < num; i++) {
+		mb = in_mb[i];
+		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
+			err_mb[nb_err++] = mb;
+			continue;
+		}
+
+		if (hdr_trim_sz)
+			rte_pktmbuf_adj(mb, hdr_trim_sz);
+
+		out_mb[nb_success++] = mb;
+	}
+
+	if (unlikely(nb_err != 0))
+		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
+
+	*nb_err_ret = nb_err;
+	return nb_success;
+}
+
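+/*
+ * Derive COUNT from a received SN (no t-Reordering): if the SN lags RX_DELIV's
+ * SN by more than half the SN space (the window size), the SN wrapped and the
+ * next HFN applies; if it leads by more than half the SN space, it belongs to
+ * the previous HFN.
+ */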
+static inline int
+pdcp_sn_18_count_get(const struct rte_pdcp_entity *entity, int32_t rsn, uint32_t *count)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	uint32_t rhfn, rx_deliv;
+
+	rx_deliv = __atomic_load_n(&en_priv->state.rx_deliv, __ATOMIC_RELAXED);
+	rhfn = PDCP_GET_HFN_SN_18_FROM_COUNT(rx_deliv);
+
+	if (rsn < (int32_t)(PDCP_GET_SN_18_FROM_COUNT(rx_deliv) - PDCP_SN_18_WINDOW_SZ)) {
+		if (unlikely(rhfn == PDCP_SN_18_HFN_MAX))
+			return -ERANGE;
+		rhfn += 1;
+	} else if ((uint32_t)rsn >= (PDCP_GET_SN_18_FROM_COUNT(rx_deliv) + PDCP_SN_18_WINDOW_SZ)) {
+		if (unlikely(rhfn == PDCP_SN_18_HFN_MIN))
+			return -ERANGE;
+		rhfn -= 1;
+	}
+
+	*count = PDCP_SET_COUNT_FROM_HFN_SN_18(rhfn, rsn);
+
+	return 0;
+}
+
+static inline int
+pdcp_sn_12_count_get(const struct rte_pdcp_entity *entity, int32_t rsn, uint32_t *count)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	uint32_t rhfn, rx_deliv;
+
+	rx_deliv = __atomic_load_n(&en_priv->state.rx_deliv, __ATOMIC_RELAXED);
+	rhfn = PDCP_GET_HFN_SN_12_FROM_COUNT(rx_deliv);
+
+	if (rsn < (int32_t)(PDCP_GET_SN_12_FROM_COUNT(rx_deliv) - PDCP_SN_12_WINDOW_SZ)) {
+		if (unlikely(rhfn == PDCP_SN_12_HFN_MAX))
+			return -ERANGE;
+		rhfn += 1;
+	} else if ((uint32_t)rsn >= (PDCP_GET_SN_12_FROM_COUNT(rx_deliv) + PDCP_SN_12_WINDOW_SZ)) {
+		if (unlikely(rhfn == PDCP_SN_12_HFN_MIN))
+			return -ERANGE;
+		rhfn -= 1;
+	}
+
+	*count = PDCP_SET_COUNT_FROM_HFN_SN_12(rhfn, rsn);
+
+	return 0;
+}
+
+static inline uint16_t
+pdcp_pre_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				       struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err,
+				       const bool is_integ_protected)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
+	uint16_t nb_cop;
+	int32_t rsn = 0;
+	uint32_t count;
+	int i;
+
+	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
+
+	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
+					  num);
+
+	for (i = 0; i < nb_cop; i++) {
+
+		pdu_hdr = rte_pktmbuf_mtod(mb[i], struct rte_pdcp_up_data_pdu_sn_12_hdr *);
+
+		/* Check for PDU type */
+		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
+			rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
+		else
+			rte_panic("TODO: Control PDU not handled");
+
+		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
+			break;
+		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, is_integ_protected);
+	}
+
+	*nb_err = num - nb_cop;
+
+	return nb_cop;
+}
+
+static uint16_t
+pdcp_pre_process_uplane_sn_12_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				    struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+	return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, true);
+}
+
+static uint16_t
+pdcp_pre_process_uplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+	return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, false);
+}
+
+static inline uint16_t
+pdcp_pre_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				       struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err,
+				       const bool is_integ_protected)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
+	uint16_t nb_cop;
+	int32_t rsn = 0;
+	uint32_t count;
+	int i;
+
+	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
+	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
+					  num);
+
+	for (i = 0; i < nb_cop; i++) {
+		pdu_hdr = rte_pktmbuf_mtod(mb[i], struct rte_pdcp_up_data_pdu_sn_18_hdr *);
+
+		/* Check for PDU type */
+		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
+			rsn = ((pdu_hdr->sn_17_16 << 16) | (pdu_hdr->sn_15_8 << 8) |
+			       (pdu_hdr->sn_7_0));
+		else
+			rte_panic("TODO: Control PDU not handled");
+
+		if (unlikely(pdcp_sn_18_count_get(entity, rsn, &count)))
+			break;
+		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, is_integ_protected);
+	}
+
+	*nb_err = num - nb_cop;
+
+	return nb_cop;
+}
+
+static uint16_t
+pdcp_pre_process_uplane_sn_18_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				    struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+	return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, true);
+}
+
+static uint16_t
+pdcp_pre_process_uplane_sn_18_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+	return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, false);
+}
+
+static uint16_t
+pdcp_pre_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
+	uint16_t nb_cop;
+	uint32_t count;
+	int32_t rsn;
+	int i;
+
+	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
+
+	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
+					  num);
+
+	for (i = 0; i < nb_cop; i++) {
+		pdu_hdr = rte_pktmbuf_mtod(mb[i], struct rte_pdcp_cp_data_pdu_sn_12_hdr *);
+		rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
+		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
+			break;
+		cop_prepare(en_priv, mb[i], cop[i], data_offset, count, true);
+	}
+
+	*nb_err = num - nb_cop;
+	return nb_cop;
+}
+
+static inline bool
+pdcp_post_process_update_entity_state(const struct rte_pdcp_entity *entity,
+				      const uint32_t count)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+
+	if (count < __atomic_load_n(&en_priv->state.rx_deliv, __ATOMIC_RELAXED))
+		return false;
+
+	/* t-Reordering timer is not supported - SDU will be delivered immediately.
+	 * Update RX_DELIV to the COUNT value of the first PDCP SDU which has not
+	 * been delivered to upper layers
+	 */
+	__atomic_store_n(&en_priv->state.rx_deliv, (count + 1), __ATOMIC_RELAXED);
+
+	if (count >= __atomic_load_n(&en_priv->state.rx_next, __ATOMIC_RELAXED))
+		__atomic_store_n(&en_priv->state.rx_next, (count + 1), __ATOMIC_RELAXED);
+
+	return true;
+}
+
+static inline uint16_t
+pdcp_post_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity,
+					struct rte_mbuf *in_mb[],
+					struct rte_mbuf *out_mb[],
+					uint16_t num, uint16_t *nb_err_ret,
+					const bool is_integ_protected)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
+	int i, nb_success = 0, nb_err = 0, rsn = 0;
+	const uint32_t aad_sz = en_priv->aad_sz;
+	struct rte_mbuf *err_mb[num];
+	struct rte_mbuf *mb;
+	uint32_t count;
+
+	const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
+
+	for (i = 0; i < num; i++) {
+		mb = in_mb[i];
+		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
+			goto error;
+		pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_up_data_pdu_sn_12_hdr *,
+						  aad_sz);
+
+		/* Check for PDU type */
+		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
+			rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
+		else
+			rte_panic("Control PDU should not be received");
+
+		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
+			goto error;
+
+		if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
+			goto error;
+
+		rte_pktmbuf_adj(mb, hdr_trim_sz);
+		if (is_integ_protected)
+			rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
+		out_mb[nb_success++] = mb;
+		continue;
+
+error:
+		err_mb[nb_err++] = mb;
+	}
+
+	if (unlikely(nb_err != 0))
+		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
+
+	*nb_err_ret = nb_err;
+	return nb_success;
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_12_dl_ip(const struct rte_pdcp_entity *entity,
+				     struct rte_mbuf *in_mb[],
+				     struct rte_mbuf *out_mb[],
+				     uint16_t num, uint16_t *nb_err)
+{
+	return pdcp_post_process_uplane_sn_12_dl_flags(entity, in_mb, out_mb, num, nb_err, true);
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_12_dl(const struct rte_pdcp_entity *entity,
+				  struct rte_mbuf *in_mb[],
+				  struct rte_mbuf *out_mb[],
+				  uint16_t num, uint16_t *nb_err)
+{
+	return pdcp_post_process_uplane_sn_12_dl_flags(entity, in_mb, out_mb, num, nb_err, false);
+}
+
+static inline uint16_t
+pdcp_post_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity,
+					struct rte_mbuf *in_mb[],
+					struct rte_mbuf *out_mb[],
+					uint16_t num, uint16_t *nb_err_ret,
+					const bool is_integ_protected)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
+	const uint32_t aad_sz = en_priv->aad_sz;
+	int i, nb_success = 0, nb_err = 0;
+	struct rte_mbuf *mb, *err_mb[num];
+	int32_t rsn = 0;
+	uint32_t count;
+
+	const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
+
+	for (i = 0; i < num; i++) {
+		mb = in_mb[i];
+		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
+			goto error;
+
+		pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_up_data_pdu_sn_18_hdr *,
+						  aad_sz);
+
+		/* Check for PDU type */
+		if (likely(pdu_hdr->d_c == PDCP_PDU_TYPE_DATA))
+			rsn = ((pdu_hdr->sn_17_16 << 16) | (pdu_hdr->sn_15_8 << 8) |
+			       (pdu_hdr->sn_7_0));
+		else
+			rte_panic("Control PDU should not be received");
+
+		if (unlikely(pdcp_sn_18_count_get(entity, rsn, &count)))
+			goto error;
+
+		if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
+			goto error;
+
+		rte_pktmbuf_adj(mb, hdr_trim_sz);
+		if (is_integ_protected)
+			rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
+		out_mb[nb_success++] = mb;
+		continue;
+
+error:
+		err_mb[nb_err++] = mb;
+	}
+
+	if (unlikely(nb_err != 0))
+		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
+
+	*nb_err_ret = nb_err;
+	return nb_success;
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_18_dl_ip(const struct rte_pdcp_entity *entity,
+				     struct rte_mbuf *in_mb[],
+				     struct rte_mbuf *out_mb[],
+				     uint16_t num, uint16_t *nb_err)
+{
+	return pdcp_post_process_uplane_sn_18_dl_flags(entity, in_mb, out_mb, num, nb_err, true);
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_18_dl(const struct rte_pdcp_entity *entity,
+				  struct rte_mbuf *in_mb[],
+				  struct rte_mbuf *out_mb[],
+				  uint16_t num, uint16_t *nb_err)
+{
+	return pdcp_post_process_uplane_sn_18_dl_flags(entity, in_mb, out_mb, num, nb_err, false);
+}
+
+static uint16_t
+pdcp_post_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity,
+				  struct rte_mbuf *in_mb[],
+				  struct rte_mbuf *out_mb[],
+				  uint16_t num, uint16_t *nb_err_ret)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
+	const uint32_t aad_sz = en_priv->aad_sz;
+	int i, nb_success = 0, nb_err = 0;
+	struct rte_mbuf *err_mb[num];
+	struct rte_mbuf *mb;
+	uint32_t count;
+	int32_t rsn;
+
+	const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
+
+	for (i = 0; i < num; i++) {
+		mb = in_mb[i];
+		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
+			goto error;
+
+		pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_cp_data_pdu_sn_12_hdr *,
+						  aad_sz);
+		rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
+
+		if (unlikely(pdcp_sn_12_count_get(entity, rsn, &count)))
+			goto error;
+
+		if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
+			goto error;
+
+		rte_pktmbuf_adj(mb, hdr_trim_sz);
+		rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
+		out_mb[nb_success++] = mb;
+		continue;
+
+error:
+		err_mb[nb_err++] = mb;
+	}
+
+	if (unlikely(nb_err != 0))
+		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
+
+	*nb_err_ret = nb_err;
+	return nb_success;
+}
+
+static int
+pdcp_pre_process_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
+{
+	struct entity_priv *en_priv = entity_priv_get(entity);
+
+	entity->pre_process = NULL;
+	entity->post_process = NULL;
+
+	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) &&
+	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
+	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
+		entity->pre_process = pdcp_pre_process_cplane_sn_12_ul;
+		entity->post_process = pdcp_post_process_cplane_sn_12_ul;
+	}
+
+	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) &&
+	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
+	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK)) {
+		entity->pre_process = pdcp_pre_process_cplane_sn_12_dl;
+		entity->post_process = pdcp_post_process_cplane_sn_12_dl;
+	}
+
+	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
+	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
+		entity->pre_process = pdcp_pre_process_uplane_sn_12_ul;
+		entity->post_process = pdcp_post_process_uplane_sn_12_ul;
+	}
+
+	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
+	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
+		entity->pre_process = pdcp_pre_process_uplane_sn_18_ul;
+		entity->post_process = pdcp_post_process_uplane_sn_18_ul;
+	}
+
+	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
+	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
+	    (en_priv->flags.is_authenticated)) {
+		entity->pre_process = pdcp_pre_process_uplane_sn_12_dl_ip;
+		entity->post_process = pdcp_post_process_uplane_sn_12_dl_ip;
+	}
+
+	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
+	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
+	    (!en_priv->flags.is_authenticated)) {
+		entity->pre_process = pdcp_pre_process_uplane_sn_12_dl;
+		entity->post_process = pdcp_post_process_uplane_sn_12_dl;
+	}
+
+	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
+	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
+	    (en_priv->flags.is_authenticated)) {
+		entity->pre_process = pdcp_pre_process_uplane_sn_18_dl_ip;
+		entity->post_process = pdcp_post_process_uplane_sn_18_dl_ip;
+	}
+
+	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
+	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
+	    (!en_priv->flags.is_authenticated)) {
+		entity->pre_process = pdcp_pre_process_uplane_sn_18_dl;
+		entity->post_process = pdcp_post_process_uplane_sn_18_dl;
+	}
+
+	if (entity->pre_process == NULL || entity->post_process == NULL)
+		return -ENOTSUP;
+
+	return 0;
+}
+
+static int
+pdcp_entity_priv_populate(struct entity_priv *en_priv, const struct rte_pdcp_entity_conf *conf)
+{
+	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
+	int ret;
+
+	/**
+	 * flags.is_authenticated
+	 *
+	 * MAC-I would be added in case of control plane packets and when authentication
+	 * transform is not NULL.
+	 */
+
+	if (conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL)
+		en_priv->flags.is_authenticated = 1;
+
+	ret = pdcp_crypto_xfrm_get(conf, &c_xfrm, &a_xfrm);
+	if (ret)
+		return ret;
+
+	if (a_xfrm != NULL)
+		en_priv->flags.is_authenticated = 1;
+
+	/**
+	 * flags.is_ciph_in_bits
+	 *
+	 * For ZUC & SNOW3G cipher algos, offset & length need to be provided in bits.
+	 */
+
+	if ((c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2) ||
+	    (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_ZUC_EEA3))
+		en_priv->flags.is_ciph_in_bits = 1;
+
+	/**
+	 * flags.is_auth_in_bits
+	 *
+	 * For ZUC & SNOW3G authentication algos, offset & length need to be provided in bits.
+	 */
+
+	if (a_xfrm != NULL) {
+		if ((a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) ||
+		    (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3))
+			en_priv->flags.is_auth_in_bits = 1;
+	}
+
+	/**
+	 * flags.is_ul_entity
+	 *
+	 * Indicate whether the entity is UL/transmitting PDCP entity.
+	 */
+	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)
+		en_priv->flags.is_ul_entity = 1;
+
+	/**
+	 * hdr_sz
+	 *
+	 * PDCP header size of the entity
+	 */
+	en_priv->hdr_sz = pdcp_hdr_size_get(conf->pdcp_xfrm.sn_size);
+
+	/**
+	 * aad_sz
+	 *
+	 * For AES-CMAC, an additional message is prepended for processing. It needs to be trimmed
+	 * after crypto processing is done.
+	 */
+	if (a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC)
+		en_priv->aad_sz = 8;
+	else
+		en_priv->aad_sz = 0;
+
+	return 0;
+}
+
+int
+pdcp_process_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
+{
+	struct entity_priv *en_priv;
+	int ret;
+
+	if (entity == NULL || conf == NULL)
+		return -EINVAL;
+
+	en_priv = entity_priv_get(entity);
+
+	ret = pdcp_iv_gen_func_set(entity, conf);
+	if (ret)
+		return ret;
+
+	ret = pdcp_entity_priv_populate(en_priv, conf);
+	if (ret)
+		return ret;
+
+	ret = pdcp_pre_process_func_set(entity, conf);
+	if (ret)
+		return ret;
+
+	return 0;
+}
diff --git a/lib/pdcp/pdcp_process.h b/lib/pdcp/pdcp_process.h
new file mode 100644
index 0000000000..c92ab34c40
--- /dev/null
+++ b/lib/pdcp/pdcp_process.h
@@ -0,0 +1,13 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#ifndef _PDCP_PROCESS_H_
+#define _PDCP_PROCESS_H_
+
+#include <rte_pdcp.h>
+
+int
+pdcp_process_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf);
+
+#endif /* _PDCP_PROCESS_H_ */
diff --git a/lib/pdcp/rte_pdcp.c b/lib/pdcp/rte_pdcp.c
new file mode 100644
index 0000000000..b1533971c2
--- /dev/null
+++ b/lib/pdcp/rte_pdcp.c
@@ -0,0 +1,136 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#include <rte_pdcp.h>
+#include <rte_malloc.h>
+
+#include "pdcp_crypto.h"
+#include "pdcp_entity.h"
+#include "pdcp_process.h"
+
+static int
+pdcp_entity_size_get(const struct rte_pdcp_entity_conf *conf)
+{
+	int size;
+
+	size = sizeof(struct rte_pdcp_entity) + sizeof(struct entity_priv);
+
+	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK)
+		size += sizeof(struct entity_priv_dl_part);
+	else if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)
+		size += sizeof(struct entity_priv_ul_part);
+	else
+		return -EINVAL;
+
+	return RTE_ALIGN_CEIL(size, RTE_CACHE_LINE_SIZE);
+}
+
+struct rte_pdcp_entity *
+rte_pdcp_entity_establish(const struct rte_pdcp_entity_conf *conf)
+{
+	struct rte_pdcp_entity *entity = NULL;
+	struct entity_priv *en_priv;
+	int entity_size;
+	int ret;
+
+	if (conf == NULL || conf->cop_pool == NULL) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	if (conf->pdcp_xfrm.en_ordering || conf->pdcp_xfrm.remove_duplicates || conf->is_slrb ||
+	    conf->en_sec_offload) {
+		rte_errno = ENOTSUP;
+		return NULL;
+	}
+
+	/*
+	 * 6.3.2 PDCP SN
+	 * Length: 12 or 18 bits as indicated in table 6.3.2-1. The length of the PDCP SN is
+	 * configured by upper layers (pdcp-SN-SizeUL, pdcp-SN-SizeDL, or sl-PDCP-SN-Size in
+	 * TS 38.331 [3])
+	 */
+	if ((conf->pdcp_xfrm.sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) &&
+	    (conf->pdcp_xfrm.sn_size != RTE_SECURITY_PDCP_SN_SIZE_18)) {
+		rte_errno = ENOTSUP;
+		return NULL;
+	}
+
+	if (conf->pdcp_xfrm.hfn || conf->pdcp_xfrm.hfn_threshold) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	entity_size = pdcp_entity_size_get(conf);
+	if (entity_size < 0) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	entity = rte_zmalloc_socket("pdcp_entity", entity_size, RTE_CACHE_LINE_SIZE,
+				    SOCKET_ID_ANY);
+	if (entity == NULL) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	en_priv = entity_priv_get(entity);
+
+	en_priv->state.rx_deliv = conf->count;
+	en_priv->state.tx_next = conf->count;
+	en_priv->cop_pool = conf->cop_pool;
+
+	/* Setup crypto session */
+	ret = pdcp_crypto_sess_create(entity, conf);
+	if (ret)
+		goto entity_free;
+
+	ret = pdcp_process_func_set(entity, conf);
+	if (ret)
+		goto crypto_sess_destroy;
+
+	return entity;
+
+crypto_sess_destroy:
+	pdcp_crypto_sess_destroy(entity);
+entity_free:
+	rte_free(entity);
+	rte_errno = -ret;
+	return NULL;
+}
+
+int
+rte_pdcp_entity_release(struct rte_pdcp_entity *pdcp_entity, struct rte_mbuf *out_mb[])
+{
+	int ret;
+
+	if (pdcp_entity == NULL)
+		return -EINVAL;
+
+	/* Teardown crypto sessions */
+	ret = pdcp_crypto_sess_destroy(pdcp_entity);
+	if (ret)
+		return ret;
+
+	rte_free(pdcp_entity);
+
+	RTE_SET_USED(out_mb);
+	return 0;
+}
+
+int
+rte_pdcp_entity_suspend(struct rte_pdcp_entity *pdcp_entity,
+			struct rte_mbuf *out_mb[])
+{
+	struct entity_priv *en_priv;
+
+	if (pdcp_entity == NULL)
+		return -EINVAL;
+
+	en_priv = entity_priv_get(pdcp_entity);
+
+	if (en_priv->flags.is_ul_entity) {
+		en_priv->state.tx_next = 0;
+	} else {
+		en_priv->state.rx_next = 0;
+		en_priv->state.rx_deliv = 0;
+	}
+
+	RTE_SET_USED(out_mb);
+
+	return 0;
+}
diff --git a/lib/pdcp/rte_pdcp.h b/lib/pdcp/rte_pdcp.h
new file mode 100644
index 0000000000..b6c7f32c05
--- /dev/null
+++ b/lib/pdcp/rte_pdcp.h
@@ -0,0 +1,263 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#ifndef _RTE_PDCP_H_
+#define _RTE_PDCP_H_
+
+/**
+ * @file rte_pdcp.h
+ *
+ * RTE PDCP support.
+ *
+ * librte_pdcp provides a framework for PDCP protocol processing.
+ */
+
+#include <rte_compat.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_mempool.h>
+#include <rte_security.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Forward declarations */
+struct rte_pdcp_entity;
+
+/* PDCP pre-process function based on entity configuration */
+typedef uint16_t (*rte_pdcp_pre_p_t)(const struct rte_pdcp_entity *entity,
+				     struct rte_mbuf *mb[],
+				     struct rte_crypto_op *cop[],
+				     uint16_t num, uint16_t *nb_err);
+
+/* PDCP post-process function based on entity configuration */
+typedef uint16_t (*rte_pdcp_post_p_t)(const struct rte_pdcp_entity *entity,
+				      struct rte_mbuf *in_mb[],
+				      struct rte_mbuf *out_mb[],
+				      uint16_t num, uint16_t *nb_err);
+
+/**
+ * PDCP entity.
+ */
+struct rte_pdcp_entity {
+	/** Entity specific pre-process handle. */
+	rte_pdcp_pre_p_t pre_process;
+	/** Entity specific post-process handle. */
+	rte_pdcp_post_p_t post_process;
+	/**
+	 * PDCP entities may hold packets for purposes of in-order delivery (in
+	 * case of receiving PDCP entity) and re-transmission (in case of
+	 * transmitting PDCP entity).
+	 *
+	 * For receiving PDCP entity, it may hold packets when in-order
+	 * delivery is enabled. The packets would be cached until either a
+	 * packet that completes the sequence arrives or the discard timer
+	 * expires.
+	 *
+	 * When post-processing of PDCP packet which completes a sequence is
+	 * done, the API may return more packets than enqueued. Application is
+	 * expected to provide *rte_pdcp_pkt_post_process()* with *out_mb*
+	 * which can hold maximum number of packets which may be returned.
+	 *
+	 * For transmitting PDCP entity, during re-establishment (5.1.2),
+	 * entity may be required to perform re-transmission of the buffers
+	 * after applying new ciphering & integrity algorithms. For performing
+	 * crypto operation, *rte_pdcp_entity_re_establish()* would return as
+	 * many crypto_ops as the ones cached.
+	 */
+	uint16_t max_pkt_cache;
+	/** User area for saving application data. */
+	uint64_t user_area[2];
+} __rte_cache_aligned;
+
+/**
+ * PDCP entity configuration to be used for establishing an entity.
+ */
+struct rte_pdcp_entity_conf {
+	/** PDCP transform for the entity. */
+	struct rte_security_pdcp_xform pdcp_xfrm;
+	/** Crypto transform applicable for the entity. */
+	struct rte_crypto_sym_xform *crypto_xfrm;
+	/** Mempool for crypto symmetric session. */
+	struct rte_mempool *sess_mpool;
+	/** Crypto op pool. */
+	struct rte_mempool *cop_pool;
+	/**
+	 * 32 bit count value (HFN + SN) to be used for the first packet.
+	 * pdcp_xfrm.hfn would be ignored as the HFN would be derived from this value.
+	 */
+	uint32_t count;
+	/** Indicate whether the PDCP entity belongs to Side Link Radio Bearer. */
+	bool is_slrb;
+	/** Enable security offload on the device specified. */
+	bool en_sec_offload;
+	/** Enable non-atomic usage of entity. */
+	bool en_non_atomic;
+	/** Device on which security/crypto session need to be created. */
+	uint8_t dev_id;
+	/** Reverse direction during IV generation. Can be used to simulate UE crypto processing. */
+	bool reverse_iv_direction;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * 5.1.1 PDCP entity establishment
+ *
+ * Establish PDCP entity based on provided input configuration.
+ *
+ * @param conf
+ *   Parameters to be used for initializing PDCP entity object.
+ * @return
+ *   - Valid handle if success
+ *   - NULL in case of failure. rte_errno will be set to error code
+ */
+__rte_experimental
+struct rte_pdcp_entity *
+rte_pdcp_entity_establish(const struct rte_pdcp_entity_conf *conf);
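+
+/*
+ * A minimal establish sketch (illustrative only; the variable names and the
+ * chosen field values are assumptions, not requirements of this API):
+ *
+ *	struct rte_pdcp_entity_conf conf = { 0 };
+ *	struct rte_pdcp_entity *entity;
+ *
+ *	conf.pdcp_xfrm.domain = RTE_SECURITY_PDCP_MODE_DATA;
+ *	conf.pdcp_xfrm.pkt_dir = RTE_SECURITY_PDCP_UPLINK;
+ *	conf.pdcp_xfrm.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12;
+ *	conf.pdcp_xfrm.bearer = bearer;
+ *	conf.crypto_xfrm = &c_xfrm;
+ *	conf.sess_mpool = sess_mp;
+ *	conf.cop_pool = cop_mp;
+ *	conf.dev_id = dev_id;
+ *
+ *	entity = rte_pdcp_entity_establish(&conf);
+ *	if (entity == NULL)
+ *		printf("establish failed: %d\n", rte_errno);
+ */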
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * 5.1.3 PDCP entity release
+ *
+ * Release PDCP entity.
+ *
+ * For UL/transmitting PDCP entity, all stored PDCP SDUs would be dropped.
+ * For DL/receiving PDCP entity, the stored PDCP SDUs would be returned in
+ * *out_mb* buffer. The buffer should be large enough to hold all cached
+ * packets in the entity.
+ *
+ * @param pdcp_entity
+ *   Pointer to the PDCP entity to be released.
+ * @param[out] out_mb
+ *   The address of an array that can hold up to *rte_pdcp_entity.max_pkt_cache*
+ *   pointers to *rte_mbuf* structures.
+ * @return
+ *   -  0: Success and no cached packets to return
+ *   - >0: Success and the number of packets returned in out_mb
+ *   - <0: Error code in case of failures
+ */
+__rte_experimental
+int
+rte_pdcp_entity_release(struct rte_pdcp_entity *pdcp_entity,
+			struct rte_mbuf *out_mb[]);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * 5.1.4 PDCP entity suspend
+ *
+ * Suspend PDCP entity.
+ *
+ * For DL/receiving PDCP entity, the stored PDCP SDUs would be returned in
+ * *out_mb* buffer. The buffer should be large enough to hold all cached
+ * packets in the entity.
+ *
+ * For UL/transmitting PDCP entity, *out_mb* buffer would be unused.
+ *
+ * @param pdcp_entity
+ *   Pointer to the PDCP entity to be suspended.
+ * @param[out] out_mb
+ *   The address of an array that can hold up to *rte_pdcp_entity.max_pkt_cache*
+ *   pointers to *rte_mbuf* structures.
+ * @return
+ *   -  0: Success and no cached packets to return
+ *   - >0: Success and the number of packets returned in out_mb
+ *   - <0: Error code in case of failures
+ */
+__rte_experimental
+int
+rte_pdcp_entity_suspend(struct rte_pdcp_entity *pdcp_entity,
+			struct rte_mbuf *out_mb[]);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * For input mbufs and a given PDCP entity, pre-process the mbufs and prepare
+ * crypto ops that can be enqueued to the cryptodev associated with the given
+ * session. Only error packets would be returned in the input buffer, *mb*,
+ * and it is the responsibility of the application to free the same.
+ *
+ * @param entity
+ *   Pointer to the *rte_pdcp_entity* object the packets belong to.
+ * @param[in, out] mb
+ *   The address of an array of *num* pointers to *rte_mbuf* structures
+ *   which contain the input packets. Any error packets would be returned in the
+ *   same buffer.
+ * @param[out] cop
+ *   The address of an array that can hold up to *num* pointers to
+ *   *rte_crypto_op* structures. Crypto ops would be allocated by this API.
+ * @param num
+ *   The maximum number of packets to process.
+ * @param[out] nb_err
+ *   Pointer to return the number of error packets returned in *mb*.
+ * @return
+ *   Count of crypto_ops prepared
+ */
+__rte_experimental
+static inline uint16_t
+rte_pdcp_pkt_pre_process(const struct rte_pdcp_entity *entity,
+			 struct rte_mbuf *mb[], struct rte_crypto_op *cop[],
+			 uint16_t num, uint16_t *nb_err)
+{
+	return entity->pre_process(entity, mb, cop, num, nb_err);
+}
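+
+/*
+ * Illustrative sketch of the pre-processing datapath, assuming MAX_BURST,
+ * dev_id and qp_id are application defined and `mb` holds `nb_rx` packets
+ * belonging to the entity `en`. The sketch assumes the `nb_err` error packets
+ * are reported back at the tail of *mb*, after the first `nb_cop` entries.
+ *
+ *	struct rte_crypto_op *cop[MAX_BURST];
+ *	uint16_t nb_cop, nb_err, nb_enq;
+ *
+ *	nb_cop = rte_pdcp_pkt_pre_process(en, mb, cop, nb_rx, &nb_err);
+ *	if (nb_err != 0)
+ *		rte_pktmbuf_free_bulk(&mb[nb_cop], nb_err);
+ *
+ *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, nb_cop);
+ *
+ * Crypto ops not enqueued (nb_enq < nb_cop) would need to be freed or retried
+ * by the application.
+ */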
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * For input mbufs and a given PDCP entity, perform PDCP post-processing of
+ * the mbufs.
+ *
+ * Input mbufs are the ones retrieved from crypto_ops dequeued from cryptodev
+ * and grouped by *rte_pdcp_pkt_crypto_group()*.
+ *
+ * The post-processed packets would be returned in the *out_mb* buffer.
+ * The resultant mbufs would be grouped into success packets and error packets.
+ * Error packets would be grouped at the end of the array and it is the
+ * responsibility of the application to handle the same.
+ *
+ * When in-order delivery is enabled, the PDCP entity may buffer packets and
+ * would deliver them only when all prior packets have been post-processed.
+ * That would result in returning more or fewer packets than the number
+ * enqueued.
+ *
+ * @param entity
+ *   Pointer to the *rte_pdcp_entity* object the packets belong to.
+ * @param in_mb
+ *   The address of an array of *num* pointers to *rte_mbuf* structures.
+ * @param[out] out_mb
+ *   The address of an array of *num* pointers to *rte_mbuf* structures
+ *   to output packets after PDCP post-processing.
+ * @param num
+ *   The maximum number of packets to process.
+ * @param[out] nb_err
+ *   The number of error packets returned in the *out_mb* buffer.
+ * @return
+ *   Count of packets returned in the *out_mb* buffer.
+ */
+__rte_experimental
+static inline uint16_t
+rte_pdcp_pkt_post_process(const struct rte_pdcp_entity *entity,
+			  struct rte_mbuf *in_mb[],
+			  struct rte_mbuf *out_mb[],
+			  uint16_t num, uint16_t *nb_err)
+{
+	return entity->post_process(entity, in_mb, out_mb, num, nb_err);
+}
+
+#include <rte_pdcp_group.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PDCP_H_ */
diff --git a/lib/pdcp/rte_pdcp_group.h b/lib/pdcp/rte_pdcp_group.h
new file mode 100644
index 0000000000..2c01c19d4e
--- /dev/null
+++ b/lib/pdcp/rte_pdcp_group.h
@@ -0,0 +1,133 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#ifndef _RTE_PDCP_GROUP_H_
+#define _RTE_PDCP_GROUP_H_
+
+/**
+ * @file rte_pdcp_group.h
+ *
+ * RTE PDCP grouping support.
+ * It is not recommended to include this file directly; include <rte_pdcp.h>
+ * instead.
+ * Provides helper functions to process completed crypto-ops and group related
+ * packets by sessions they belong to.
+ */
+
+#include <rte_common.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_security.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Group packets belonging to the same PDCP entity.
+ */
+struct rte_pdcp_group {
+	union {
+		uint64_t val;
+		void *ptr;
+	} id; /**< Grouped by value */
+	struct rte_mbuf **m;  /**< Start of the group */
+	uint32_t cnt;         /**< Number of entries in the group */
+	int32_t rc;           /**< Status code associated with the group */
+};
+
+/**
+ * Take a crypto-op as input and extract the pointer to the related PDCP
+ * entity.
+ *
+ * @param cop
+ *   The address of an input *rte_crypto_op* structure.
+ * @return
+ *   The pointer to the related *rte_pdcp_entity* structure.
+ */
+static inline struct rte_pdcp_entity *
+rte_pdcp_en_from_cop(const struct rte_crypto_op *cop)
+{
+	void *sess = cop->sym[0].session;
+
+	if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		return (struct rte_pdcp_entity *)(uintptr_t)
+			rte_security_session_opaque_data_get(sess);
+	} else if (cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		return (struct rte_pdcp_entity *)(uintptr_t)
+			rte_cryptodev_sym_session_opaque_data_get(sess);
+	}
+
+	return NULL;
+}
+
+/**
+ * Take completed crypto ops as input, extract the related mbufs and group
+ * them by the *rte_pdcp_entity* they belong to. Mbufs for which the crypto
+ * operation has failed would be flagged using the
+ * *RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED* flag in rte_mbuf.ol_flags. The
+ * crypto_ops would be freed after the grouping.
+ *
+ * Note that the application must ensure that only crypto-ops prepared by
+ * lib_pdcp are provided back to this API.
+ *
+ * @param cop
+ *   The address of an array of *num* pointers to the input *rte_crypto_op*
+ *   structures.
+ * @param[out] mb
+ *   The address of an array of *num* pointers to output *rte_mbuf* structures.
+ * @param[out] grp
+ *   The address of an array of *num* output *rte_pdcp_group* structures.
+ * @param num
+ *   The maximum number of crypto-ops to process.
+ * @return
+ *   Number of filled elements in the *grp* array.
+ */
+static inline uint16_t
+rte_pdcp_pkt_crypto_group(struct rte_crypto_op *cop[], struct rte_mbuf *mb[],
+			  struct rte_pdcp_group grp[], uint16_t num)
+{
+	uint32_t i, j = 0, n = 0;
+	void *ns, *ps = NULL;
+	struct rte_mbuf *m;
+
+	for (i = 0; i != num; i++) {
+		m = cop[i]->sym[0].m_src;
+		ns = cop[i]->sym[0].session;
+
+		m->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
+		if (cop[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+			m->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+
+		/* Different entity */
+		if (ps != ns) {
+			/* Finalize open group and start a new one */
+			if (ps != NULL) {
+				grp[n].cnt = mb + j - grp[n].m;
+				n++;
+			}
+
+			/* Start new group */
+			grp[n].m = mb + j;
+			ps = ns;
+			grp[n].id.ptr = rte_pdcp_en_from_cop(cop[i]);
+		}
+
+		mb[j++] = m;
+		rte_crypto_op_free(cop[i]);
+	}
+
+	/* Finalize last group */
+	if (ps != NULL) {
+		grp[n].cnt = mb + j - grp[n].m;
+		n++;
+	}
+
+	return n;
+}
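+
+/*
+ * Illustrative sketch: draining a cryptodev queue pair and post-processing
+ * the packets per entity. MAX_BURST, dev_id and qp_id are application defined
+ * placeholders. Error packets are assumed to follow the successful ones in
+ * *out_mb*; with in-order delivery enabled, *out_mb* may need to be sized for
+ * up to grp[i].cnt plus *rte_pdcp_entity.max_pkt_cache* packets.
+ *
+ *	struct rte_mbuf *mb[MAX_BURST], *out_mb[MAX_BURST];
+ *	struct rte_crypto_op *cop[MAX_BURST];
+ *	struct rte_pdcp_group grp[MAX_BURST];
+ *	uint16_t nb_ops, nb_grp, nb_success, nb_err;
+ *	uint32_t i;
+ *
+ *	nb_ops = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, MAX_BURST);
+ *	nb_grp = rte_pdcp_pkt_crypto_group(cop, mb, grp, nb_ops);
+ *
+ *	for (i = 0; i < nb_grp; i++) {
+ *		nb_success = rte_pdcp_pkt_post_process(grp[i].id.ptr, grp[i].m,
+ *						       out_mb, grp[i].cnt,
+ *						       &nb_err);
+ *		if (nb_err != 0)
+ *			rte_pktmbuf_free_bulk(&out_mb[nb_success], nb_err);
+ *	}
+ *
+ * The nb_success packets at the head of *out_mb* can then be delivered to the
+ * next processing stage.
+ */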
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PDCP_GROUP_H_ */
diff --git a/lib/pdcp/version.map b/lib/pdcp/version.map
new file mode 100644
index 0000000000..8fa9d5d7cc
--- /dev/null
+++ b/lib/pdcp/version.map
@@ -0,0 +1,13 @@ 
+EXPERIMENTAL {
+	global:
+
+	# added in 23.03
+	rte_pdcp_entity_establish;
+	rte_pdcp_entity_release;
+	rte_pdcp_entity_suspend;
+
+	rte_pdcp_pkt_post_process;
+	rte_pdcp_pkt_pre_process;
+
+	local: *;
+};