[v4,04/10] lib: introduce ipsec library

Message ID 1544804589-10338-4-git-send-email-konstantin.ananyev@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Headers
Series [v3,1/9] cryptodev: add opaque userdata pointer into crypto sym session |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail Compilation issues

Commit Message

Ananyev, Konstantin Dec. 14, 2018, 4:23 p.m. UTC
  Introduce librte_ipsec library.
The library is supposed to utilize existing DPDK crypto-dev and
security API to provide application with transparent IPsec processing API.
That initial commit provides some base API to manage
IPsec Security Association (SA) object.

Signed-off-by: Mohammad Abdul Awal <mohammad.abdul.awal@intel.com>
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
---
 MAINTAINERS                            |   5 +
 config/common_base                     |   5 +
 lib/Makefile                           |   2 +
 lib/librte_ipsec/Makefile              |  24 ++
 lib/librte_ipsec/ipsec_sqn.h           |  48 ++++
 lib/librte_ipsec/meson.build           |  10 +
 lib/librte_ipsec/rte_ipsec_sa.h        | 139 +++++++++++
 lib/librte_ipsec/rte_ipsec_version.map |  10 +
 lib/librte_ipsec/sa.c                  | 327 +++++++++++++++++++++++++
 lib/librte_ipsec/sa.h                  |  77 ++++++
 lib/meson.build                        |   2 +
 mk/rte.app.mk                          |   2 +
 12 files changed, 651 insertions(+)
 create mode 100644 lib/librte_ipsec/Makefile
 create mode 100644 lib/librte_ipsec/ipsec_sqn.h
 create mode 100644 lib/librte_ipsec/meson.build
 create mode 100644 lib/librte_ipsec/rte_ipsec_sa.h
 create mode 100644 lib/librte_ipsec/rte_ipsec_version.map
 create mode 100644 lib/librte_ipsec/sa.c
 create mode 100644 lib/librte_ipsec/sa.h
  

Comments

Akhil Goyal Dec. 19, 2018, 12:08 p.m. UTC | #1
On 12/14/2018 9:53 PM, Konstantin Ananyev wrote:
> Introduce librte_ipsec library.
> The library is supposed to utilize existing DPDK crypto-dev and
> security API to provide application with transparent IPsec processing API.
> That initial commit provides some base API to manage
> IPsec Security Association (SA) object.
>
> Signed-off-by: Mohammad Abdul Awal <mohammad.abdul.awal@intel.com>
> Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> Acked-by: Declan Doherty <declan.doherty@intel.com>
> ---
>   MAINTAINERS                            |   5 +
>   config/common_base                     |   5 +
>   lib/Makefile                           |   2 +
>   lib/librte_ipsec/Makefile              |  24 ++
>   lib/librte_ipsec/ipsec_sqn.h           |  48 ++++
>   lib/librte_ipsec/meson.build           |  10 +
>   lib/librte_ipsec/rte_ipsec_sa.h        | 139 +++++++++++
>   lib/librte_ipsec/rte_ipsec_version.map |  10 +
>   lib/librte_ipsec/sa.c                  | 327 +++++++++++++++++++++++++
>   lib/librte_ipsec/sa.h                  |  77 ++++++
>   lib/meson.build                        |   2 +
>   mk/rte.app.mk                          |   2 +
>   12 files changed, 651 insertions(+)
>   create mode 100644 lib/librte_ipsec/Makefile
>   create mode 100644 lib/librte_ipsec/ipsec_sqn.h
>   create mode 100644 lib/librte_ipsec/meson.build
>   create mode 100644 lib/librte_ipsec/rte_ipsec_sa.h
>   create mode 100644 lib/librte_ipsec/rte_ipsec_version.map
>   create mode 100644 lib/librte_ipsec/sa.c
>   create mode 100644 lib/librte_ipsec/sa.h
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 71ba31208..3cf0a84a2 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -1071,6 +1071,11 @@ F: doc/guides/prog_guide/pdump_lib.rst
>   F: app/pdump/
>   F: doc/guides/tools/pdump.rst
>   
> +IPsec - EXPERIMENTAL
> +M: Konstantin Ananyev <konstantin.ananyev@intel.com>
> +F: lib/librte_ipsec/
> +M: Bernard Iremonger <bernard.iremonger@intel.com>
> +F: test/test/test_ipsec.c
>   
Please add "T: git://dpdk.org/next/dpdk-next-crypto" as it would be 
maintained in crypto sub tree in future.
>   Packet Framework
>   ----------------
> diff --git a/config/common_base b/config/common_base
> index d12ae98bc..32499d772 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -925,6 +925,11 @@ CONFIG_RTE_LIBRTE_BPF=y
>   # allow load BPF from ELF files (requires libelf)
>   CONFIG_RTE_LIBRTE_BPF_ELF=n
>   
> +#
> +# Compile librte_ipsec
> +#
> +CONFIG_RTE_LIBRTE_IPSEC=y
> +
>   #
>   # Compile the test application
>   #
> diff --git a/lib/Makefile b/lib/Makefile
> index b7370ef97..5dc774604 100644
> --- a/lib/Makefile
> +++ b/lib/Makefile
> @@ -106,6 +106,8 @@ DEPDIRS-librte_gso := librte_eal librte_mbuf librte_ethdev librte_net
>   DEPDIRS-librte_gso += librte_mempool
>   DIRS-$(CONFIG_RTE_LIBRTE_BPF) += librte_bpf
>   DEPDIRS-librte_bpf := librte_eal librte_mempool librte_mbuf librte_ethdev
> +DIRS-$(CONFIG_RTE_LIBRTE_IPSEC) += librte_ipsec
> +DEPDIRS-librte_ipsec := librte_eal librte_mbuf librte_cryptodev librte_security
>   DIRS-$(CONFIG_RTE_LIBRTE_TELEMETRY) += librte_telemetry
>   DEPDIRS-librte_telemetry := librte_eal librte_metrics librte_ethdev
>   
> diff --git a/lib/librte_ipsec/Makefile b/lib/librte_ipsec/Makefile
> new file mode 100644
> index 000000000..7758dcc6d
> --- /dev/null
> +++ b/lib/librte_ipsec/Makefile
> @@ -0,0 +1,24 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2018 Intel Corporation
> +
> +include $(RTE_SDK)/mk/rte.vars.mk
> +
> +# library name
> +LIB = librte_ipsec.a
> +
> +CFLAGS += -O3
> +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
> +CFLAGS += -DALLOW_EXPERIMENTAL_API
> +LDLIBS += -lrte_eal -lrte_mbuf -lrte_cryptodev -lrte_security
> +
> +EXPORT_MAP := rte_ipsec_version.map
> +
> +LIBABIVER := 1
> +
> +# all source are stored in SRCS-y
> +SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += sa.c
> +
> +# install header files
> +SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec_sa.h
> +
> +include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/lib/librte_ipsec/ipsec_sqn.h b/lib/librte_ipsec/ipsec_sqn.h
> new file mode 100644
> index 000000000..1935f6e30
> --- /dev/null
> +++ b/lib/librte_ipsec/ipsec_sqn.h
> @@ -0,0 +1,48 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Intel Corporation
> + */
> +
> +#ifndef _IPSEC_SQN_H_
> +#define _IPSEC_SQN_H_
> +
> +#define WINDOW_BUCKET_BITS		6 /* uint64_t */
> +#define WINDOW_BUCKET_SIZE		(1 << WINDOW_BUCKET_BITS)
> +#define WINDOW_BIT_LOC_MASK		(WINDOW_BUCKET_SIZE - 1)
> +
> +/* minimum number of bucket, power of 2*/
> +#define WINDOW_BUCKET_MIN		2
> +#define WINDOW_BUCKET_MAX		(INT16_MAX + 1)
> +
> +#define IS_ESN(sa)	((sa)->sqn_mask == UINT64_MAX)
> +
> +/*
> + * for given size, calculate required number of buckets.
> + */
> +static uint32_t
> +replay_num_bucket(uint32_t wsz)
> +{
> +	uint32_t nb;
> +
> +	nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
> +		WINDOW_BUCKET_SIZE);
> +	nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);
> +
> +	return nb;
> +}
> +
> +/**
> + * Based on number of buckets calculated required size for the
> + * structure that holds replay window and sequence number (RSN) information.
> + */
> +static size_t
> +rsn_size(uint32_t nb_bucket)
> +{
> +	size_t sz;
> +	struct replay_sqn *rsn;
> +
> +	sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
> +	sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
> +	return sz;
> +}
> +
> +#endif /* _IPSEC_SQN_H_ */
> diff --git a/lib/librte_ipsec/meson.build b/lib/librte_ipsec/meson.build
> new file mode 100644
> index 000000000..52c78eaeb
> --- /dev/null
> +++ b/lib/librte_ipsec/meson.build
> @@ -0,0 +1,10 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2018 Intel Corporation
> +
> +allow_experimental_apis = true
> +
> +sources=files('sa.c')
> +
> +install_headers = files('rte_ipsec_sa.h')
> +
> +deps += ['mbuf', 'net', 'cryptodev', 'security']
we need net in meson and not in Makefile ?
> diff --git a/lib/librte_ipsec/rte_ipsec_sa.h b/lib/librte_ipsec/rte_ipsec_sa.h
> new file mode 100644
> index 000000000..4e36fd99b
> --- /dev/null
> +++ b/lib/librte_ipsec/rte_ipsec_sa.h
> @@ -0,0 +1,139 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Intel Corporation
> + */
> +
> +#ifndef _RTE_IPSEC_SA_H_
> +#define _RTE_IPSEC_SA_H_
> +
> +/**
> + * @file rte_ipsec_sa.h
> + * @b EXPERIMENTAL: this API may change without prior notice
> + *
> + * Defines API to manage IPsec Security Association (SA) objects.
> + */
> +
> +#include <rte_common.h>
> +#include <rte_cryptodev.h>
> +#include <rte_security.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * An opaque structure to represent Security Association (SA).
> + */
> +struct rte_ipsec_sa;
> +
> +/**
> + * SA initialization parameters.
> + */
> +struct rte_ipsec_sa_prm {
> +
> +	uint64_t userdata; /**< provided and interpreted by user */
> +	uint64_t flags;  /**< see RTE_IPSEC_SAFLAG_* below */
> +	/** ipsec configuration */
> +	struct rte_security_ipsec_xform ipsec_xform;
> +	struct rte_crypto_sym_xform *crypto_xform;
comment missing
> +	union {
> +		struct {
> +			uint8_t hdr_len;     /**< tunnel header len */
> +			uint8_t hdr_l3_off;  /**< offset for IPv4/IPv6 header */
> +			uint8_t next_proto;  /**< next header protocol */
> +			const void *hdr;     /**< tunnel header template */
> +		} tun; /**< tunnel mode repated parameters */
spell check
> +		struct {
> +			uint8_t proto;  /**< next header protocol */
> +		} trs; /**< transport mode repated parameters */
spell check
> +	};
> +
> +	uint32_t replay_win_sz;
> +	/**< window size to enable sequence replay attack handling.
> +	 * Replay checking is disabled if the window size is 0.
> +	 */
As per discussions on the ML, comments shall either be placed before the param 
or on the same line as the param, not on the next line. Please check 
the rest of the patch as well.
> +};
> +
> +/**
> + * SA type is an 64-bit value that contain the following information:
> + * - IP version (IPv4/IPv6)
> + * - IPsec proto (ESP/AH)
> + * - inbound/outbound
> + * - mode (TRANSPORT/TUNNEL)
> + * - for TUNNEL outer IP version (IPv4/IPv6)
> + * ...
> + */
> +
> +enum {
> +	RTE_SATP_LOG_IPV,
> +	RTE_SATP_LOG_PROTO,
> +	RTE_SATP_LOG_DIR,
> +	RTE_SATP_LOG_MODE,
> +	RTE_SATP_LOG_NUM
> +};
what is the significance of LOG here.
> +
> +#define RTE_IPSEC_SATP_IPV_MASK		(1ULL << RTE_SATP_LOG_IPV)
> +#define RTE_IPSEC_SATP_IPV4		(0ULL << RTE_SATP_LOG_IPV)
> +#define RTE_IPSEC_SATP_IPV6		(1ULL << RTE_SATP_LOG_IPV)
> +
> +#define RTE_IPSEC_SATP_PROTO_MASK	(1ULL << RTE_SATP_LOG_PROTO)
> +#define RTE_IPSEC_SATP_PROTO_AH		(0ULL << RTE_SATP_LOG_PROTO)
> +#define RTE_IPSEC_SATP_PROTO_ESP	(1ULL << RTE_SATP_LOG_PROTO)
> +
> +#define RTE_IPSEC_SATP_DIR_MASK		(1ULL << RTE_SATP_LOG_DIR)
> +#define RTE_IPSEC_SATP_DIR_IB		(0ULL << RTE_SATP_LOG_DIR)
> +#define RTE_IPSEC_SATP_DIR_OB		(1ULL << RTE_SATP_LOG_DIR)
> +
> +#define RTE_IPSEC_SATP_MODE_MASK	(3ULL << RTE_SATP_LOG_MODE)
> +#define RTE_IPSEC_SATP_MODE_TRANS	(0ULL << RTE_SATP_LOG_MODE)
> +#define RTE_IPSEC_SATP_MODE_TUNLV4	(1ULL << RTE_SATP_LOG_MODE)
> +#define RTE_IPSEC_SATP_MODE_TUNLV6	(2ULL << RTE_SATP_LOG_MODE)
> +
> +/**
> + * get type of given SA
> + * @return
> + *   SA type value.
> + */
> +uint64_t __rte_experimental
> +rte_ipsec_sa_type(const struct rte_ipsec_sa *sa);
> +
> +/**
> + * Calculate requied SA size based on provided input parameters.
spell check
> + * @param prm
> + *   Parameters that wil be used to initialise SA object.
> + * @return
> + *   - Actual size required for SA with given parameters.
> + *   - -EINVAL if the parameters are invalid.
> + */
> +int __rte_experimental
> +rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm);
> +
> +/**
> + * initialise SA based on provided input parameters.
> + * @param sa
> + *   SA object to initialise.
> + * @param prm
> + *   Parameters used to initialise given SA object.
> + * @param size
> + *   size of the provided buffer for SA.
> + * @return
> + *   - Actual size of SA object if operation completed successfully.
> + *   - -EINVAL if the parameters are invalid.
> + *   - -ENOSPC if the size of the provided buffer is not big enough.
> + */
> +int __rte_experimental
> +rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
> +	uint32_t size);
> +
> +/**
> + * cleanup SA
> + * @param sa
> + *   Pointer to SA object to de-initialize.
> + */
> +void __rte_experimental
> +rte_ipsec_sa_fini(struct rte_ipsec_sa *sa);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_IPSEC_SA_H_ */
> diff --git a/lib/librte_ipsec/rte_ipsec_version.map b/lib/librte_ipsec/rte_ipsec_version.map
> new file mode 100644
> index 000000000..1a66726b8
> --- /dev/null
> +++ b/lib/librte_ipsec/rte_ipsec_version.map
> @@ -0,0 +1,10 @@
> +EXPERIMENTAL {
> +	global:
> +
> +	rte_ipsec_sa_fini;
> +	rte_ipsec_sa_init;
> +	rte_ipsec_sa_size;
> +	rte_ipsec_sa_type;
> +
> +	local: *;
> +};
> diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
> new file mode 100644
> index 000000000..f927a82bf
> --- /dev/null
> +++ b/lib/librte_ipsec/sa.c
> @@ -0,0 +1,327 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Intel Corporation
> + */
> +
> +#include <rte_ipsec_sa.h>
> +#include <rte_esp.h>
> +#include <rte_ip.h>
> +#include <rte_errno.h>
> +
> +#include "sa.h"
> +#include "ipsec_sqn.h"
> +
> +/* some helper structures */
> +struct crypto_xform {
> +	struct rte_crypto_auth_xform *auth;
> +	struct rte_crypto_cipher_xform *cipher;
> +	struct rte_crypto_aead_xform *aead;
> +};
shouldn't this be union as aead cannot be with cipher and auth cases.

extra line
> +
> +
> +static int
> +check_crypto_xform(struct crypto_xform *xform)
> +{
> +	uintptr_t p;
> +
> +	p = (uintptr_t)xform->auth | (uintptr_t)xform->cipher;
what is the intent of this?
> +
> +	/* either aead or both auth and cipher should be not NULLs */
> +	if (xform->aead) {
> +		if (p)
> +			return -EINVAL;
> +	} else if (p == (uintptr_t)xform->auth) {
> +		return -EINVAL;
> +	}
This function does not look good. It will miss the case of cipher only
> +
> +	return 0;
> +}
> +
> +static int
> +fill_crypto_xform(struct crypto_xform *xform,
> +	const struct rte_ipsec_sa_prm *prm)
> +{
> +	struct rte_crypto_sym_xform *xf;
> +
> +	memset(xform, 0, sizeof(*xform));
> +
> +	for (xf = prm->crypto_xform; xf != NULL; xf = xf->next) {
> +		if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +			if (xform->auth != NULL)
> +				return -EINVAL;
> +			xform->auth = &xf->auth;
> +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> +			if (xform->cipher != NULL)
> +				return -EINVAL;
> +			xform->cipher = &xf->cipher;
> +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> +			if (xform->aead != NULL)
> +				return -EINVAL;
> +			xform->aead = &xf->aead;
> +		} else
> +			return -EINVAL;
> +	}
> +
> +	return check_crypto_xform(xform);
> +}
how is this function handling the inbound and outbound cases.
In inbound first xform is auth and then cipher.
In outbound first is cipher and then auth. I think this should be 
checked in the lib.
Here for loop should not be there, as there would be at max only 2 xforms.
> +
> +uint64_t __rte_experimental
> +rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
> +{
> +	return sa->type;
> +}
> +
> +static int32_t
> +ipsec_sa_size(uint32_t wsz, uint64_t type, uint32_t *nb_bucket)
> +{
> +	uint32_t n, sz;
> +
> +	n = 0;
> +	if (wsz != 0 && (type & RTE_IPSEC_SATP_DIR_MASK) ==
> +			RTE_IPSEC_SATP_DIR_IB)
> +		n = replay_num_bucket(wsz);
> +
> +	if (n > WINDOW_BUCKET_MAX)
> +		return -EINVAL;
> +
> +	*nb_bucket = n;
> +
> +	sz = rsn_size(n);
> +	sz += sizeof(struct rte_ipsec_sa);
> +	return sz;
> +}
> +
> +void __rte_experimental
> +rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
> +{
> +	memset(sa, 0, sa->size);
> +}
Where is the memory of "sa" getting initialized?
> +
> +static int
> +fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
> +{
> +	uint64_t tp;
> +
> +	tp = 0;
> +
> +	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
> +		tp |= RTE_IPSEC_SATP_PROTO_AH;
> +	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
> +		tp |= RTE_IPSEC_SATP_PROTO_ESP;
> +	else
> +		return -EINVAL;
> +
> +	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
> +		tp |= RTE_IPSEC_SATP_DIR_OB;
> +	else if (prm->ipsec_xform.direction ==
> +			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
> +		tp |= RTE_IPSEC_SATP_DIR_IB;
> +	else
> +		return -EINVAL;
> +
> +	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
> +		if (prm->ipsec_xform.tunnel.type ==
> +				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
> +			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
> +		else if (prm->ipsec_xform.tunnel.type ==
> +				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
> +			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
> +		else
> +			return -EINVAL;
> +
> +		if (prm->tun.next_proto == IPPROTO_IPIP)
> +			tp |= RTE_IPSEC_SATP_IPV4;
> +		else if (prm->tun.next_proto == IPPROTO_IPV6)
> +			tp |= RTE_IPSEC_SATP_IPV6;
> +		else
> +			return -EINVAL;
> +	} else if (prm->ipsec_xform.mode ==
> +			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
> +		tp |= RTE_IPSEC_SATP_MODE_TRANS;
> +		if (prm->trs.proto == IPPROTO_IPIP)
> +			tp |= RTE_IPSEC_SATP_IPV4;
> +		else if (prm->trs.proto == IPPROTO_IPV6)
> +			tp |= RTE_IPSEC_SATP_IPV6;
> +		else
> +			return -EINVAL;
> +	} else
> +		return -EINVAL;
> +
> +	*type = tp;
> +	return 0;
> +}
> +
> +static void
> +esp_inb_init(struct rte_ipsec_sa *sa)
> +{
> +	/* these params may differ with new algorithms support */
> +	sa->ctp.auth.offset = 0;
> +	sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
> +	sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
> +	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
> +}
> +
> +static void
> +esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
> +{
> +	sa->proto = prm->tun.next_proto;
> +	esp_inb_init(sa);
> +}
> +
> +static void
> +esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
> +{
> +	sa->sqn.outb = 1;
> +
> +	/* these params may differ with new algorithms support */
> +	sa->ctp.auth.offset = hlen;
> +	sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
> +	if (sa->aad_len != 0) {
> +		sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
> +			sa->iv_len;
> +		sa->ctp.cipher.length = 0;
> +	} else {
> +		sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
> +		sa->ctp.cipher.length = sa->iv_len;
> +	}
> +}
> +
> +static void
> +esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
> +{
> +	sa->proto = prm->tun.next_proto;
> +	sa->hdr_len = prm->tun.hdr_len;
> +	sa->hdr_l3_off = prm->tun.hdr_l3_off;
> +	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
> +
> +	esp_outb_init(sa, sa->hdr_len);
> +}
> +
> +static int
> +esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
> +	const struct crypto_xform *cxf)
> +{
> +	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
> +				RTE_IPSEC_SATP_MODE_MASK;
> +
> +	if (cxf->aead != NULL) {
> +		/* RFC 4106 */
> +		if (cxf->aead->algo != RTE_CRYPTO_AEAD_AES_GCM)
> +			return -EINVAL;
> +		sa->icv_len = cxf->aead->digest_length;
> +		sa->iv_ofs = cxf->aead->iv.offset;
> +		sa->iv_len = sizeof(uint64_t);
> +		sa->pad_align = 4;
hard coding ??
> +	} else {
> +		sa->icv_len = cxf->auth->digest_length;
> +		sa->iv_ofs = cxf->cipher->iv.offset;
> +		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
> +		if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_NULL) {
> +			sa->pad_align = 4;
> +			sa->iv_len = 0;
> +		} else if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) {
> +			sa->pad_align = IPSEC_MAX_IV_SIZE;
> +			sa->iv_len = IPSEC_MAX_IV_SIZE;
> +		} else
> +			return -EINVAL;
> +	}
> +
> +	sa->udata = prm->userdata;
> +	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
> +	sa->salt = prm->ipsec_xform.salt;
> +
> +	switch (sa->type & msk) {
> +	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
> +	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
> +		esp_inb_tun_init(sa, prm);
> +		break;
> +	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
> +		esp_inb_init(sa);
> +		break;
> +	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
> +	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
> +		esp_outb_tun_init(sa, prm);
> +		break;
> +	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
> +		esp_outb_init(sa, 0);
> +		break;
> +	}
> +
> +	return 0;
> +}
> +
> +int __rte_experimental
> +rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
> +{
> +	uint64_t type;
> +	uint32_t nb;
> +	int32_t rc;
> +
> +	if (prm == NULL)
> +		return -EINVAL;
> +
> +	/* determine SA type */
> +	rc = fill_sa_type(prm, &type);
> +	if (rc != 0)
> +		return rc;
> +
> +	/* determine required size */
> +	return ipsec_sa_size(prm->replay_win_sz, type, &nb);
> +}
> +
> +int __rte_experimental
> +rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
> +	uint32_t size)
> +{
> +	int32_t rc, sz;
> +	uint32_t nb;
> +	uint64_t type;
> +	struct crypto_xform cxf;
> +
> +	if (sa == NULL || prm == NULL)
> +		return -EINVAL;
> +
> +	/* determine SA type */
> +	rc = fill_sa_type(prm, &type);
> +	if (rc != 0)
> +		return rc;
> +
> +	/* determine required size */
> +	sz = ipsec_sa_size(prm->replay_win_sz, type, &nb);
> +	if (sz < 0)
> +		return sz;
> +	else if (size < (uint32_t)sz)
> +		return -ENOSPC;
> +
> +	/* only esp is supported right now */
> +	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
> +		return -EINVAL;
> +
> +	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
> +			prm->tun.hdr_len > sizeof(sa->hdr))
> +		return -EINVAL;
> +
> +	rc = fill_crypto_xform(&cxf, prm);
> +	if (rc != 0)
> +		return rc;
> +
> +	sa->type = type;
> +	sa->size = sz;
> +
> +	/* check for ESN flag */
> +	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
> +		UINT32_MAX : UINT64_MAX;
> +
> +	rc = esp_sa_init(sa, prm, &cxf);
> +	if (rc != 0)
> +		rte_ipsec_sa_fini(sa);
> +
> +	/* fill replay window related fields */
> +	if (nb != 0) {
move this where nb is getting updated.
> +		sa->replay.win_sz = prm->replay_win_sz;
> +		sa->replay.nb_bucket = nb;
> +		sa->replay.bucket_index_mask = sa->replay.nb_bucket - 1;
> +		sa->sqn.inb = (struct replay_sqn *)(sa + 1);
> +	}
> +
> +	return sz;
> +}
> diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
> new file mode 100644
> index 000000000..5d113891a
> --- /dev/null
> +++ b/lib/librte_ipsec/sa.h
> @@ -0,0 +1,77 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Intel Corporation
> + */
> +
> +#ifndef _SA_H_
> +#define _SA_H_
> +
> +#define IPSEC_MAX_HDR_SIZE	64
> +#define IPSEC_MAX_IV_SIZE	16
> +#define IPSEC_MAX_IV_QWORD	(IPSEC_MAX_IV_SIZE / sizeof(uint64_t))
> +
> +/* these definitions probably has to be in rte_crypto_sym.h */
> +union sym_op_ofslen {
> +	uint64_t raw;
> +	struct {
> +		uint32_t offset;
> +		uint32_t length;
> +	};
> +};
These are already there in rte_crypto_sym_op. What is the need to 
redefine them?
offset and length can change on a per-packet basis, so they cannot be set at 
init time; at runtime you would have sym_op.
> +
> +union sym_op_data {
> +#ifdef __SIZEOF_INT128__
> +	__uint128_t raw;
> +#endif
> +	struct {
> +		uint8_t *va;
> +		rte_iova_t pa;
> +	};
> +};
rte_crypto_sym_op has all this information I guess(in mbuf)
> +
> +struct replay_sqn {
> +	uint64_t sqn;
> +	__extension__ uint64_t window[0];
> +};
> +
> +struct rte_ipsec_sa {
> +	uint64_t type;     /* type of given SA */
> +	uint64_t udata;    /* user defined */
> +	uint32_t size;     /* size of given sa object */
> +	uint32_t spi;
> +	/* sqn calculations related */
> +	uint64_t sqn_mask;
> +	struct {
> +		uint32_t win_sz;
> +		uint16_t nb_bucket;
> +		uint16_t bucket_index_mask;
> +	} replay;
> +	/* template for crypto op fields */
> +	struct {
> +		union sym_op_ofslen cipher;
> +		union sym_op_ofslen auth;
> +	} ctp;
> +	uint32_t salt;
> +	uint8_t proto;    /* next proto */
> +	uint8_t aad_len;
> +	uint8_t hdr_len;
> +	uint8_t hdr_l3_off;
> +	uint8_t icv_len;
> +	uint8_t sqh_len;
> +	uint8_t iv_ofs; /* offset for algo-specific IV inside crypto op */
> +	uint8_t iv_len;
> +	uint8_t pad_align;
> +
> +	/* template for tunnel header */
> +	uint8_t hdr[IPSEC_MAX_HDR_SIZE];
> +
> +	/*
> +	 * sqn and replay window
> +	 */
> +	union {
> +		uint64_t outb;
> +		struct replay_sqn *inb;
> +	} sqn;
> +
> +} __rte_cache_aligned;
> +
remove  extra lines
> +#endif /* _SA_H_ */
> diff --git a/lib/meson.build b/lib/meson.build
> index bb7f443f9..69684ef14 100644
> --- a/lib/meson.build
> +++ b/lib/meson.build
> @@ -22,6 +22,8 @@ libraries = [ 'compat', # just a header, used for versioning
>   	'kni', 'latencystats', 'lpm', 'member',
>   	'meter', 'power', 'pdump', 'rawdev',
>   	'reorder', 'sched', 'security', 'vhost',
> +	#ipsec lib depends on crypto and security
> +	'ipsec',
>   	# add pkt framework libs which use other libs from above
>   	'port', 'table', 'pipeline',
>   	# flow_classify lib depends on pkt framework table lib
> diff --git a/mk/rte.app.mk b/mk/rte.app.mk
> index 5699d979d..f4cd75252 100644
> --- a/mk/rte.app.mk
> +++ b/mk/rte.app.mk
> @@ -67,6 +67,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_BPF_ELF),y)
>   _LDLIBS-$(CONFIG_RTE_LIBRTE_BPF)            += -lelf
>   endif
>   
> +_LDLIBS-$(CONFIG_RTE_LIBRTE_IPSEC)            += -lrte_ipsec
> +
>   _LDLIBS-y += --whole-archive
>   
>   _LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE)        += -lrte_cfgfile
  
Thomas Monjalon Dec. 19, 2018, 12:39 p.m. UTC | #2
19/12/2018 13:08, Akhil Goyal:
> On 12/14/2018 9:53 PM, Konstantin Ananyev wrote:
> > --- a/MAINTAINERS
> > +++ b/MAINTAINERS
> > @@ -1071,6 +1071,11 @@ F: doc/guides/prog_guide/pdump_lib.rst
> >   F: app/pdump/
> >   F: doc/guides/tools/pdump.rst
> >   
> > +IPsec - EXPERIMENTAL
> > +M: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > +F: lib/librte_ipsec/
> > +M: Bernard Iremonger <bernard.iremonger@intel.com>
> > +F: test/test/test_ipsec.c
> >   
> Please add "T: git://dpdk.org/next/dpdk-next-crypto" as it would be 
> maintained in crypto sub tree in future.

Right

And for keeping a logical order, please move it after IP frag and GRO/GSO.
  
Ananyev, Konstantin Dec. 20, 2018, 2:06 p.m. UTC | #3
> > diff --git a/lib/librte_ipsec/meson.build b/lib/librte_ipsec/meson.build
> > new file mode 100644
> > index 000000000..52c78eaeb
> > --- /dev/null
> > +++ b/lib/librte_ipsec/meson.build
> > @@ -0,0 +1,10 @@
> > +# SPDX-License-Identifier: BSD-3-Clause
> > +# Copyright(c) 2018 Intel Corporation
> > +
> > +allow_experimental_apis = true
> > +
> > +sources=files('sa.c')
> > +
> > +install_headers = files('rte_ipsec_sa.h')
> > +
> > +deps += ['mbuf', 'net', 'cryptodev', 'security']
> we need net in meson and not in Makefile ?

I suppose we need it in both, will update.

> > +
> > +enum {
> > +	RTE_SATP_LOG_IPV,
> > +	RTE_SATP_LOG_PROTO,
> > +	RTE_SATP_LOG_DIR,
> > +	RTE_SATP_LOG_MODE,
> > +	RTE_SATP_LOG_NUM
> > +};
> what is the significance of LOG here.

_LOG_ is for logarithm of 2 here.

> 
> > diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
> > new file mode 100644
> > index 000000000..f927a82bf
> > --- /dev/null
> > +++ b/lib/librte_ipsec/sa.c
> > @@ -0,0 +1,327 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2018 Intel Corporation
> > + */
> > +
> > +#include <rte_ipsec_sa.h>
> > +#include <rte_esp.h>
> > +#include <rte_ip.h>
> > +#include <rte_errno.h>
> > +
> > +#include "sa.h"
> > +#include "ipsec_sqn.h"
> > +
> > +/* some helper structures */
> > +struct crypto_xform {
> > +	struct rte_crypto_auth_xform *auth;
> > +	struct rte_crypto_cipher_xform *cipher;
> > +	struct rte_crypto_aead_xform *aead;
> > +};
> shouldn't this be union as aead cannot be with cipher and auth cases.

That's used internally to collect/analyze xforms provided by prm->crypto_xform.


> 
> extra line
> > +
> > +
> > +static int
> > +check_crypto_xform(struct crypto_xform *xform)
> > +{
> > +	uintptr_t p;
> > +
> > +	p = (uintptr_t)xform->auth | (uintptr_t)xform->cipher;
> what is the intent of this?

It is used below to check that if aead is present, both cipher and auth
are not set.

> > +
> > +	/* either aead or both auth and cipher should be not NULLs */
> > +	if (xform->aead) {
> > +		if (p)
> > +			return -EINVAL;
> > +	} else if (p == (uintptr_t)xform->auth) {
> > +		return -EINVAL;
> > +	}
> This function does not look good. It will miss the case of cipher only

Cipher only is not supported right now, and I am not aware of any plans
to support it in the future.
If someone would like to add cipher only, then yes, he/she probably would
have to update this function.

> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +fill_crypto_xform(struct crypto_xform *xform,
> > +	const struct rte_ipsec_sa_prm *prm)
> > +{
> > +	struct rte_crypto_sym_xform *xf;
> > +
> > +	memset(xform, 0, sizeof(*xform));
> > +
> > +	for (xf = prm->crypto_xform; xf != NULL; xf = xf->next) {
> > +		if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> > +			if (xform->auth != NULL)
> > +				return -EINVAL;
> > +			xform->auth = &xf->auth;
> > +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> > +			if (xform->cipher != NULL)
> > +				return -EINVAL;
> > +			xform->cipher = &xf->cipher;
> > +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> > +			if (xform->aead != NULL)
> > +				return -EINVAL;
> > +			xform->aead = &xf->aead;
> > +		} else
> > +			return -EINVAL;
> > +	}
> > +
> > +	return check_crypto_xform(xform);
> > +}
> how is this function handling the inbound and outbound cases.
> In inbound first xform is auth and then cipher.
> In outbound first is cipher and then auth. I think this should be
> checked in the lib.

Interesting, I didn't know about such limitation.
My understanding was that the any order (<auth,cipher>, <cipher,auth>)
for both inbound and outbound is acceptable.
Is that order restriction is documented somewhere?

> Here for loop should not be there, as there would be at max only 2 xforms.
> > +
> > +uint64_t __rte_experimental
> > +rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
> > +{
> > +	return sa->type;
> > +}
> > +
> > +static int32_t
> > +ipsec_sa_size(uint32_t wsz, uint64_t type, uint32_t *nb_bucket)
> > +{
> > +	uint32_t n, sz;
> > +
> > +	n = 0;
> > +	if (wsz != 0 && (type & RTE_IPSEC_SATP_DIR_MASK) ==
> > +			RTE_IPSEC_SATP_DIR_IB)
> > +		n = replay_num_bucket(wsz);
> > +
> > +	if (n > WINDOW_BUCKET_MAX)
> > +		return -EINVAL;
> > +
> > +	*nb_bucket = n;
> > +
> > +	sz = rsn_size(n);
> > +	sz += sizeof(struct rte_ipsec_sa);
> > +	return sz;
> > +}
> > +
> > +void __rte_experimental
> > +rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
> > +{
> > +	memset(sa, 0, sa->size);
> > +}
> Where is the memory of "sa" getting initialized?

Not sure I understand your question...
Do you mean we missed memset(sa, 0, size)
in rte_ipsec_sa_init()?

> > +
> > +static int
> > +esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
> > +	const struct crypto_xform *cxf)
> > +{
> > +	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
> > +				RTE_IPSEC_SATP_MODE_MASK;
> > +
> > +	if (cxf->aead != NULL) {
> > +		/* RFC 4106 */
> > +		if (cxf->aead->algo != RTE_CRYPTO_AEAD_AES_GCM)
> > +			return -EINVAL;
> > +		sa->icv_len = cxf->aead->digest_length;
> > +		sa->iv_ofs = cxf->aead->iv.offset;
> > +		sa->iv_len = sizeof(uint64_t);
> > +		sa->pad_align = 4;
> hard coding ??

Will add some define or enum.


> > +	} else {
> > +		sa->icv_len = cxf->auth->digest_length;
> > +		sa->iv_ofs = cxf->cipher->iv.offset;
> > +		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
> > +		if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_NULL) {
> > +			sa->pad_align = 4;
> > +			sa->iv_len = 0;
> > +		} else if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) {
> > +			sa->pad_align = IPSEC_MAX_IV_SIZE;
> > +			sa->iv_len = IPSEC_MAX_IV_SIZE;
> > +		} else
> > +			return -EINVAL;
> > +	}
> > +


> > +
> > +int __rte_experimental
> > +rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
> > +	uint32_t size)
> > +{
> > +	int32_t rc, sz;
> > +	uint32_t nb;
> > +	uint64_t type;
> > +	struct crypto_xform cxf;
> > +
> > +	if (sa == NULL || prm == NULL)
> > +		return -EINVAL;
> > +
> > +	/* determine SA type */
> > +	rc = fill_sa_type(prm, &type);
> > +	if (rc != 0)
> > +		return rc;
> > +
> > +	/* determine required size */
> > +	sz = ipsec_sa_size(prm->replay_win_sz, type, &nb);
> > +	if (sz < 0)
> > +		return sz;
> > +	else if (size < (uint32_t)sz)
> > +		return -ENOSPC;
> > +
> > +	/* only esp is supported right now */
> > +	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
> > +		return -EINVAL;
> > +
> > +	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
> > +			prm->tun.hdr_len > sizeof(sa->hdr))
> > +		return -EINVAL;
> > +
> > +	rc = fill_crypto_xform(&cxf, prm);
> > +	if (rc != 0)
> > +		return rc;
> > +
> > +	sa->type = type;
> > +	sa->size = sz;
> > +
> > +	/* check for ESN flag */
> > +	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
> > +		UINT32_MAX : UINT64_MAX;
> > +
> > +	rc = esp_sa_init(sa, prm, &cxf);
> > +	if (rc != 0)
> > +		rte_ipsec_sa_fini(sa);
> > +
> > +	/* fill replay window related fields */
> > +	if (nb != 0) {
> move this where nb is getting updated.

I don't think it is a good idea.
We calculate nb and the required SA size first, without updating the provided memory buffer.
If the buffer is not big enough, we will return an error without updating the buffer.
It is cleaner and safer to keep it as it is.

> > +		sa->replay.win_sz = prm->replay_win_sz;
> > +		sa->replay.nb_bucket = nb;
> > +		sa->replay.bucket_index_mask = sa->replay.nb_bucket - 1;
> > +		sa->sqn.inb = (struct replay_sqn *)(sa + 1);
> > +	}
> > +
> > +	return sz;
> > +}
  
Thomas Monjalon Dec. 20, 2018, 2:14 p.m. UTC | #4
20/12/2018 15:06, Ananyev, Konstantin:
> > > +enum {
> > > +	RTE_SATP_LOG_IPV,
> > > +	RTE_SATP_LOG_PROTO,
> > > +	RTE_SATP_LOG_DIR,
> > > +	RTE_SATP_LOG_MODE,
> > > +	RTE_SATP_LOG_NUM
> > > +};
> > what is the significance of LOG here.
> 
> _LOG_ is for logarithm of 2 here.

_LOG2_ ?
  
Ananyev, Konstantin Dec. 20, 2018, 2:26 p.m. UTC | #5
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Thursday, December 20, 2018 2:14 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: Akhil Goyal <akhil.goyal@nxp.com>; dev@dpdk.org; Awal, Mohammad Abdul <mohammad.abdul.awal@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v4 04/10] lib: introduce ipsec library
> 
> 20/12/2018 15:06, Ananyev, Konstantin:
> > > > +enum {
> > > > +	RTE_SATP_LOG_IPV,
> > > > +	RTE_SATP_LOG_PROTO,
> > > > +	RTE_SATP_LOG_DIR,
> > > > +	RTE_SATP_LOG_MODE,
> > > > +	RTE_SATP_LOG_NUM
> > > > +};
> > > what is the significance of LOG here.
> >
> > _LOG_ is for logarithm of 2 here.
> 
> _LOG2_ ?
> 

Ok, will update.
  
Ananyev, Konstantin Dec. 20, 2018, 6:17 p.m. UTC | #6
> > > +
> > > +static int
> > > +fill_crypto_xform(struct crypto_xform *xform,
> > > +	const struct rte_ipsec_sa_prm *prm)
> > > +{
> > > +	struct rte_crypto_sym_xform *xf;
> > > +
> > > +	memset(xform, 0, sizeof(*xform));
> > > +
> > > +	for (xf = prm->crypto_xform; xf != NULL; xf = xf->next) {
> > > +		if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> > > +			if (xform->auth != NULL)
> > > +				return -EINVAL;
> > > +			xform->auth = &xf->auth;
> > > +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> > > +			if (xform->cipher != NULL)
> > > +				return -EINVAL;
> > > +			xform->cipher = &xf->cipher;
> > > +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> > > +			if (xform->aead != NULL)
> > > +				return -EINVAL;
> > > +			xform->aead = &xf->aead;
> > > +		} else
> > > +			return -EINVAL;
> > > +	}
> > > +
> > > +	return check_crypto_xform(xform);
> > > +}
> > how is this function handling the inbound and outbound cases.
> > In inbound first xform is auth and then cipher.
> > In outbound first is cipher and then auth. I think this should be
> > checked in the lib.
> 
> Interesting, I didn't know about such limitation.
> My understanding was that the any order (<auth,cipher>, <cipher,auth>)
> for both inbound and outbound is acceptable.
> Is that order restriction is documented somewhere?
> 

Actually, if such restriction really exists, and cryptodev framework obeys it,
then crypto session creation will fail anyway.

> > Here for loop should not be there, as there would be at max only 2 xforms.
  
Akhil Goyal Dec. 21, 2018, 11:53 a.m. UTC | #7
On 12/20/2018 7:36 PM, Ananyev, Konstantin wrote:
>
>>> diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
>>> new file mode 100644
>>> index 000000000..f927a82bf
>>> --- /dev/null
>>> +++ b/lib/librte_ipsec/sa.c
>>> @@ -0,0 +1,327 @@
>>> +/* SPDX-License-Identifier: BSD-3-Clause
>>> + * Copyright(c) 2018 Intel Corporation
>>> + */
>>> +
>>> +#include <rte_ipsec_sa.h>
>>> +#include <rte_esp.h>
>>> +#include <rte_ip.h>
>>> +#include <rte_errno.h>
>>> +
>>> +#include "sa.h"
>>> +#include "ipsec_sqn.h"
>>> +
>>> +/* some helper structures */
>>> +struct crypto_xform {
>>> +	struct rte_crypto_auth_xform *auth;
>>> +	struct rte_crypto_cipher_xform *cipher;
>>> +	struct rte_crypto_aead_xform *aead;
>>> +};
>> shouldn't this be union as aead cannot be with cipher and auth cases.
> That's used internally to collect/analyze xforms provided by prm->crypto_xform.

>
>
>> extra line
>>> +
>>> +
>>> +static int
>>> +check_crypto_xform(struct crypto_xform *xform)
>>> +{
>>> +	uintptr_t p;
>>> +
>>> +	p = (uintptr_t)xform->auth | (uintptr_t)xform->cipher;
>> what is the intent of this?
> It is used below to check that if aead is present both cipher and auth
> are  not.
>
>>> +
>>> +	/* either aead or both auth and cipher should be not NULLs */
>>> +	if (xform->aead) {
>>> +		if (p)
>>> +			return -EINVAL;
>>> +	} else if (p == (uintptr_t)xform->auth) {
>>> +		return -EINVAL;
>>> +	}
>> This function does not look good. It will miss the case of cipher only
> Cipher only is not supported right now and  I am not aware about plans
> to support it in future.
> If someone would like to add cipher onl,then yes he/she probably would
> have to update this function.
I know that cipher_only is not supported and nobody will support it in
the case of IPsec.
My point is that if somebody gives only an auth or only a cipher xform, then this
function would not be able to detect that case and would not return an error.

>>> +
>>> +	return 0;
>>> +}
>>> +
>>> +static int
>>> +fill_crypto_xform(struct crypto_xform *xform,
>>> +	const struct rte_ipsec_sa_prm *prm)
>>> +{
>>> +	struct rte_crypto_sym_xform *xf;
>>> +
>>> +	memset(xform, 0, sizeof(*xform));
>>> +
>>> +	for (xf = prm->crypto_xform; xf != NULL; xf = xf->next) {
>>> +		if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
>>> +			if (xform->auth != NULL)
>>> +				return -EINVAL;
>>> +			xform->auth = &xf->auth;
>>> +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
>>> +			if (xform->cipher != NULL)
>>> +				return -EINVAL;
>>> +			xform->cipher = &xf->cipher;
>>> +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
>>> +			if (xform->aead != NULL)
>>> +				return -EINVAL;
>>> +			xform->aead = &xf->aead;
>>> +		} else
>>> +			return -EINVAL;
>>> +	}
>>> +
>>> +	return check_crypto_xform(xform);
>>> +}
>> how is this function handling the inbound and outbound cases.
>> In inbound first xform is auth and then cipher.
>> In outbound first is cipher and then auth. I think this should be
>> checked in the lib.
> Interesting, I didn't know about such limitation.
> My understanding was that the any order (<auth,cipher>, <cipher,auth>)
> for both inbound and outbound is acceptable.
> Is that order restriction is documented somewhere?
/**
  * Symmetric crypto transform structure.
  *
  * This is used to specify the crypto transforms required, multiple 
transforms
  * can be chained together to specify a chain transforms such as 
authentication
  * then cipher, or cipher then authentication. Each transform structure can
  * hold a single transform, the type field is used to specify which 
transform
  * is contained within the union
  */
struct rte_crypto_sym_xform {

This is not a limitation, this is how it is designed to handle 2 cases 
of crypto - auth then cipher and cipher then auth.


>> Here for loop should not be there, as there would be at max only 2 xforms.
>>> +
>>> +uint64_t __rte_experimental
>>> +rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
>>> +{
>>> +	return sa->type;
>>> +}
>>> +
>>> +static int32_t
>>> +ipsec_sa_size(uint32_t wsz, uint64_t type, uint32_t *nb_bucket)
>>> +{
>>> +	uint32_t n, sz;
>>> +
>>> +	n = 0;
>>> +	if (wsz != 0 && (type & RTE_IPSEC_SATP_DIR_MASK) ==
>>> +			RTE_IPSEC_SATP_DIR_IB)
>>> +		n = replay_num_bucket(wsz);
>>> +
>>> +	if (n > WINDOW_BUCKET_MAX)
>>> +		return -EINVAL;
>>> +
>>> +	*nb_bucket = n;
>>> +
>>> +	sz = rsn_size(n);
>>> +	sz += sizeof(struct rte_ipsec_sa);
>>> +	return sz;
>>> +}
>>> +
>>> +void __rte_experimental
>>> +rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
>>> +{
>>> +	memset(sa, 0, sa->size);
>>> +}
>> Where is the memory of "sa" getting initialized?
> Not sure I understand your question...
> Do you mean we missed memset(sa, 0, size)
> in rte_ipsec_sa_init()?
Sorry, I did not ask the correct question. I was asking: where is it
allocated?
Is it the application's responsibility?
>
>
>>> +
>>> +int __rte_experimental
>>> +rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
>>> +	uint32_t size)
>>> +{
>>> +	int32_t rc, sz;
>>> +	uint32_t nb;
>>> +	uint64_t type;
>>> +	struct crypto_xform cxf;
>>> +
>>> +	if (sa == NULL || prm == NULL)
>>> +		return -EINVAL;
>>> +
>>> +	/* determine SA type */
>>> +	rc = fill_sa_type(prm, &type);
>>> +	if (rc != 0)
>>> +		return rc;
>>> +
>>> +	/* determine required size */
>>> +	sz = ipsec_sa_size(prm->replay_win_sz, type, &nb);
>>> +	if (sz < 0)
>>> +		return sz;
>>> +	else if (size < (uint32_t)sz)
>>> +		return -ENOSPC;
>>> +
>>> +	/* only esp is supported right now */
>>> +	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
>>> +		return -EINVAL;
>>> +
>>> +	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
>>> +			prm->tun.hdr_len > sizeof(sa->hdr))
>>> +		return -EINVAL;
>>> +
>>> +	rc = fill_crypto_xform(&cxf, prm);
>>> +	if (rc != 0)
>>> +		return rc;
>>> +
>>> +	sa->type = type;
>>> +	sa->size = sz;
>>> +
>>> +	/* check for ESN flag */
>>> +	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
>>> +		UINT32_MAX : UINT64_MAX;
>>> +
>>> +	rc = esp_sa_init(sa, prm, &cxf);
>>> +	if (rc != 0)
>>> +		rte_ipsec_sa_fini(sa);
>>> +
>>> +	/* fill replay window related fields */
>>> +	if (nb != 0) {
>> move this where nb is getting updated.
> I don't think it is a good idea.
> We calulate nb first and required sa size first without updating provided memory buffer.
> If the buffer is not big enough, will return an error without updating the buffer.
> Cleaner and safer to keep it as it is.
ok
>>> +		sa->replay.win_sz = prm->replay_win_sz;
>>> +		sa->replay.nb_bucket = nb;
>>> +		sa->replay.bucket_index_mask = sa->replay.nb_bucket - 1;
>>> +		sa->sqn.inb = (struct replay_sqn *)(sa + 1);
>>> +	}
>>> +
>>> +	return sz;
>>> +}
  
Akhil Goyal Dec. 21, 2018, 11:57 a.m. UTC | #8
On 12/20/2018 11:47 PM, Ananyev, Konstantin wrote:
>>>> +
>>>> +static int
>>>> +fill_crypto_xform(struct crypto_xform *xform,
>>>> +	const struct rte_ipsec_sa_prm *prm)
>>>> +{
>>>> +	struct rte_crypto_sym_xform *xf;
>>>> +
>>>> +	memset(xform, 0, sizeof(*xform));
>>>> +
>>>> +	for (xf = prm->crypto_xform; xf != NULL; xf = xf->next) {
>>>> +		if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
>>>> +			if (xform->auth != NULL)
>>>> +				return -EINVAL;
>>>> +			xform->auth = &xf->auth;
>>>> +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
>>>> +			if (xform->cipher != NULL)
>>>> +				return -EINVAL;
>>>> +			xform->cipher = &xf->cipher;
>>>> +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
>>>> +			if (xform->aead != NULL)
>>>> +				return -EINVAL;
>>>> +			xform->aead = &xf->aead;
>>>> +		} else
>>>> +			return -EINVAL;
>>>> +	}
>>>> +
>>>> +	return check_crypto_xform(xform);
>>>> +}
>>> how is this function handling the inbound and outbound cases.
>>> In inbound first xform is auth and then cipher.
>>> In outbound first is cipher and then auth. I think this should be
>>> checked in the lib.
>> Interesting, I didn't know about such limitation.
>> My understanding was that the any order (<auth,cipher>, <cipher,auth>)
>> for both inbound and outbound is acceptable.
>> Is that order restriction is documented somewhere?
>>
> Actually, if such restriction really exists, and cryptodev framework obeys it,
> then crypto session creation will fail anyway.
The ipsec library should not rely on other components to report the error;
it should handle the cases it is expected to handle.
As per my understanding, IPsec is a cipher-then-authenticate protocol
for the outbound case, and it should give an error for any other order.
Similarly, it is auth-then-cipher for the inbound case.
>>> Here for loop should not be there, as there would be at max only 2 xforms.
  
Ananyev, Konstantin Dec. 21, 2018, 12:41 p.m. UTC | #9
> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Friday, December 21, 2018 11:53 AM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; dev@dpdk.org
> Cc: Thomas Monjalon <thomas@monjalon.net>; Awal, Mohammad Abdul <mohammad.abdul.awal@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v4 04/10] lib: introduce ipsec library
> 
> 
> 
> On 12/20/2018 7:36 PM, Ananyev, Konstantin wrote:
> >
> >>> diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
> >>> new file mode 100644
> >>> index 000000000..f927a82bf
> >>> --- /dev/null
> >>> +++ b/lib/librte_ipsec/sa.c
> >>> @@ -0,0 +1,327 @@
> >>> +/* SPDX-License-Identifier: BSD-3-Clause
> >>> + * Copyright(c) 2018 Intel Corporation
> >>> + */
> >>> +
> >>> +#include <rte_ipsec_sa.h>
> >>> +#include <rte_esp.h>
> >>> +#include <rte_ip.h>
> >>> +#include <rte_errno.h>
> >>> +
> >>> +#include "sa.h"
> >>> +#include "ipsec_sqn.h"
> >>> +
> >>> +/* some helper structures */
> >>> +struct crypto_xform {
> >>> +	struct rte_crypto_auth_xform *auth;
> >>> +	struct rte_crypto_cipher_xform *cipher;
> >>> +	struct rte_crypto_aead_xform *aead;
> >>> +};
> >> shouldn't this be union as aead cannot be with cipher and auth cases.
> > That's used internally to collect/analyze xforms provided by prm->crypto_xform.
> 
> >
> >
> >> extra line
> >>> +
> >>> +
> >>> +static int
> >>> +check_crypto_xform(struct crypto_xform *xform)
> >>> +{
> >>> +	uintptr_t p;
> >>> +
> >>> +	p = (uintptr_t)xform->auth | (uintptr_t)xform->cipher;
> >> what is the intent of this?
> > It is used below to check that if aead is present both cipher and auth
> > are  not.
> >
> >>> +
> >>> +	/* either aead or both auth and cipher should be not NULLs */
> >>> +	if (xform->aead) {
> >>> +		if (p)
> >>> +			return -EINVAL;
> >>> +	} else if (p == (uintptr_t)xform->auth) {
> >>> +		return -EINVAL;
> >>> +	}
> >> This function does not look good. It will miss the case of cipher only
> > Cipher only is not supported right now and  I am not aware about plans
> > to support it in future.
> > If someone would like to add cipher onl,then yes he/she probably would
> > have to update this function.
> I know that cipher_only is not supported and nobody will support it in
> case of ipsec.
> My point is if somebody gives only auth or only cipher xform, then this
> function would not be able to detect that case and will not return error.

fill_crypto_xform() (the function below) will detect it and return an error:
+		if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+			if (xform->auth != NULL)
+				return -EINVAL;

> 
> >>> +
> >>> +	return 0;
> >>> +}
> >>> +
> >>> +static int
> >>> +fill_crypto_xform(struct crypto_xform *xform,
> >>> +	const struct rte_ipsec_sa_prm *prm)
> >>> +{
> >>> +	struct rte_crypto_sym_xform *xf;
> >>> +
> >>> +	memset(xform, 0, sizeof(*xform));
> >>> +
> >>> +	for (xf = prm->crypto_xform; xf != NULL; xf = xf->next) {
> >>> +		if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> >>> +			if (xform->auth != NULL)
> >>> +				return -EINVAL;
> >>> +			xform->auth = &xf->auth;
> >>> +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> >>> +			if (xform->cipher != NULL)
> >>> +				return -EINVAL;
> >>> +			xform->cipher = &xf->cipher;
> >>> +		} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> >>> +			if (xform->aead != NULL)
> >>> +				return -EINVAL;
> >>> +			xform->aead = &xf->aead;
> >>> +		} else
> >>> +			return -EINVAL;
> >>> +	}
> >>> +
> >>> +	return check_crypto_xform(xform);
> >>> +}
> >> how is this function handling the inbound and outbound cases.
> >> In inbound first xform is auth and then cipher.
> >> In outbound first is cipher and then auth. I think this should be
> >> checked in the lib.
> > Interesting, I didn't know about such limitation.
> > My understanding was that the any order (<auth,cipher>, <cipher,auth>)
> > for both inbound and outbound is acceptable.
> > Is that order restriction is documented somewhere?
> /**
>   * Symmetric crypto transform structure.
>   *
>   * This is used to specify the crypto transforms required, multiple
> transforms
>   * can be chained together to specify a chain transforms such as
> authentication
>   * then cipher, or cipher then authentication. Each transform structure can
>   * hold a single transform, the type field is used to specify which
> transform
>   * is contained within the union
>   */
> struct rte_crypto_sym_xform {

Yes, I read this, but I don't see where it says that the order of xforms implicitly
defines the order of operations for that session within crypto-dev.
Or is it just me?
I don't mind adding an extra check here; I just want to be sure it is really required
for the crypto PMD to work correctly.

> 
> This is not a limitation, this is how it is designed to handle 2 cases
> of crypto - auth then cipher and cipher then auth.
> 

Ok, if you sure it is a valid check - I'll add it.

> 
> >> Here for loop should not be there, as there would be at max only 2 xforms.
> >>> +
> >>> +uint64_t __rte_experimental
> >>> +rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
> >>> +{
> >>> +	return sa->type;
> >>> +}
> >>> +
> >>> +static int32_t
> >>> +ipsec_sa_size(uint32_t wsz, uint64_t type, uint32_t *nb_bucket)
> >>> +{
> >>> +	uint32_t n, sz;
> >>> +
> >>> +	n = 0;
> >>> +	if (wsz != 0 && (type & RTE_IPSEC_SATP_DIR_MASK) ==
> >>> +			RTE_IPSEC_SATP_DIR_IB)
> >>> +		n = replay_num_bucket(wsz);
> >>> +
> >>> +	if (n > WINDOW_BUCKET_MAX)
> >>> +		return -EINVAL;
> >>> +
> >>> +	*nb_bucket = n;
> >>> +
> >>> +	sz = rsn_size(n);
> >>> +	sz += sizeof(struct rte_ipsec_sa);
> >>> +	return sz;
> >>> +}
> >>> +
> >>> +void __rte_experimental
> >>> +rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
> >>> +{
> >>> +	memset(sa, 0, sa->size);
> >>> +}
> >> Where is the memory of "sa" getting initialized?
> > Not sure I understand your question...
> > Do you mean we missed memset(sa, 0, size)
> > in rte_ipsec_sa_init()?
> Sorry I did not ask the correct question, I was asking  - where it is
> allocated?
> Is it application's responsibility?

Yes, it is the application's responsibility to allocate the memory buffer.
But looking at the code again - we did indeed miss a memset() here;
will update.
  
Ananyev, Konstantin Dec. 21, 2018, 12:54 p.m. UTC | #10
> >
> >
> > On 12/20/2018 7:36 PM, Ananyev, Konstantin wrote:
> > >
> > >>> diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
> > >>> new file mode 100644
> > >>> index 000000000..f927a82bf
> > >>> --- /dev/null
> > >>> +++ b/lib/librte_ipsec/sa.c
> > >>> @@ -0,0 +1,327 @@
> > >>> +/* SPDX-License-Identifier: BSD-3-Clause
> > >>> + * Copyright(c) 2018 Intel Corporation
> > >>> + */
> > >>> +
> > >>> +#include <rte_ipsec_sa.h>
> > >>> +#include <rte_esp.h>
> > >>> +#include <rte_ip.h>
> > >>> +#include <rte_errno.h>
> > >>> +
> > >>> +#include "sa.h"
> > >>> +#include "ipsec_sqn.h"
> > >>> +
> > >>> +/* some helper structures */
> > >>> +struct crypto_xform {
> > >>> +	struct rte_crypto_auth_xform *auth;
> > >>> +	struct rte_crypto_cipher_xform *cipher;
> > >>> +	struct rte_crypto_aead_xform *aead;
> > >>> +};
> > >> shouldn't this be union as aead cannot be with cipher and auth cases.
> > > That's used internally to collect/analyze xforms provided by prm->crypto_xform.
> >
> > >
> > >
> > >> extra line
> > >>> +
> > >>> +
> > >>> +static int
> > >>> +check_crypto_xform(struct crypto_xform *xform)
> > >>> +{
> > >>> +	uintptr_t p;
> > >>> +
> > >>> +	p = (uintptr_t)xform->auth | (uintptr_t)xform->cipher;
> > >> what is the intent of this?
> > > It is used below to check that if aead is present both cipher and auth
> > > are  not.
> > >
> > >>> +
> > >>> +	/* either aead or both auth and cipher should be not NULLs */
> > >>> +	if (xform->aead) {
> > >>> +		if (p)
> > >>> +			return -EINVAL;
> > >>> +	} else if (p == (uintptr_t)xform->auth) {
> > >>> +		return -EINVAL;
> > >>> +	}
> > >> This function does not look good. It will miss the case of cipher only
> > > Cipher only is not supported right now and  I am not aware about plans
> > > to support it in future.
> > > If someone would like to add cipher onl,then yes he/she probably would
> > > have to update this function.
> > I know that cipher_only is not supported and nobody will support it in
> > case of ipsec.
> > My point is if somebody gives only auth or only cipher xform, then this
> > function would not be able to detect that case and will not return error.
> 
> fill_crypto_xform() (the function below) will detect it and return an error:
> +		if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +			if (xform->auth != NULL)
> +				return -EINVAL;


Please ignore the comment above - I was thinking about a different thing.
Yes, an extra check is needed for the case when only a cipher xform is provided.
  

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index 71ba31208..3cf0a84a2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1071,6 +1071,11 @@  F: doc/guides/prog_guide/pdump_lib.rst
 F: app/pdump/
 F: doc/guides/tools/pdump.rst
 
+IPsec - EXPERIMENTAL
+M: Konstantin Ananyev <konstantin.ananyev@intel.com>
+F: lib/librte_ipsec/
+M: Bernard Iremonger <bernard.iremonger@intel.com>
+F: test/test/test_ipsec.c
 
 Packet Framework
 ----------------
diff --git a/config/common_base b/config/common_base
index d12ae98bc..32499d772 100644
--- a/config/common_base
+++ b/config/common_base
@@ -925,6 +925,11 @@  CONFIG_RTE_LIBRTE_BPF=y
 # allow load BPF from ELF files (requires libelf)
 CONFIG_RTE_LIBRTE_BPF_ELF=n
 
+#
+# Compile librte_ipsec
+#
+CONFIG_RTE_LIBRTE_IPSEC=y
+
 #
 # Compile the test application
 #
diff --git a/lib/Makefile b/lib/Makefile
index b7370ef97..5dc774604 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -106,6 +106,8 @@  DEPDIRS-librte_gso := librte_eal librte_mbuf librte_ethdev librte_net
 DEPDIRS-librte_gso += librte_mempool
 DIRS-$(CONFIG_RTE_LIBRTE_BPF) += librte_bpf
 DEPDIRS-librte_bpf := librte_eal librte_mempool librte_mbuf librte_ethdev
+DIRS-$(CONFIG_RTE_LIBRTE_IPSEC) += librte_ipsec
+DEPDIRS-librte_ipsec := librte_eal librte_mbuf librte_cryptodev librte_security
 DIRS-$(CONFIG_RTE_LIBRTE_TELEMETRY) += librte_telemetry
 DEPDIRS-librte_telemetry := librte_eal librte_metrics librte_ethdev
 
diff --git a/lib/librte_ipsec/Makefile b/lib/librte_ipsec/Makefile
new file mode 100644
index 000000000..7758dcc6d
--- /dev/null
+++ b/lib/librte_ipsec/Makefile
@@ -0,0 +1,24 @@ 
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_ipsec.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_cryptodev -lrte_security
+
+EXPORT_MAP := rte_ipsec_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += sa.c
+
+# install header files
+SYMLINK-$(CONFIG_RTE_LIBRTE_IPSEC)-include += rte_ipsec_sa.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ipsec/ipsec_sqn.h b/lib/librte_ipsec/ipsec_sqn.h
new file mode 100644
index 000000000..1935f6e30
--- /dev/null
+++ b/lib/librte_ipsec/ipsec_sqn.h
@@ -0,0 +1,48 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IPSEC_SQN_H_
+#define _IPSEC_SQN_H_
+
+#define WINDOW_BUCKET_BITS		6 /* uint64_t */
+#define WINDOW_BUCKET_SIZE		(1 << WINDOW_BUCKET_BITS)
+#define WINDOW_BIT_LOC_MASK		(WINDOW_BUCKET_SIZE - 1)
+
+/* minimum number of bucket, power of 2*/
+#define WINDOW_BUCKET_MIN		2
+#define WINDOW_BUCKET_MAX		(INT16_MAX + 1)
+
+#define IS_ESN(sa)	((sa)->sqn_mask == UINT64_MAX)
+
+/*
+ * for given size, calculate required number of buckets.
+ */
+static uint32_t
+replay_num_bucket(uint32_t wsz)
+{
+	uint32_t nb;
+
+	nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
+		WINDOW_BUCKET_SIZE);
+	nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);
+
+	return nb;
+}
+
+/**
+ * Based on number of buckets calculated required size for the
+ * structure that holds replay window and sequence number (RSN) information.
+ */
+static size_t
+rsn_size(uint32_t nb_bucket)
+{
+	size_t sz;
+	struct replay_sqn *rsn;
+
+	sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
+	sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
+	return sz;
+}
+
+#endif /* _IPSEC_SQN_H_ */
diff --git a/lib/librte_ipsec/meson.build b/lib/librte_ipsec/meson.build
new file mode 100644
index 000000000..52c78eaeb
--- /dev/null
+++ b/lib/librte_ipsec/meson.build
@@ -0,0 +1,10 @@ 
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+
+sources=files('sa.c')
+
+install_headers = files('rte_ipsec_sa.h')
+
+deps += ['mbuf', 'net', 'cryptodev', 'security']
diff --git a/lib/librte_ipsec/rte_ipsec_sa.h b/lib/librte_ipsec/rte_ipsec_sa.h
new file mode 100644
index 000000000..4e36fd99b
--- /dev/null
+++ b/lib/librte_ipsec/rte_ipsec_sa.h
@@ -0,0 +1,139 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_IPSEC_SA_H_
+#define _RTE_IPSEC_SA_H_
+
+/**
+ * @file rte_ipsec_sa.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Defines API to manage IPsec Security Association (SA) objects.
+ */
+
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_security.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * An opaque structure to represent Security Association (SA).
+ */
+struct rte_ipsec_sa;
+
+/**
+ * SA initialization parameters.
+ */
+struct rte_ipsec_sa_prm {
+
+	uint64_t userdata; /**< provided and interpreted by user */
+	uint64_t flags;  /**< see RTE_IPSEC_SAFLAG_* below */
+	/** ipsec configuration */
+	struct rte_security_ipsec_xform ipsec_xform;
+	struct rte_crypto_sym_xform *crypto_xform;
+	union {
+		struct {
+			uint8_t hdr_len;     /**< tunnel header len */
+			uint8_t hdr_l3_off;  /**< offset for IPv4/IPv6 header */
+			uint8_t next_proto;  /**< next header protocol */
+			const void *hdr;     /**< tunnel header template */
+		} tun; /**< tunnel mode related parameters */
+		struct {
+			uint8_t proto;  /**< next header protocol */
+		} trs; /**< transport mode related parameters */
+	};
+
+	uint32_t replay_win_sz;
+	/**< window size to enable sequence replay attack handling.
+	 * Replay checking is disabled if the window size is 0.
+	 */
+};
+
+/**
+ * SA type is an 64-bit value that contain the following information:
+ * - IP version (IPv4/IPv6)
+ * - IPsec proto (ESP/AH)
+ * - inbound/outbound
+ * - mode (TRANSPORT/TUNNEL)
+ * - for TUNNEL outer IP version (IPv4/IPv6)
+ * ...
+ */
+
+enum {
+	RTE_SATP_LOG_IPV,
+	RTE_SATP_LOG_PROTO,
+	RTE_SATP_LOG_DIR,
+	RTE_SATP_LOG_MODE,
+	RTE_SATP_LOG_NUM
+};
+
+#define RTE_IPSEC_SATP_IPV_MASK		(1ULL << RTE_SATP_LOG_IPV)
+#define RTE_IPSEC_SATP_IPV4		(0ULL << RTE_SATP_LOG_IPV)
+#define RTE_IPSEC_SATP_IPV6		(1ULL << RTE_SATP_LOG_IPV)
+
+#define RTE_IPSEC_SATP_PROTO_MASK	(1ULL << RTE_SATP_LOG_PROTO)
+#define RTE_IPSEC_SATP_PROTO_AH		(0ULL << RTE_SATP_LOG_PROTO)
+#define RTE_IPSEC_SATP_PROTO_ESP	(1ULL << RTE_SATP_LOG_PROTO)
+
+#define RTE_IPSEC_SATP_DIR_MASK		(1ULL << RTE_SATP_LOG_DIR)
+#define RTE_IPSEC_SATP_DIR_IB		(0ULL << RTE_SATP_LOG_DIR)
+#define RTE_IPSEC_SATP_DIR_OB		(1ULL << RTE_SATP_LOG_DIR)
+
+#define RTE_IPSEC_SATP_MODE_MASK	(3ULL << RTE_SATP_LOG_MODE)
+#define RTE_IPSEC_SATP_MODE_TRANS	(0ULL << RTE_SATP_LOG_MODE)
+#define RTE_IPSEC_SATP_MODE_TUNLV4	(1ULL << RTE_SATP_LOG_MODE)
+#define RTE_IPSEC_SATP_MODE_TUNLV6	(2ULL << RTE_SATP_LOG_MODE)
+
+/**
+ * get type of given SA
+ * @return
+ *   SA type value.
+ */
+uint64_t __rte_experimental
+rte_ipsec_sa_type(const struct rte_ipsec_sa *sa);
+
+/**
+ * Calculate required SA size based on provided input parameters.
+ * @param prm
+ *   Parameters that will be used to initialise SA object.
+ * @return
+ *   - Actual size required for SA with given parameters.
+ *   - -EINVAL if the parameters are invalid.
+ */
+int __rte_experimental
+rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm);
+
+/**
+ * initialise SA based on provided input parameters.
+ * @param sa
+ *   SA object to initialise.
+ * @param prm
+ *   Parameters used to initialise given SA object.
+ * @param size
+ *   size of the provided buffer for SA.
+ * @return
+ *   - Actual size of SA object if operation completed successfully.
+ *   - -EINVAL if the parameters are invalid.
+ *   - -ENOSPC if the size of the provided buffer is not big enough.
+ */
+int __rte_experimental
+rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+	uint32_t size);
+
+/**
+ * cleanup SA
+ * @param sa
+ *   Pointer to SA object to de-initialize.
+ */
+void __rte_experimental
+rte_ipsec_sa_fini(struct rte_ipsec_sa *sa);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IPSEC_SA_H_ */
diff --git a/lib/librte_ipsec/rte_ipsec_version.map b/lib/librte_ipsec/rte_ipsec_version.map
new file mode 100644
index 000000000..1a66726b8
--- /dev/null
+++ b/lib/librte_ipsec/rte_ipsec_version.map
@@ -0,0 +1,10 @@ 
+EXPERIMENTAL {
+	global:
+
+	rte_ipsec_sa_fini;
+	rte_ipsec_sa_init;
+	rte_ipsec_sa_size;
+	rte_ipsec_sa_type;
+
+	local: *;
+};
diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
new file mode 100644
index 000000000..f927a82bf
--- /dev/null
+++ b/lib/librte_ipsec/sa.c
@@ -0,0 +1,327 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_ipsec_sa.h>
+#include <rte_esp.h>
+#include <rte_ip.h>
+#include <rte_errno.h>
+
+#include "sa.h"
+#include "ipsec_sqn.h"
+
+/* some helper structures */
+struct crypto_xform {
+	struct rte_crypto_auth_xform *auth;     /* non-NULL for auth+cipher SA */
+	struct rte_crypto_cipher_xform *cipher; /* non-NULL for auth+cipher SA */
+	struct rte_crypto_aead_xform *aead;     /* non-NULL for AEAD SA */
+};
+
+
+/*
+ * Validate the combination of crypto xforms collected for an SA:
+ * either a single AEAD xform, or both an auth and a cipher xform,
+ * must be present; any other combination is rejected.
+ * The original bitwise-OR pointer trick accepted a cipher-only
+ * xform list (auth == NULL, cipher != NULL) contrary to the stated
+ * requirement - compare the pointers explicitly instead.
+ */
+static int
+check_crypto_xform(struct crypto_xform *xform)
+{
+	if (xform->aead != NULL) {
+		/* AEAD must not be mixed with auth/cipher xforms */
+		if (xform->auth != NULL || xform->cipher != NULL)
+			return -EINVAL;
+	} else if (xform->auth == NULL || xform->cipher == NULL) {
+		/* otherwise both auth and cipher have to be present */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Walk the crypto xform chain from the SA parameters and collect
+ * pointers to the auth/cipher/aead xforms. Duplicate or unknown
+ * xform types are rejected; the resulting combination is validated
+ * by check_crypto_xform().
+ */
+static int
+fill_crypto_xform(struct crypto_xform *xform,
+	const struct rte_ipsec_sa_prm *prm)
+{
+	struct rte_crypto_sym_xform *xf;
+
+	memset(xform, 0, sizeof(*xform));
+
+	for (xf = prm->crypto_xform; xf != NULL; xf = xf->next) {
+		switch (xf->type) {
+		case RTE_CRYPTO_SYM_XFORM_AUTH:
+			if (xform->auth != NULL)
+				return -EINVAL;
+			xform->auth = &xf->auth;
+			break;
+		case RTE_CRYPTO_SYM_XFORM_CIPHER:
+			if (xform->cipher != NULL)
+				return -EINVAL;
+			xform->cipher = &xf->cipher;
+			break;
+		case RTE_CRYPTO_SYM_XFORM_AEAD:
+			if (xform->aead != NULL)
+				return -EINVAL;
+			xform->aead = &xf->aead;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return check_crypto_xform(xform);
+}
+
+/* return the RTE_IPSEC_SATP_* type bitmask composed at SA init time */
+uint64_t __rte_experimental
+rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
+{
+	return sa->type;
+}
+
+/*
+ * Determine required SA object size for given replay window size (wsz)
+ * and SA type. Also returns, via nb_bucket, the number of replay window
+ * buckets to allocate (0 for outbound SAs or when no window requested).
+ */
+static int32_t
+ipsec_sa_size(uint32_t wsz, uint64_t type, uint32_t *nb_bucket)
+{
+	uint32_t n, sz;
+
+	n = 0;
+	/* a replay window is maintained for inbound SAs only */
+	if (wsz != 0 && (type & RTE_IPSEC_SATP_DIR_MASK) ==
+			RTE_IPSEC_SATP_DIR_IB)
+		n = replay_num_bucket(wsz);
+
+	if (n > WINDOW_BUCKET_MAX)
+		return -EINVAL;
+
+	*nb_bucket = n;
+
+	/* the SA is stored together with its replay window */
+	sz = rsn_size(n);
+	sz += sizeof(struct rte_ipsec_sa);
+	return sz;
+}
+
+/*
+ * Wipe the whole SA object, including the replay window area that
+ * follows it (sa->size covers both, see ipsec_sa_size()).
+ */
+void __rte_experimental
+rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
+{
+	memset(sa, 0, sa->size);
+}
+
+/*
+ * Build the SA type bitmask (RTE_IPSEC_SATP_*) from the security xform
+ * in the SA parameters: protocol (AH/ESP), direction (IB/OB), mode
+ * (transport or tunnel v4/v6) and inner/payload IP version.
+ * Returns 0 on success, -EINVAL for any unsupported combination.
+ */
+static int
+fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
+{
+	uint64_t tp;
+
+	tp = 0;
+
+	/* IPsec protocol */
+	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+		tp |= RTE_IPSEC_SATP_PROTO_AH;
+	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+		tp |= RTE_IPSEC_SATP_PROTO_ESP;
+	else
+		return -EINVAL;
+
+	/* traffic direction */
+	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+		tp |= RTE_IPSEC_SATP_DIR_OB;
+	else if (prm->ipsec_xform.direction ==
+			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+		tp |= RTE_IPSEC_SATP_DIR_IB;
+	else
+		return -EINVAL;
+
+	/* mode, outer tunnel IP version and inner packet type */
+	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+		if (prm->ipsec_xform.tunnel.type ==
+				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
+			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
+		else if (prm->ipsec_xform.tunnel.type ==
+				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
+			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
+		else
+			return -EINVAL;
+
+		/* inner (encapsulated) packet IP version */
+		if (prm->tun.next_proto == IPPROTO_IPIP)
+			tp |= RTE_IPSEC_SATP_IPV4;
+		else if (prm->tun.next_proto == IPPROTO_IPV6)
+			tp |= RTE_IPSEC_SATP_IPV6;
+		else
+			return -EINVAL;
+	} else if (prm->ipsec_xform.mode ==
+			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
+		tp |= RTE_IPSEC_SATP_MODE_TRANS;
+		/* transport mode: payload IP version */
+		if (prm->trs.proto == IPPROTO_IPIP)
+			tp |= RTE_IPSEC_SATP_IPV4;
+		else if (prm->trs.proto == IPPROTO_IPV6)
+			tp |= RTE_IPSEC_SATP_IPV6;
+		else
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	*type = tp;
+	return 0;
+}
+
+/*
+ * Setup the crypto op offset/length template for inbound ESP.
+ * NOTE(review): the values stored here look like deltas that get
+ * combined with per-packet lengths by the crypto op preparation
+ * code (not part of this patch) - confirm against that code.
+ */
+static void
+esp_inb_init(struct rte_ipsec_sa *sa)
+{
+	/* these params may differ with new algorithms support */
+	sa->ctp.auth.offset = 0;
+	sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
+	sa->ctp.cipher.offset = sizeof(struct esp_hdr) + sa->iv_len;
+	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
+}
+
+/* inbound tunnel SA: record next-proto from tunnel params, then
+ * fall back to the common inbound ESP setup */
+static void
+esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
+{
+	sa->proto = prm->tun.next_proto;
+	esp_inb_init(sa);
+}
+
+/*
+ * Setup the crypto op offset/length template for outbound ESP.
+ * hlen - size of the headers that precede the ESP header (the tunnel
+ * header template for tunnel mode, 0 for transport mode - see callers).
+ */
+static void
+esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
+{
+	/* first outbound sequence number to use */
+	sa->sqn.outb = 1;
+
+	/* these params may differ with new algorithms support */
+	sa->ctp.auth.offset = hlen;
+	sa->ctp.auth.length = sizeof(struct esp_hdr) + sa->iv_len + sa->sqh_len;
+	if (sa->aad_len != 0) {
+		sa->ctp.cipher.offset = hlen + sizeof(struct esp_hdr) +
+			sa->iv_len;
+		sa->ctp.cipher.length = 0;
+	} else {
+		/* NOTE(review): this branch uses sa->hdr_len where the one
+		 * above uses hlen; equal for current callers (tunnel passes
+		 * hdr_len, transport passes 0), but confirm they are meant
+		 * to stay in sync.
+		 */
+		sa->ctp.cipher.offset = sa->hdr_len + sizeof(struct esp_hdr);
+		sa->ctp.cipher.length = sa->iv_len;
+	}
+}
+
+/* outbound tunnel SA: store the tunnel header template, then run
+ * the common outbound ESP setup with the template length */
+static void
+esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
+{
+	sa->proto = prm->tun.next_proto;
+	sa->hdr_len = prm->tun.hdr_len;
+	sa->hdr_l3_off = prm->tun.hdr_l3_off;
+	/* hdr_len is validated against sizeof(sa->hdr) by the caller */
+	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
+
+	esp_outb_init(sa, sa->hdr_len);
+}
+
+/*
+ * Init ESP-specific SA fields (ICV/IV sizes, IV offset inside the
+ * crypto op, pad alignment), then dispatch to the direction/mode
+ * specific helper to build the crypto op template.
+ */
+static int
+esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+	const struct crypto_xform *cxf)
+{
+	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+				RTE_IPSEC_SATP_MODE_MASK;
+
+	if (cxf->aead != NULL) {
+		/* RFC 4106 */
+		if (cxf->aead->algo != RTE_CRYPTO_AEAD_AES_GCM)
+			return -EINVAL;
+		sa->icv_len = cxf->aead->digest_length;
+		sa->iv_ofs = cxf->aead->iv.offset;
+		sa->iv_len = sizeof(uint64_t);
+		sa->pad_align = 4;
+		/* NOTE(review): sqh_len is not set on this path, unlike the
+		 * auth+cipher branch below - for an ESN SA with AEAD the
+		 * high sqn bytes would not be accounted for; confirm whether
+		 * ESN+AEAD is meant to be supported here.
+		 */
+	} else {
+		sa->icv_len = cxf->auth->digest_length;
+		sa->iv_ofs = cxf->cipher->iv.offset;
+		/* reserve room for the ESN high-order sqn word */
+		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
+		if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_NULL) {
+			sa->pad_align = 4;
+			sa->iv_len = 0;
+		} else if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+			sa->pad_align = IPSEC_MAX_IV_SIZE;
+			sa->iv_len = IPSEC_MAX_IV_SIZE;
+		} else
+			return -EINVAL;
+	}
+
+	sa->udata = prm->userdata;
+	/* spi is kept in network byte order */
+	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
+	sa->salt = prm->ipsec_xform.salt;
+
+	/* all six dir x mode combinations are enumerated below */
+	switch (sa->type & msk) {
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+		esp_inb_tun_init(sa, prm);
+		break;
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+		esp_inb_init(sa);
+		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+		esp_outb_tun_init(sa, prm);
+		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+		esp_outb_init(sa, 0);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Compute the buffer size needed for an SA built from the given
+ * parameters. Returns the size in bytes, or negative errno if the
+ * parameters describe an unsupported SA.
+ */
+int __rte_experimental
+rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
+{
+	int32_t rc;
+	uint32_t nb_bucket;
+	uint64_t sa_type;
+
+	if (prm == NULL)
+		return -EINVAL;
+
+	/* deduce the SA type bitmask from the input parameters */
+	rc = fill_sa_type(prm, &sa_type);
+	if (rc != 0)
+		return rc;
+
+	/* size depends on SA type and requested replay window */
+	return ipsec_sa_size(prm->replay_win_sz, sa_type, &nb_bucket);
+}
+
+/*
+ * Initialise an SA object in the caller-provided buffer of at least
+ * 'size' bytes. Returns the actual SA size on success, negative errno
+ * otherwise.
+ * Fixes vs previous version:
+ * - on esp_sa_init() failure the SA was wiped but the function still
+ *   fell through and returned sz (success); now returns the error.
+ * - the buffer is zeroed before filling, so fields not assigned on
+ *   every path (aad_len, hdr_len for transport mode, sqh_len on the
+ *   AEAD path) are no longer read uninitialized later.
+ */
+int __rte_experimental
+rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
+	uint32_t size)
+{
+	int32_t rc, sz;
+	uint32_t nb;
+	uint64_t type;
+	struct crypto_xform cxf;
+
+	if (sa == NULL || prm == NULL)
+		return -EINVAL;
+
+	/* determine SA type */
+	rc = fill_sa_type(prm, &type);
+	if (rc != 0)
+		return rc;
+
+	/* determine required size */
+	sz = ipsec_sa_size(prm->replay_win_sz, type, &nb);
+	if (sz < 0)
+		return sz;
+	else if (size < (uint32_t)sz)
+		return -ENOSPC;
+
+	/* only esp is supported right now */
+	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+		return -EINVAL;
+
+	/* tunnel header template has to fit into the SA */
+	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+			prm->tun.hdr_len > sizeof(sa->hdr))
+		return -EINVAL;
+
+	rc = fill_crypto_xform(&cxf, prm);
+	if (rc != 0)
+		return rc;
+
+	/* start from a clean slate - not all fields are set on all paths */
+	memset(sa, 0, sz);
+
+	sa->type = type;
+	sa->size = sz;
+
+	/* check for ESN flag */
+	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
+		UINT32_MAX : UINT64_MAX;
+
+	rc = esp_sa_init(sa, prm, &cxf);
+	if (rc != 0) {
+		/* don't report success for a wiped-out SA */
+		rte_ipsec_sa_fini(sa);
+		return rc;
+	}
+
+	/* fill replay window related fields */
+	if (nb != 0) {
+		sa->replay.win_sz = prm->replay_win_sz;
+		sa->replay.nb_bucket = nb;
+		sa->replay.bucket_index_mask = sa->replay.nb_bucket - 1;
+		sa->sqn.inb = (struct replay_sqn *)(sa + 1);
+	}
+
+	return sz;
+}
diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
new file mode 100644
index 000000000..5d113891a
--- /dev/null
+++ b/lib/librte_ipsec/sa.h
@@ -0,0 +1,77 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _SA_H_
+#define _SA_H_
+
+#define IPSEC_MAX_HDR_SIZE	64
+#define IPSEC_MAX_IV_SIZE	16
+#define IPSEC_MAX_IV_QWORD	(IPSEC_MAX_IV_SIZE / sizeof(uint64_t))
+
+/* these definitions probably have to be in rte_crypto_sym.h */
+union sym_op_ofslen {
+	uint64_t raw;  /* both fields accessible as a single 64-bit value */
+	struct {
+		uint32_t offset;
+		uint32_t length;
+	};
+};
+
+/* virtual/IO address pair, accessible as one 128-bit value if available */
+union sym_op_data {
+#ifdef __SIZEOF_INT128__
+	__uint128_t raw;
+#endif
+	struct {
+		uint8_t *va;
+		rte_iova_t pa;
+	};
+};
+
+/* last seen sqn followed by a variable number of replay window buckets;
+ * bucket count is fixed at SA init time (see ipsec_sa_size()) */
+struct replay_sqn {
+	uint64_t sqn;
+	/* zero-length trailing array (GNU extension) */
+	__extension__ uint64_t window[0];
+};
+
+struct rte_ipsec_sa {
+	uint64_t type;     /* type of given SA (RTE_IPSEC_SATP_* bitmask) */
+	uint64_t udata;    /* user defined */
+	uint32_t size;     /* size of given sa object (incl. replay window) */
+	uint32_t spi;      /* stored in network byte order */
+	/* sqn calculations related */
+	uint64_t sqn_mask; /* UINT32_MAX, or UINT64_MAX when ESN enabled */
+	struct {
+		uint32_t win_sz;            /* replay window size */
+		uint16_t nb_bucket;         /* number of window buckets */
+		uint16_t bucket_index_mask; /* nb_bucket - 1; assumes
+					     * nb_bucket is a power of 2 */
+	} replay;
+	/* template for crypto op fields */
+	struct {
+		union sym_op_ofslen cipher;
+		union sym_op_ofslen auth;
+	} ctp;
+	uint32_t salt;
+	uint8_t proto;    /* next proto */
+	uint8_t aad_len;
+	uint8_t hdr_len;    /* length of the tunnel header template */
+	uint8_t hdr_l3_off; /* offset of l3 header inside the template */
+	uint8_t icv_len;
+	uint8_t sqh_len;  /* room for the ESN high-order sqn word */
+	uint8_t iv_ofs; /* offset for algo-specific IV inside crypto op */
+	uint8_t iv_len;
+	uint8_t pad_align;
+
+	/* template for tunnel header */
+	uint8_t hdr[IPSEC_MAX_HDR_SIZE];
+
+	/*
+	 * sqn and replay window: outbound SAs keep a plain counter,
+	 * inbound SAs point at the replay window placed right after
+	 * this structure (set up in rte_ipsec_sa_init()).
+	 */
+	union {
+		uint64_t outb;
+		struct replay_sqn *inb;
+	} sqn;
+
+} __rte_cache_aligned;
+
+#endif /* _SA_H_ */
diff --git a/lib/meson.build b/lib/meson.build
index bb7f443f9..69684ef14 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -22,6 +22,8 @@  libraries = [ 'compat', # just a header, used for versioning
 	'kni', 'latencystats', 'lpm', 'member',
 	'meter', 'power', 'pdump', 'rawdev',
 	'reorder', 'sched', 'security', 'vhost',
+	#ipsec lib depends on crypto and security
+	'ipsec',
 	# add pkt framework libs which use other libs from above
 	'port', 'table', 'pipeline',
 	# flow_classify lib depends on pkt framework table lib
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 5699d979d..f4cd75252 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -67,6 +67,8 @@  ifeq ($(CONFIG_RTE_LIBRTE_BPF_ELF),y)
 _LDLIBS-$(CONFIG_RTE_LIBRTE_BPF)            += -lelf
 endif
 
+_LDLIBS-$(CONFIG_RTE_LIBRTE_IPSEC)            += -lrte_ipsec
+
 _LDLIBS-y += --whole-archive
 
 _LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE)        += -lrte_cfgfile