[v3,1/2] examples/ipsec-secgw: fix 1st packet dropped for inline crypto

Message ID 1554384495-7936-2-git-send-email-bernard.iremonger@intel.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series examples/ipsec-secgw: fix 1st pkt dropped

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Iremonger, Bernard April 4, 2019, 1:28 p.m. UTC
  Inline crypto installs a flow rule in the NIC. This flow
rule must be installed before the first inbound packet is
received.

The create_session() function installs the flow rule.

Refactor ipsec-secgw.c, sa.c, ipsec.h and ipsec.c to create
sessions at startup to fix the issue of the first packet
being dropped for inline crypto.

The create_session() function is now called at initialisation in
sa_add_rules(), which is called from sa_init().
The return code of sa_add_rules() is now checked.
Calls to create_session() in other functions have been dropped.

Add crypto_devid_fill() in ipsec-secgw.c
Add max_session_size() in ipsec-secgw.c
Add check_cryptodev_capability() in ipsec.c
Add check_cryptodev_aead_capability() in ipsec.c
Add create_sec_session() and create_crypto_session() in ipsec.c

The crypto_devid_fill() function has been added to find the
enabled crypto devices.

The max_session_size() function has been added to calculate the maximum
session size, which is used to size the session mempools.

The check_cryptodev_capability() and check_cryptodev_aead_capability()
functions have been added to check that the SA is supported by the
crypto device.

The create_session() function is refactored to use the
create_sec_session() and create_crypto_session() functions.

The cryptodevs_init() function has been refactored to drop the calls to
rte_mempool_create() and the calculation of the session memory requirements.

The main() function has been refactored to call crypto_devid_fill(),
max_session_size(), session_pool_init() and session_priv_pool_init().
The ports are now started in main() before any flow rule is added.
The sa_init(), sp4_init(), sp6_init() and rt_init() functions are
now called after the ports have been started.
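
The resulting startup order in main() is, in outline (a simplified
sketch of the flow after this patch; per-socket loops and error
handling omitted):

	/* per-socket mbuf and session pools are created first */
	pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
	session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
	session_priv_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);

	/* the ports must be started before a flow rule can be installed */
	ret = rte_eth_dev_start(portid);

	/* SA/SP/routing setup now runs after the ports are up, so that
	 * create_session() can install the inline crypto flow rules */
	sa_init(&socket_ctx[socket_id], socket_id);
	sp4_init(&socket_ctx[socket_id], socket_id);
	sp6_init(&socket_ctx[socket_id], socket_id);
	rt_init(&socket_ctx[socket_id], socket_id);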

Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload")
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: stable@dpdk.org

Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
 examples/ipsec-secgw/ipsec-secgw.c   | 271 +++++++++--------
 examples/ipsec-secgw/ipsec.c         | 569 +++++++++++++++++++----------------
 examples/ipsec-secgw/ipsec.h         |  10 +-
 examples/ipsec-secgw/ipsec_process.c |  38 +--
 examples/ipsec-secgw/sa.c            |  42 ++-
 5 files changed, 495 insertions(+), 435 deletions(-)
  

Comments

Akhil Goyal April 17, 2019, 11:51 a.m. UTC | #1
> 
> Inline crypto installs a flow rule in the NIC. This flow
> rule must be installed before the first inbound packet is
> received.
> 
> The create_session() function installs the flow rule.
> 
> Refactor ipsec-secgw.c, sa.c, ipsec.h and ipsec.c to create
> sessions at startup to fix the issue of the first packet
> being dropped for inline crypto.
> 
> The create_session() function is now called at initialisation in
> sa_add_rules() which is called from sa_init().
> The return code for add_rules is checked.
> Calls to create_session() in other functions are dropped.
> 
> Add crypto_devid_fill() in ipsec-secgw.c
> Add max_session_size() in ipsec-secgw.c
> Add check_cryptodev_capability() in ipsec.c
> Add check_cryptodev_aead_capability() in ipsec.c
> Add create_sec_session() and create_crypto_session() in ipsec.c
> 
> The crypto_devid_fill() function has been added to find the
> enabled crypto devices.
> 
> The max_session_size() function has been added to calculate memory
> requirements.
> 
> The check_cryptodev_capability() and check_cryptodev_aead_capability()
> functions have been added to check that the SA is supported by the
> crypto device.
> 
> The create_session() function is refactored to use the
> create_sec_session() and create_crypto_session() functions.
> 
> The cryptodevs_init() function has been refactored to drop calls to
> rte_mempool_create() and to drop calculation of memory requirements.
> 
> The main() function has been refactored to call crypto_devid_fill()
> and max_session_size() and to call session_pool_init() and
> session_priv_pool_init().
> The ports are started now before adding a flow rule in main().
> The sa_init(), sp4_init(), sp6_init() and rt_init() functions are
> now called after the ports have been started.
> 
> Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload")
> Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
> ---
>  examples/ipsec-secgw/ipsec-secgw.c   | 271 +++++++++--------
>  examples/ipsec-secgw/ipsec.c         | 569 +++++++++++++++++++----------------
>  examples/ipsec-secgw/ipsec.h         |  10 +-
>  examples/ipsec-secgw/ipsec_process.c |  38 +--
>  examples/ipsec-secgw/sa.c            |  42 ++-
>  5 files changed, 495 insertions(+), 435 deletions(-)
> 
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> index ffbd00b..cc8bb57 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -182,6 +182,14 @@ struct lcore_params {
>  	uint8_t lcore_id;
>  } __rte_cache_aligned;
> 
> +/*
> + * Number of enabled crypto devices
> + * This number is needed when checking crypto device capabilities
> + */
> +uint8_t crypto_dev_num;
> +/* array of crypto device ID's */
> +uint8_t crypto_devid[RTE_CRYPTO_MAX_DEVS];
> +
>  static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> 
>  static struct lcore_params *lcore_params;
> @@ -1623,13 +1631,27 @@ check_cryptodev_mask(uint8_t cdev_id)
>  	return -1;
>  }
> 
> +static void
> +crypto_devid_fill(void)
> +{
> +	uint32_t i, n;
> +
> +	n = rte_cryptodev_count();
> +
> +	for (i = 0; i != n; i++) {
> +		if (check_cryptodev_mask(i) == 0)
> +			crypto_devid[crypto_dev_num++] = i;
> +	}
> +}
> +
>  static int32_t
>  cryptodevs_init(void)
>  {
>  	struct rte_cryptodev_config dev_conf;
>  	struct rte_cryptodev_qp_conf qp_conf;
>  	uint16_t idx, max_nb_qps, qp, i;
> -	int16_t cdev_id, port_id;
> +	int16_t cdev_id;
> +	uint32_t dev_max_sess;
>  	struct rte_hash_parameters params = { 0 };
> 
>  	params.entries = CDEV_MAP_ENTRIES;
> @@ -1652,45 +1674,6 @@ cryptodevs_init(void)
> 
>  	printf("lcore/cryptodev/qp mappings:\n");
> 
> -	uint32_t max_sess_sz = 0, sess_sz;
> -	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
> -		void *sec_ctx;
> -
> -		/* Get crypto priv session size */
> -		sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
> -		if (sess_sz > max_sess_sz)
> -			max_sess_sz = sess_sz;
> -
> -		/*
> -		 * If crypto device is security capable, need to check the
> -		 * size of security session as well.
> -		 */
> -
> -		/* Get security context of the crypto device */
> -		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
> -		if (sec_ctx == NULL)
> -			continue;
> -
> -		/* Get size of security session */
> -		sess_sz = rte_security_session_get_size(sec_ctx);
> -		if (sess_sz > max_sess_sz)
> -			max_sess_sz = sess_sz;
> -	}
> -	RTE_ETH_FOREACH_DEV(port_id) {
> -		void *sec_ctx;
> -
> -		if ((enabled_port_mask & (1 << port_id)) == 0)
> -			continue;
> -
> -		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
> -		if (sec_ctx == NULL)
> -			continue;
> -
> -		sess_sz = rte_security_session_get_size(sec_ctx);
> -		if (sess_sz > max_sess_sz)
> -			max_sess_sz = sess_sz;
> -	}
> -
>  	idx = 0;
>  	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
>  		struct rte_cryptodev_info cdev_info;
> @@ -1722,51 +1705,12 @@ cryptodevs_init(void)
>  		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
>  		dev_conf.nb_queue_pairs = qp;
> 
> -		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
> +		dev_max_sess = cdev_info.sym.max_nb_sessions;
>  		if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
>  			rte_exit(EXIT_FAILURE,
>  				"Device does not support at least %u "
>  				"sessions", CDEV_MP_NB_OBJS);
> 
> -		if (!socket_ctx[dev_conf.socket_id].session_pool) {
> -			char mp_name[RTE_MEMPOOL_NAMESIZE];
> -			struct rte_mempool *sess_mp;
> -
> -			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
> -					"sess_mp_%u", dev_conf.socket_id);
> -			sess_mp = rte_cryptodev_sym_session_pool_create(
> -					mp_name, CDEV_MP_NB_OBJS,
> -					0, CDEV_MP_CACHE_SZ, 0,
> -					dev_conf.socket_id);
> -			socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
> -		}
> -
> -		if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
> -			char mp_name[RTE_MEMPOOL_NAMESIZE];
> -			struct rte_mempool *sess_mp;
> -
> -			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
> -					"sess_mp_priv_%u", dev_conf.socket_id);
> -			sess_mp = rte_mempool_create(mp_name,
> -					CDEV_MP_NB_OBJS,
> -					max_sess_sz,
> -					CDEV_MP_CACHE_SZ,
> -					0, NULL, NULL, NULL,
> -					NULL, dev_conf.socket_id,
> -					0);
> -			socket_ctx[dev_conf.socket_id].session_priv_pool =
> -					sess_mp;
> -		}
> -
> -		if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
> -				!socket_ctx[dev_conf.socket_id].session_pool)
> -			rte_exit(EXIT_FAILURE,
> -				"Cannot create session pool on socket %d\n",
> -				dev_conf.socket_id);
> -		else
> -			printf("Allocated session pool on socket %d\n",
> -					dev_conf.socket_id);
> -
>  		if (rte_cryptodev_configure(cdev_id, &dev_conf))
>  			rte_panic("Failed to initialize cryptodev %u\n",
>  					cdev_id);
> @@ -1787,38 +1731,6 @@ cryptodevs_init(void)
>  					cdev_id);
>  	}
> 
> -	/* create session pools for eth devices that implement security */
> -	RTE_ETH_FOREACH_DEV(port_id) {
> -		if ((enabled_port_mask & (1 << port_id)) &&
> -				rte_eth_dev_get_sec_ctx(port_id)) {
> -			int socket_id = rte_eth_dev_socket_id(port_id);
> -
> -			if (!socket_ctx[socket_id].session_pool) {
> -				char mp_name[RTE_MEMPOOL_NAMESIZE];
> -				struct rte_mempool *sess_mp;
> -
> -				snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
> -						"sess_mp_%u", socket_id);
> -				sess_mp = rte_mempool_create(mp_name,
> -						(CDEV_MP_NB_OBJS * 2),
> -						max_sess_sz,
> -						CDEV_MP_CACHE_SZ,
> -						0, NULL, NULL, NULL,
> -						NULL, socket_id,
> -						0);
> -				if (sess_mp == NULL)
> -					rte_exit(EXIT_FAILURE,
> -						"Cannot create session pool "
> -						"on socket %d\n", socket_id);
> -				else
> -					printf("Allocated session pool "
> -						"on socket %d\n", socket_id);
> -				socket_ctx[socket_id].session_pool = sess_mp;
> -			}
> -		}
> -	}
> -
> -
>  	printf("\n");
> 
>  	return 0;
> @@ -1984,6 +1896,98 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
>  	printf("\n");
>  }
> 
> +static size_t
> +max_session_size(void)
> +{
> +	size_t max_sz, sz;
> +	void *sec_ctx;
> +	int16_t cdev_id, port_id, n;
> +
> +	max_sz = 0;
> +	n =  rte_cryptodev_count();
> +	for (cdev_id = 0; cdev_id != n; cdev_id++) {
> +		sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
> +		if (sz > max_sz)
> +			max_sz = sz;
> +		/*
> +		 * If crypto device is security capable, need to check the
> +		 * size of security session as well.
> +		 */
> +
> +		/* Get security context of the crypto device */
> +		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
> +		if (sec_ctx == NULL)
> +			continue;
> +
> +		/* Get size of security session */
> +		sz = rte_security_session_get_size(sec_ctx);
> +		if (sz > max_sz)
> +			max_sz = sz;
> +	}
> +
> +	RTE_ETH_FOREACH_DEV(port_id) {
> +		if ((enabled_port_mask & (1 << port_id)) == 0)
> +			continue;
> +
> +		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
> +		if (sec_ctx == NULL)
> +			continue;
> +
> +		sz = rte_security_session_get_size(sec_ctx);
> +		if (sz > max_sz)
> +			max_sz = sz;
> +	}
> +
> +	return max_sz;
> +}
> +
> +static void
> +session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
> +{
> +	char mp_name[RTE_MEMPOOL_NAMESIZE];
> +	struct rte_mempool *sess_mp;
> +
> +	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
> +			"sess_mp_%u", socket_id);
> +	sess_mp = rte_cryptodev_sym_session_pool_create(
> +			mp_name, CDEV_MP_NB_OBJS,
> +			sess_sz, CDEV_MP_CACHE_SZ, 0,
> +			socket_id);
> +	ctx->session_pool = sess_mp;
> +
> +	if (ctx->session_pool == NULL)
> +		rte_exit(EXIT_FAILURE,
> +			"Cannot init session pool on socket %d\n", socket_id);
> +	else
> +		printf("Allocated session pool on socket %d\n",	socket_id);
> +}
> +
> +static void
> +session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
> +	size_t sess_sz)
> +{
> +	char mp_name[RTE_MEMPOOL_NAMESIZE];
> +	struct rte_mempool *sess_mp;
> +
> +	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
> +			"sess_mp_priv_%u", socket_id);
> +	sess_mp = rte_mempool_create(mp_name,
> +			CDEV_MP_NB_OBJS,
> +			sess_sz,
> +			CDEV_MP_CACHE_SZ,
> +			0, NULL, NULL, NULL,
> +			NULL, socket_id,
> +			0);
> +	ctx->session_priv_pool = sess_mp;
> +	if (ctx->session_priv_pool == NULL)
> +		rte_exit(EXIT_FAILURE,
> +			"Cannot init session priv pool on socket %d\n",
> +			socket_id);
> +	else
> +		printf("Allocated session priv pool on socket %d\n",
> +			socket_id);
> +}
> +
>  static void
>  pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
>  {
> @@ -2064,9 +2068,11 @@ main(int32_t argc, char **argv)
>  {
>  	int32_t ret;
>  	uint32_t lcore_id;
> +	uint32_t i;
>  	uint8_t socket_id;
>  	uint16_t portid;
>  	uint64_t req_rx_offloads, req_tx_offloads;
> +	size_t sess_sz;
> 
>  	/* init EAL */
>  	ret = rte_eal_init(argc, argv);
> @@ -2094,7 +2100,10 @@ main(int32_t argc, char **argv)
> 
>  	nb_lcores = rte_lcore_count();
> 
> -	/* Replicate each context per socket */
> +	crypto_devid_fill();
> +
> +	sess_sz = max_session_size();
> +
>  	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
>  		if (rte_lcore_is_enabled(lcore_id) == 0)
>  			continue;
> @@ -2104,20 +2113,17 @@ main(int32_t argc, char **argv)
>  		else
>  			socket_id = 0;
> 
> +		/* mbuf_pool is initialised by the pool_init() function*/
>  		if (socket_ctx[socket_id].mbuf_pool)
>  			continue;
> 
> -		/* initilaze SPD */
> -		sp4_init(&socket_ctx[socket_id], socket_id);
> -
> -		sp6_init(&socket_ctx[socket_id], socket_id);
> -
> -		/* initilaze SAD */
> -		sa_init(&socket_ctx[socket_id], socket_id);
> -
> -		rt_init(&socket_ctx[socket_id], socket_id);
> -
>  		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
> +		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
> +		session_priv_pool_init(&socket_ctx[socket_id], socket_id,
> +			sess_sz);
> +
> +		if (!numa_on)
> +			break;
>  	}
> 
>  	RTE_ETH_FOREACH_DEV(portid) {
> @@ -2135,7 +2141,11 @@ main(int32_t argc, char **argv)
>  		if ((enabled_port_mask & (1 << portid)) == 0)
>  			continue;
> 
> -		/* Start device */
> +		/*
> +		 * Start device
> +		 * note: device must be started before a flow rule
> +		 * can be installed.
> +		 */
>  		ret = rte_eth_dev_start(portid);
>  		if (ret < 0)
>  			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
> @@ -2153,6 +2163,19 @@ main(int32_t argc, char **argv)
>  			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
>  	}
> 
> +	/* Replicate each context per socket */
> +	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
> +		socket_id = rte_socket_id_by_idx(i);
> +		if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
> +			(socket_ctx[socket_id].sa_in == NULL) &&
> +			(socket_ctx[socket_id].sa_out == NULL)) {
> +			sa_init(&socket_ctx[socket_id], socket_id);
> +			sp4_init(&socket_ctx[socket_id], socket_id);
> +			sp6_init(&socket_ctx[socket_id], socket_id);
> +			rt_init(&socket_ctx[socket_id], socket_id);
> +		}
> +	}
> +
>  	check_all_ports_link_status(enabled_port_mask);
> 
>  	/* launch per-lcore init on every lcore */
> diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
> index 4352cb8..e31d472 100644
> --- a/examples/ipsec-secgw/ipsec.c
> +++ b/examples/ipsec-secgw/ipsec.c
> @@ -39,42 +39,17 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
>  	ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
>  }
> 
> -int
> -create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
> +static int
> +create_sec_session(struct ipsec_sa *sa, struct rte_mempool *pool)
>  {
> -	struct rte_cryptodev_info cdev_info;
> -	unsigned long cdev_id_qp = 0;
> +	const struct rte_security_capability *sec_cap;
> +	struct rte_security_ctx *ctx;
>  	int32_t ret = 0;
> -	struct cdev_key key = { 0 };
> -
> -	key.lcore_id = (uint8_t)rte_lcore_id();
> -
> -	key.cipher_algo = (uint8_t)sa->cipher_algo;
> -	key.auth_algo = (uint8_t)sa->auth_algo;
> -	key.aead_algo = (uint8_t)sa->aead_algo;
> -
> -	if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
> -		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
> -				(void **)&cdev_id_qp);
> -		if (ret < 0) {
> -			RTE_LOG(ERR, IPSEC,
> -				"No cryptodev: core %u, cipher_algo %u, "
> -				"auth_algo %u, aead_algo %u\n",
> -				key.lcore_id,
> -				key.cipher_algo,
> -				key.auth_algo,
> -				key.aead_algo);
> -			return -1;
> -		}
> -	}
> 
> -	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
> -			"%u qp %u\n", sa->spi,
> -			ipsec_ctx->tbl[cdev_id_qp].id,
> -			ipsec_ctx->tbl[cdev_id_qp].qp);
> +	if ((sa == NULL) || (pool == NULL))
> +		return -EINVAL;
> 
> -	if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE) {
> -		struct rte_security_session_conf sess_conf = {
> +	struct rte_security_session_conf sess_conf = {
>  			.action_type = sa->type,
>  			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
>  			{.ipsec = {
> @@ -90,247 +65,340 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
>  			} },
>  			.crypto_xform = sa->xforms,
>  			.userdata = NULL,
> -
>  		};
> 
> -		if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
> -			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
> -							rte_cryptodev_get_sec_ctx(
> -							ipsec_ctx->tbl[cdev_id_qp].id);
> -
> -			/* Set IPsec parameters in conf */
> -			set_ipsec_conf(sa, &(sess_conf.ipsec));
> -
> -			sa->sec_session = rte_security_session_create(ctx,
> -					&sess_conf, ipsec_ctx->session_pool);
> -			if (sa->sec_session == NULL) {
> -				RTE_LOG(ERR, IPSEC,
> -				"SEC Session init failed: err: %d\n", ret);
> -				return -1;
> -			}
> -		} else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
> -			struct rte_flow_error err;
> -			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
> -							rte_eth_dev_get_sec_ctx(
> -							sa->portid);
> -			const struct rte_security_capability *sec_cap;
> -			int ret = 0;
> -
> -			sa->sec_session = rte_security_session_create(ctx,
> -					&sess_conf, ipsec_ctx->session_pool);
> -			if (sa->sec_session == NULL) {
> -				RTE_LOG(ERR, IPSEC,
> -				"SEC Session init failed: err: %d\n", ret);
> -				return -1;
> -			}
> +	if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
> +		ctx = (struct rte_security_ctx *)
> +				rte_eth_dev_get_sec_ctx(sa->portid);
> 
> -			sec_cap = rte_security_capabilities_get(ctx);
> +		/* Set IPsec parameters in conf */
> +		set_ipsec_conf(sa, &(sess_conf.ipsec));
> 
> -			/* iterate until ESP tunnel*/
> -			while (sec_cap->action !=
> -					RTE_SECURITY_ACTION_TYPE_NONE) {
> +		sa->sec_session = rte_security_session_create(ctx,
> +				&sess_conf, pool);
> +		if (sa->sec_session == NULL) {
> +			RTE_LOG(ERR, IPSEC,
> +				"SEC Session init failed: err: %d\n",
> +				ret);
> +			return -1;
> +		}
> +	} else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
> +		struct rte_flow_error err;
> +		ctx = (struct rte_security_ctx *)
> +				rte_eth_dev_get_sec_ctx(sa->portid);
> +		sa->sec_session = rte_security_session_create(ctx,
> +				&sess_conf, pool);
> +		if (sa->sec_session == NULL) {
> +			RTE_LOG(ERR, IPSEC, "SEC Session init failed\n");
> +			return -1;
> +		}
> 
> -				if (sec_cap->action == sa->type &&
> -				    sec_cap->protocol ==
> -					RTE_SECURITY_PROTOCOL_IPSEC &&
> -				    sec_cap->ipsec.mode ==
> -					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
> -				    sec_cap->ipsec.direction == sa->direction)
> -					break;
> -				sec_cap++;
> -			}
> +		sec_cap = rte_security_capabilities_get(ctx);
> +
> +		/* iterate until ESP tunnel*/
> +		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
> +			if (sec_cap->action == sa->type &&
> +				sec_cap->protocol ==
> +				RTE_SECURITY_PROTOCOL_IPSEC &&
> +				sec_cap->ipsec.mode ==
> +				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
> +				sec_cap->ipsec.direction == sa->direction)
> +				break;
> +			sec_cap++;
> +		}
> 
> -			if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
> -				RTE_LOG(ERR, IPSEC,
> +		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
> +			RTE_LOG(ERR, IPSEC,
>  				"No suitable security capability found\n");
>  				return -1;
> -			}
> +		}
> 
> -			sa->ol_flags = sec_cap->ol_flags;
> -			sa->security_ctx = ctx;
> -			sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
> -
> -			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
> -			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
> -			if (sa->flags & IP6_TUNNEL) {
> -				sa->pattern[1].spec = &sa->ipv6_spec;
> -				memcpy(sa->ipv6_spec.hdr.dst_addr,
> -					sa->dst.ip.ip6.ip6_b, 16);
> -				memcpy(sa->ipv6_spec.hdr.src_addr,
> -				       sa->src.ip.ip6.ip6_b, 16);
> -			} else {
> -				sa->pattern[1].spec = &sa->ipv4_spec;
> -				sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
> -				sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
> -			}
> +		sa->ol_flags = sec_cap->ol_flags;
> +		sa->security_ctx = ctx;
> +		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
> +
> +		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
> +		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
> +		if (sa->flags & IP6_TUNNEL) {
> +			sa->pattern[1].spec = &sa->ipv6_spec;
> +			memcpy(sa->ipv6_spec.hdr.dst_addr,
> +				sa->dst.ip.ip6.ip6_b, 16);
> +			memcpy(sa->ipv6_spec.hdr.src_addr,
> +			       sa->src.ip.ip6.ip6_b, 16);
> +		} else {
> +			sa->pattern[1].spec = &sa->ipv4_spec;
> +			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
> +			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
> +		}
> 
> -			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
> -			sa->pattern[2].spec = &sa->esp_spec;
> -			sa->pattern[2].mask = &rte_flow_item_esp_mask;
> -			sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
> +		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
> +		sa->pattern[2].spec = &sa->esp_spec;
> +		sa->pattern[2].mask = &rte_flow_item_esp_mask;
> +		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
> 
> -			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
> +		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
> 
> -			sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
> -			sa->action[0].conf = sa->sec_session;
> +		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
> +		sa->action[0].conf = sa->sec_session;
> 
> -			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
> +		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
> 
> -			sa->attr.egress = (sa->direction ==
> +		sa->attr.egress = (sa->direction ==
>  					RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
> -			sa->attr.ingress = (sa->direction ==
> +		sa->attr.ingress = (sa->direction ==
>  					RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
> -			if (sa->attr.ingress) {
> -				uint8_t rss_key[40];
> -				struct rte_eth_rss_conf rss_conf = {
> -					.rss_key = rss_key,
> -					.rss_key_len = 40,
> -				};
> -				struct rte_eth_dev *eth_dev;
> -				uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
> -				struct rte_flow_action_rss action_rss;
> -				unsigned int i;
> -				unsigned int j;
> -
> -				sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
> -				/* Try RSS. */
> -				sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
> -				sa->action[1].conf = &action_rss;
> -				eth_dev = ctx->device;
> -				rte_eth_dev_rss_hash_conf_get(sa->portid,
> -							      &rss_conf);
> -				for (i = 0, j = 0;
> -				     i < eth_dev->data->nb_rx_queues; ++i)
> -					if (eth_dev->data->rx_queues[i])
> -						queue[j++] = i;
> +		if (sa->attr.ingress) {
> +			uint8_t rss_key[40];
> +			struct rte_eth_rss_conf rss_conf = {
> +				.rss_key = rss_key,
> +				.rss_key_len = 40,
> +			};
> +			struct rte_eth_dev *eth_dev;
> +			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
> +			struct rte_flow_action_rss action_rss;
> +			unsigned int i;
> +			unsigned int j;
> +
> +			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
> +			/* Try RSS. */
> +			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
> +			sa->action[1].conf = &action_rss;
> +			eth_dev = ctx->device;
> +			rte_eth_dev_rss_hash_conf_get(sa->portid,
> +						&rss_conf);
> +			for (i = 0, j = 0; i < eth_dev->data->nb_rx_queues;
> +				++i)
> +				if (eth_dev->data->rx_queues[i])
> +					queue[j++] = i;
Compilation error

/home/akhil/up/dpdk-next-crypto/examples/ipsec-secgw/ipsec.c: In function 'create_sec_session':
/home/akhil/up/dpdk-next-crypto/examples/ipsec-secgw/ipsec.c:169:4: error: this 'for' clause does not guard... [-Werror=misleading-indentation]
    for (i = 0, j = 0; i < eth_dev->data->nb_rx_queues;
    ^~~
/home/akhil/up/dpdk-next-crypto/examples/ipsec-secgw/ipsec.c:173:5: note: ...this statement, but the latter is misleadingly indented as if it is guarded by the 'for'
     action_rss = (struct rte_flow_action_rss){
     ^~~~~~~~~~
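
The loop body here is only the single if statement ending at
"queue[j++] = i;", while the action_rss assignment that follows is
indented as if it were inside the loop. A minimal way to silence
-Werror=misleading-indentation (a sketch, not taken from the posted
patch) is to brace the loop body:

			for (i = 0, j = 0; i < eth_dev->data->nb_rx_queues;
					++i) {
				if (eth_dev->data->rx_queues[i])
					queue[j++] = i;
			}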

>  				action_rss = (struct rte_flow_action_rss){
>  					.types = rss_conf.rss_hf,
>  					.key_len = rss_conf.rss_key_len,
>  					.queue_num = j,
>  					.key = rss_key,
>  					.queue = queue,
> -				};
> -				ret = rte_flow_validate(sa->portid, &sa->attr,
> -							sa->pattern, sa->action,
> -							&err);
> -				if (!ret)
> -					goto flow_create;
> -				/* Try Queue. */
> -				sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
> -				sa->action[1].conf =
> -					&(struct rte_flow_action_queue){
> -					.index = 0,
> -				};
> -				ret = rte_flow_validate(sa->portid, &sa->attr,
> -							sa->pattern, sa->action,
> -							&err);
> -				/* Try End. */
> -				sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
> -				sa->action[1].conf = NULL;
> -				ret = rte_flow_validate(sa->portid, &sa->attr,
> +			};
> +			ret = rte_flow_validate(sa->portid, &sa->attr,
> +						sa->pattern, sa->action,
> +						&err);
> +			if (!ret)
> +				goto flow_create;
> +			/* Try Queue. */
> +			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
> +			sa->action[1].conf =
> +				&(struct rte_flow_action_queue){
> +				.index = 0,
> +			};
> +			ret = rte_flow_validate(sa->portid, &sa->attr,
> +						sa->pattern, sa->action,
> +						&err);
> +			/* Try End. */
> +			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
> +			sa->action[1].conf = NULL;
> +			ret = rte_flow_validate(sa->portid, &sa->attr,
>  							sa->pattern, sa->action,
>  							&err);
> -				if (ret)
> -					goto flow_create_failure;
> -			} else if (sa->attr.egress &&
> -				   (sa->ol_flags &
> -				    RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
> -				sa->action[1].type =
> -					RTE_FLOW_ACTION_TYPE_PASSTHRU;
> -				sa->action[2].type =
> -					RTE_FLOW_ACTION_TYPE_END;
> -			}
> +			if (ret)
> +				goto flow_create_failure;
> +		} else if (sa->attr.egress &&
> +				(sa->ol_flags &
> +				RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
> +			sa->action[1].type =
> +				RTE_FLOW_ACTION_TYPE_PASSTHRU;
> +			sa->action[2].type =
> +				RTE_FLOW_ACTION_TYPE_END;
> +		}
>  flow_create:
> -			sa->flow = rte_flow_create(sa->portid,
> -				&sa->attr, sa->pattern, sa->action, &err);
> -			if (sa->flow == NULL) {
> +		sa->flow = rte_flow_create(sa->portid,
> +			&sa->attr, sa->pattern, sa->action, &err);
> +		if (sa->flow == NULL) {
>  flow_create_failure:
> -				RTE_LOG(ERR, IPSEC,
> -					"Failed to create ipsec flow msg: %s\n",
> -					err.message);
> -				return -1;
> -			}
> -		} else if (sa->type ==
> -				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
> -			struct rte_security_ctx *ctx =
> -					(struct rte_security_ctx *)
> -					rte_eth_dev_get_sec_ctx(sa->portid);
> -			const struct rte_security_capability *sec_cap;
> -
> -			if (ctx == NULL) {
> -				RTE_LOG(ERR, IPSEC,
> -				"Ethernet device doesn't have security features registered\n");
> -				return -1;
> -			}
> +			RTE_LOG(ERR, IPSEC,
> +				"Failed to create ipsec flow msg: %s\n",
> +				err.message);
> +			return -1;
> +		}
> +	} else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
> +		struct rte_security_ctx *ctx =
> +				(struct rte_security_ctx *)
> +				rte_eth_dev_get_sec_ctx(sa->portid);
> +		const struct rte_security_capability *sec_cap;
> 
> -			/* Set IPsec parameters in conf */
> -			set_ipsec_conf(sa, &(sess_conf.ipsec));
> -
> -			/* Save SA as userdata for the security session. When
> -			 * the packet is received, this userdata will be
> -			 * retrieved using the metadata from the packet.
> -			 *
> -			 * The PMD is expected to set similar metadata for other
> -			 * operations, like rte_eth_event, which are tied to
> -			 * security session. In such cases, the userdata could
> -			 * be obtained to uniquely identify the security
> -			 * parameters denoted.
> -			 */
> -
> -			sess_conf.userdata = (void *) sa;
> -
> -			sa->sec_session = rte_security_session_create(ctx,
> -					&sess_conf, ipsec_ctx->session_pool);
> -			if (sa->sec_session == NULL) {
> -				RTE_LOG(ERR, IPSEC,
> -				"SEC Session init failed: err: %d\n", ret);
> -				return -1;
> -			}
> +		if (ctx == NULL) {
> +			RTE_LOG(ERR, IPSEC,
> +			"Ethernet device doesn't have security features registered\n");
> +			return -1;
> +		}
> +
> +		/* Set IPsec parameters in conf */
> +		set_ipsec_conf(sa, &(sess_conf.ipsec));
> +
> +		/* Save SA as userdata for the security session. When
> +		 * the packet is received, this userdata will be
> +		 * retrieved using the metadata from the packet.
> +		 *
> +		 * The PMD is expected to set similar metadata for other
> +		 * operations, like rte_eth_event, which are tied to
> +		 * security session. In such cases, the userdata could
> +		 * be obtained to uniquely identify the security
> +		 * parameters denoted.
> +		 */
> +
> +		sess_conf.userdata = (void *) sa;
> +
> +		sa->sec_session = rte_security_session_create(ctx,
> +			&sess_conf, pool);
> +		if (sa->sec_session == NULL) {
> +			RTE_LOG(ERR, IPSEC,
> +			"SEC Session init failed: err: %d\n", ret);
> +			return -1;
> +		}
> 
> -			sec_cap = rte_security_capabilities_get(ctx);
> +		sec_cap = rte_security_capabilities_get(ctx);
> 
> -			if (sec_cap == NULL) {
> -				RTE_LOG(ERR, IPSEC,
> -				"No capabilities registered\n");
> -				return -1;
> -			}
> +		if (sec_cap == NULL) {
> +			RTE_LOG(ERR, IPSEC,
> +			"No capabilities registered\n");
> +			return -1;
> +		}
> 
> -			/* iterate until ESP tunnel*/
> -			while (sec_cap->action !=
> +		/* iterate until ESP tunnel*/
> +		while (sec_cap->action !=
>  					RTE_SECURITY_ACTION_TYPE_NONE) {
> 
> -				if (sec_cap->action == sa->type &&
> -				    sec_cap->protocol ==
> -					RTE_SECURITY_PROTOCOL_IPSEC &&
> -				    sec_cap->ipsec.mode ==
> -					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
> -				    sec_cap->ipsec.direction == sa->direction)
> -					break;
> -				sec_cap++;
> -			}
> +			if (sec_cap->action == sa->type &&
> +				sec_cap->protocol ==
> +				RTE_SECURITY_PROTOCOL_IPSEC &&
> +				sec_cap->ipsec.mode ==
> +				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
> +				sec_cap->ipsec.direction == sa->direction)
> +				break;
> +			sec_cap++;
> +		}
> 
> -			if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
> -				RTE_LOG(ERR, IPSEC,
> -				"No suitable security capability found\n");
> -				return -1;
> -			}
> +		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
> +			RTE_LOG(ERR, IPSEC,
> +			"No suitable security capability found\n");
> +			return -1;
> +		}
> +
> +		sa->ol_flags = sec_cap->ol_flags;
> +		sa->security_ctx = ctx;
> +	} else
> +		return -EINVAL;
> +	return 0;
> +}
> +
> +#define CDEV_IV_SIZE 12
> +
> +static int
> +check_cryptodev_aead_capablity(const struct ipsec_sa *ss, uint8_t dev_id)
> +{
> +	struct rte_cryptodev_sym_capability_idx cap_idx;
> +	const struct rte_cryptodev_symmetric_capability *cap;
> 
> -			sa->ol_flags = sec_cap->ol_flags;
> -			sa->security_ctx = ctx;
> +	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
> +	cap_idx.algo.aead = ss->aead_algo;
> +
> +	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
> +	if (cap == NULL)
> +		return -ENOENT;
> +
> +	return rte_cryptodev_sym_capability_check_aead(cap,
> +					ss->cipher_key_len,
> +					ss->digest_len,
> +					ss->aad_len,
> +					CDEV_IV_SIZE);
> +}
> +
> +static int
> +check_cryptodev_capablity(const struct ipsec_sa *ss, uint8_t dev_id)
> +{
> +	struct rte_cryptodev_sym_capability_idx cap_idx;
> +	const struct rte_cryptodev_symmetric_capability *cap;
> +	uint16_t auth_iv_len;
> +	int rc = -1;
> +
> +	if (ss == NULL)
> +		return rc;
> +
> +	if (ss->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
> +		return check_cryptodev_aead_capablity(ss, dev_id);
> +
> +	auth_iv_len = 0;
> +
> +	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
> +	cap_idx.algo.auth = ss->auth_algo;
> +	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
> +	if (cap != NULL) {
> +		rc = rte_cryptodev_sym_capability_check_auth(
> +				cap, ss->auth_key_len, ss->digest_len,
> +				auth_iv_len);
> +		if (rc == 0) {
> +			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
> +			cap_idx.algo.cipher = ss->cipher_algo;
> +			cap = rte_cryptodev_sym_capability_get(dev_id,
> +					&cap_idx);
> +			if (cap != NULL)
> +				rc = rte_cryptodev_sym_capability_check_cipher(
> +						cap, ss->cipher_key_len,
> +						ss->iv_len);
>  		}
> -	} else {
> -		sa->crypto_session = rte_cryptodev_sym_session_create(
> -				ipsec_ctx->session_pool);
> -		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
> -				sa->crypto_session, sa->xforms,
> -				ipsec_ctx->session_priv_pool);
> -
> -		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
> -				&cdev_info);
>  	}
> -	sa->cdev_id_qp = cdev_id_qp;
> 
> -	return 0;
> +	return rc;
> +}
> +
> +static int
> +create_crypto_session(struct ipsec_sa *sa, struct rte_mempool *pool)
> +{
> +	int32_t rc;
> +	uint32_t devnum, i;
> +	struct rte_cryptodev_sym_session *s;
> +	uint8_t devid[RTE_CRYPTO_MAX_DEVS];
> +
> +	/* check which cryptodevs support SA */
> +	devnum = 0;
> +	for (i = 0; i < crypto_dev_num; i++) {
> +		rc = check_cryptodev_capablity(sa, crypto_devid[i]);
> +		if (rc == 0)
> +			devid[devnum++] = crypto_devid[i];
> +	}
> +
> +	if (devnum == 0)
> +		return -ENODEV;
> +
> +	s = rte_cryptodev_sym_session_create(pool);
> +	if (s == NULL)
> +		return -ENOMEM;
> +
> +	/* initialize SA crypto session for all supported devices */
> +	for (i = 0; i != devnum; i++) {
> +		rc = rte_cryptodev_sym_session_init(devid[i], s, sa->xforms,
> +			pool);
> +		if (rc != 0)
> +			break;
> +	}
> +
> +	if (i == devnum) {
> +		sa->crypto_session = s;
> +		return 0;
> +	}
> +
> +	/* failure, do cleanup */
> +	while (i-- != 0)
> +		rte_cryptodev_sym_session_clear(devid[i], s);
> +
> +	rte_cryptodev_sym_session_free(s);
> +	return rc;
> +}
> +
> +int
> +create_session(struct ipsec_sa *sa, struct rte_mempool *pool)
> +{
> +	if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE)
> +		return create_sec_session(sa, pool);
> +	else
> +		return create_crypto_session(sa, pool);
>  }
> 
>  /*
> @@ -393,13 +461,6 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
>  			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
> 
>  			rte_prefetch0(&priv->sym_cop);
> -
> -			if ((unlikely(sa->sec_session == NULL)) &&
> -					create_session(ipsec_ctx, sa)) {
> -				rte_pktmbuf_free(pkts[i]);
> -				continue;
> -			}
> -
>  			sym_cop = get_sym_cop(&priv->cop);
>  			sym_cop->m_src = pkts[i];
> 
> @@ -412,13 +473,6 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
>  			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
> 
>  			rte_prefetch0(&priv->sym_cop);
> -
> -			if ((unlikely(sa->crypto_session == NULL)) &&
> -					create_session(ipsec_ctx, sa)) {
> -				rte_pktmbuf_free(pkts[i]);
> -				continue;
> -			}
> -
>  			rte_crypto_op_attach_sym_session(&priv->cop,
>  					sa->crypto_session);
> 
> @@ -429,12 +483,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
>  			}
>  			break;
>  		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
> -			if ((unlikely(sa->sec_session == NULL)) &&
> -					create_session(ipsec_ctx, sa)) {
> -				rte_pktmbuf_free(pkts[i]);
> -				continue;
> -			}
> -
> +			RTE_ASSERT(sa->sec_session != NULL);
>  			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
>  			if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
>  				rte_security_set_pkt_metadata(
> @@ -442,17 +491,11 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
>  						sa->sec_session, pkts[i], NULL);
>  			continue;
>  		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
> +			RTE_ASSERT(sa->sec_session != NULL);
>  			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
>  			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
> 
>  			rte_prefetch0(&priv->sym_cop);
> -
> -			if ((unlikely(sa->sec_session == NULL)) &&
> -					create_session(ipsec_ctx, sa)) {
> -				rte_pktmbuf_free(pkts[i]);
> -				continue;
> -			}
> -
>  			rte_security_attach_session(&priv->cop,
>  					sa->sec_session);
> 
> diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
> index 99f49d6..804330c 100644
> --- a/examples/ipsec-secgw/ipsec.h
> +++ b/examples/ipsec-secgw/ipsec.h
> @@ -83,6 +83,14 @@ struct app_sa_prm {
> 
>  extern struct app_sa_prm app_sa_prm;
> 
> +/*
> + * Number of enabled crypto devices
> + * This number is needed when checking crypto device capabilities
> + */
> +extern uint8_t crypto_dev_num;
> +/* array of crypto device ID's */
> +extern uint8_t crypto_devid[RTE_CRYPTO_MAX_DEVS];
> +
>  struct ipsec_sa {
>  	struct rte_ipsec_session ips; /* one session per sa for now */
>  	uint32_t spi;
> @@ -306,6 +314,6 @@ void
>  enqueue_cop_burst(struct cdev_qp *cqp);
> 
>  int
> -create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
> +create_session(struct ipsec_sa *sa, struct rte_mempool *pool);
> 
>  #endif /* __IPSEC_H__ */
> diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
> index 3f9cacb..0df6969 100644
> --- a/examples/ipsec-secgw/ipsec_process.c
> +++ b/examples/ipsec-secgw/ipsec_process.c
> @@ -86,39 +86,6 @@ enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
>  	cqp->len = len;
>  }
> 
> -static inline int
> -fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
> -	struct ipsec_sa *sa)
> -{
> -	int32_t rc;
> -
> -	/* setup crypto section */
> -	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
> -		if (sa->crypto_session == NULL) {
> -			rc = create_session(ctx, sa);
> -			if (rc != 0)
> -				return rc;
> -		}
> -		ss->crypto.ses = sa->crypto_session;
> -	/* setup session action type */
> -	} else {
> -		if (sa->sec_session == NULL) {
> -			rc = create_session(ctx, sa);
> -			if (rc != 0)
> -				return rc;
> -		}
> -		ss->security.ses = sa->sec_session;
> -		ss->security.ctx = sa->security_ctx;
> -		ss->security.ol_flags = sa->ol_flags;
> -	}
> -
> -	rc = rte_ipsec_session_prepare(ss);
> -	if (rc != 0)
> -		memset(ss, 0, sizeof(*ss));
> -
> -	return rc;
> -}
> -
>  /*
>   * group input packets byt the SA they belong to.
>   */
> @@ -219,9 +186,8 @@ ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
> 
>  		ips = &sa->ips;
> 
> -		/* no valid HW session for that SA, try to create one */
> -		if (sa == NULL || (ips->crypto.ses == NULL &&
> -				fill_ipsec_session(ips, ctx, sa) != 0))
> +		/* no valid HW session for that SA */
> +		if (sa == NULL || ips->crypto.ses == NULL)
>  			k = 0;
> 
>  		/* process packets inline */
> diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
> index a7298a3..0f36f5b 100644
> --- a/examples/ipsec-secgw/sa.c
> +++ b/examples/ipsec-secgw/sa.c
> @@ -774,14 +774,14 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound)
>  	return 0;
>  }
> 
> -
>  static int
>  sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
> -		uint32_t nb_entries, uint32_t inbound)
> +	uint32_t nb_entries, uint32_t inbound, struct socket_ctx *skt_ctx)
>  {
>  	struct ipsec_sa *sa;
>  	uint32_t i, idx;
>  	uint16_t iv_length, aad_length;
> +	int32_t rc;
> 
>  	/* for ESN upper 32 bits of SQN also need to be part of AAD */
>  	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
> @@ -902,6 +902,12 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
> 
>  			print_one_sa_rule(sa, inbound);
>  		}
> +		rc = create_session(sa, skt_ctx->session_pool);
> +		if (rc != 0) {
> +			RTE_LOG(ERR, IPSEC_ESP,
> +					"create_session() failed\n");
> +			return -EINVAL;
> +		}
>  	}
> 
>  	return 0;
> @@ -909,16 +915,16 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
> 
>  static inline int
>  sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
> -		uint32_t nb_entries)
> +		uint32_t nb_entries, struct socket_ctx *skt_ctx)
>  {
> -	return sa_add_rules(sa_ctx, entries, nb_entries, 0);
> +	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
>  }
> 
>  static inline int
>  sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
> -		uint32_t nb_entries)
> +		uint32_t nb_entries, struct socket_ctx *skt_ctx)
>  {
> -	return sa_add_rules(sa_ctx, entries, nb_entries, 1);
> +	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
>  }
> 
>  /*
> @@ -1012,10 +1018,12 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
>  	return 0;
>  }
> 
> -static void
> +static int
>  fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
>  	const struct ipsec_sa *lsa)
>  {
> +	int32_t rc = 0;
> +
>  	ss->sa = sa;
>  	ss->type = lsa->type;
> 
> @@ -1028,6 +1036,12 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
>  		ss->security.ctx = lsa->security_ctx;
>  		ss->security.ol_flags = lsa->ol_flags;
>  	}
> +
> +	rc = rte_ipsec_session_prepare(ss);
> +	if (rc != 0)
> +		memset(ss, 0, sizeof(*ss));
> +
> +	return rc;
>  }
> 
>  /*
> @@ -1062,8 +1076,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
>  	if (rc < 0)
>  		return rc;
> 
> -	fill_ipsec_session(&lsa->ips, sa, lsa);
> -	return 0;
> +	rc = fill_ipsec_session(&lsa->ips, sa, lsa);
> +	return rc;
>  }
> 
>  /*
> @@ -1141,7 +1155,10 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
>  				"context %s in socket %d\n", rte_errno,
>  				name, socket_id);
> 
> -		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
> +		rc = sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
> +		if (rc != 0)
> +			rte_exit(EXIT_FAILURE,
> +				"failed to add inbound rules\n");
> 
>  		if (app_sa_prm.enable != 0) {
>  			rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
> @@ -1161,7 +1178,10 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id)
>  				"context %s in socket %d\n", rte_errno,
>  				name, socket_id);
> 
> -		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
> +		rc = sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
> +		if (rc != 0)
> +			rte_exit(EXIT_FAILURE,
> +				"failed to add outbound rules\n");
> 
>  		if (app_sa_prm.enable != 0) {
>  			rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
> --
> 2.7.4
  
Iremonger, Bernard April 17, 2019, 12:53 p.m. UTC | #2
Hi Akhil,

> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Wednesday, April 17, 2019 12:52 PM
> To: Iremonger, Bernard <bernard.iremonger@intel.com>; dev@dpdk.org;
> Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: stable@dpdk.org
> Subject: RE: [PATCH v3 1/2] examples/ipsec-secgw: fix 1st packet dropped
> for inline crypto
> 
> >
> > Inline crypto installs a flow rule in the NIC. This flow rule must be
> > installed before the first inbound packet is received.
> >
> > The create_session() function installs the flow rule.
> >
> > Refactor ipsec-secgw.c, sa.c, ipsec.h and ipsec.c to create sessions
> > at startup to fix the issue of the first packet being dropped for
> > inline crypto.
> >
> > The create_session() function is now called at initialisation in
> > sa_add_rules() which is called from sa_init().
> > The return code for add_rules is checked.
> > Calls to create_session() in other functions are dropped.
> >
> > Add crypto_devid_fill() in ipsec-secgw.c Add max_session_size() in
> > ipsec-secgw.c Add check_cryptodev_capability() in ipsec.c Add
> > check_cryptodev_aead_capability() in ipsec.c Add create_sec_session()
> > and create_crypto_session() in ipsec.c
> >
> > The crypto_devid_fill() function has been added to find the enabled
> > crypto devices.
> >
> > The max_session_size() function has been added to calculate memory
> > requirements.
> >
> > The check_cryptodev_capability() and check_cryptodev_aead_capability()
> > functions have been added to check that the SA is supported by the
> > crypto device.
> >
> > The create_session() function is refactored to use the
> > create_sec_session() and create_crypto_session() functions.
> >
> > The cryptodevs_init() function has been refactored to drop calls to
> > rte_mempool_create() and to drop calculation of memory requirements.
> >
> > The main() function has been refactored to call crypto_devid_fill()
> > and max_session_size() and to call session_pool_init() and
> > session_priv_pool_init().
> > The ports are started now before adding a flow rule in main().
> > The sa_init(), sp4_init(), sp6_init() and rt_init() functions are now
> > called after the ports have been started.
> >
> > Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload")
> > Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample
> > application")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
> > ---
> >  examples/ipsec-secgw/ipsec-secgw.c   | 271 +++++++++--------
> >  examples/ipsec-secgw/ipsec.c         | 569 +++++++++++++++++++-----------
> -----
> >  examples/ipsec-secgw/ipsec.h         |  10 +-
> >  examples/ipsec-secgw/ipsec_process.c |  38 +--
> >  examples/ipsec-secgw/sa.c            |  42 ++-
> >  5 files changed, 495 insertions(+), 435 deletions(-)
> >
> > diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-
> > secgw/ipsec-secgw.c index ffbd00b..cc8bb57 100644
> > --- a/examples/ipsec-secgw/ipsec-secgw.c
> > +++ b/examples/ipsec-secgw/ipsec-secgw.c
> > @@ -182,6 +182,14 @@ struct lcore_params {
> >  	uint8_t lcore_id;
> >  } __rte_cache_aligned;
> >
> > +/*
> > + * Number of enabled crypto devices
> > + * This number is needed when checking crypto device capabilities  */
> > +uint8_t crypto_dev_num;
> > +/* array of crypto device ID's */
> > +uint8_t crypto_devid[RTE_CRYPTO_MAX_DEVS];
> > +
> >  static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> >
> >  static struct lcore_params *lcore_params; @@ -1623,13 +1631,27 @@
> > check_cryptodev_mask(uint8_t cdev_id)
> >  	return -1;
> >  }
> >
> > +static void
> > +crypto_devid_fill(void)
> > +{
> > +	uint32_t i, n;
> > +
> > +	n = rte_cryptodev_count();
> > +
> > +	for (i = 0; i != n; i++) {
> > +		if (check_cryptodev_mask(i) == 0)
> > +			crypto_devid[crypto_dev_num++] = i;
> > +	}
> > +}
> > +
> >  static int32_t
> >  cryptodevs_init(void)
> >  {
> >  	struct rte_cryptodev_config dev_conf;
> >  	struct rte_cryptodev_qp_conf qp_conf;
> >  	uint16_t idx, max_nb_qps, qp, i;
> > -	int16_t cdev_id, port_id;
> > +	int16_t cdev_id;
> > +	uint32_t dev_max_sess;
> >  	struct rte_hash_parameters params = { 0 };
> >
> >  	params.entries = CDEV_MAP_ENTRIES;
> > @@ -1652,45 +1674,6 @@ cryptodevs_init(void)
> >
> >  	printf("lcore/cryptodev/qp mappings:\n");
> >
> > -	uint32_t max_sess_sz = 0, sess_sz;
> > -	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
> > -		void *sec_ctx;
> > -
> > -		/* Get crypto priv session size */
> > -		sess_sz =
> rte_cryptodev_sym_get_private_session_size(cdev_id);
> > -		if (sess_sz > max_sess_sz)
> > -			max_sess_sz = sess_sz;
> > -
> > -		/*
> > -		 * If crypto device is security capable, need to check the
> > -		 * size of security session as well.
> > -		 */
> > -
> > -		/* Get security context of the crypto device */
> > -		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
> > -		if (sec_ctx == NULL)
> > -			continue;
> > -
> > -		/* Get size of security session */
> > -		sess_sz = rte_security_session_get_size(sec_ctx);
> > -		if (sess_sz > max_sess_sz)
> > -			max_sess_sz = sess_sz;
> > -	}
> > -	RTE_ETH_FOREACH_DEV(port_id) {
> > -		void *sec_ctx;
> > -
> > -		if ((enabled_port_mask & (1 << port_id)) == 0)
> > -			continue;
> > -
> > -		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
> > -		if (sec_ctx == NULL)
> > -			continue;
> > -
> > -		sess_sz = rte_security_session_get_size(sec_ctx);
> > -		if (sess_sz > max_sess_sz)
> > -			max_sess_sz = sess_sz;
> > -	}
> > -
> >  	idx = 0;
> >  	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
> >  		struct rte_cryptodev_info cdev_info; @@ -1722,51 +1705,12
> @@
> > cryptodevs_init(void)
> >  		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
> >  		dev_conf.nb_queue_pairs = qp;
> >
> > -		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
> > +		dev_max_sess = cdev_info.sym.max_nb_sessions;
> >  		if (dev_max_sess != 0 && dev_max_sess <
> CDEV_MP_NB_OBJS)
> >  			rte_exit(EXIT_FAILURE,
> >  				"Device does not support at least %u "
> >  				"sessions", CDEV_MP_NB_OBJS);
> >
> > -		if (!socket_ctx[dev_conf.socket_id].session_pool) {
> > -			char mp_name[RTE_MEMPOOL_NAMESIZE];
> > -			struct rte_mempool *sess_mp;
> > -
> > -			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
> > -					"sess_mp_%u", dev_conf.socket_id);
> > -			sess_mp =
> rte_cryptodev_sym_session_pool_create(
> > -					mp_name, CDEV_MP_NB_OBJS,
> > -					0, CDEV_MP_CACHE_SZ, 0,
> > -					dev_conf.socket_id);
> > -			socket_ctx[dev_conf.socket_id].session_pool =
> > sess_mp;
> > -		}
> > -
> > -		if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
> > -			char mp_name[RTE_MEMPOOL_NAMESIZE];
> > -			struct rte_mempool *sess_mp;
> > -
> > -			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
> > -					"sess_mp_priv_%u",
> > dev_conf.socket_id);
> > -			sess_mp = rte_mempool_create(mp_name,
> > -					CDEV_MP_NB_OBJS,
> > -					max_sess_sz,
> > -					CDEV_MP_CACHE_SZ,
> > -					0, NULL, NULL, NULL,
> > -					NULL, dev_conf.socket_id,
> > -					0);
> > -			socket_ctx[dev_conf.socket_id].session_priv_pool =
> > -					sess_mp;
> > -		}
> > -
> > -		if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
> > -
> 	!socket_ctx[dev_conf.socket_id].session_pool)
> > -			rte_exit(EXIT_FAILURE,
> > -				"Cannot create session pool on socket %d\n",
> > -				dev_conf.socket_id);
> > -		else
> > -			printf("Allocated session pool on socket %d\n",
> > -					dev_conf.socket_id);
> > -
> >  		if (rte_cryptodev_configure(cdev_id, &dev_conf))
> >  			rte_panic("Failed to initialize cryptodev %u\n",
> >  					cdev_id);
> > @@ -1787,38 +1731,6 @@ cryptodevs_init(void)
> >  					cdev_id);
> >  	}
> >
> > -	/* create session pools for eth devices that implement security */
> > -	RTE_ETH_FOREACH_DEV(port_id) {
> > -		if ((enabled_port_mask & (1 << port_id)) &&
> > -				rte_eth_dev_get_sec_ctx(port_id)) {
> > -			int socket_id = rte_eth_dev_socket_id(port_id);
> > -
> > -			if (!socket_ctx[socket_id].session_pool) {
> > -				char mp_name[RTE_MEMPOOL_NAMESIZE];
> > -				struct rte_mempool *sess_mp;
> > -
> > -				snprintf(mp_name,
> RTE_MEMPOOL_NAMESIZE,
> > -						"sess_mp_%u", socket_id);
> > -				sess_mp = rte_mempool_create(mp_name,
> > -						(CDEV_MP_NB_OBJS * 2),
> > -						max_sess_sz,
> > -						CDEV_MP_CACHE_SZ,
> > -						0, NULL, NULL, NULL,
> > -						NULL, socket_id,
> > -						0);
> > -				if (sess_mp == NULL)
> > -					rte_exit(EXIT_FAILURE,
> > -						"Cannot create session pool "
> > -						"on socket %d\n", socket_id);
> > -				else
> > -					printf("Allocated session pool "
> > -						"on socket %d\n", socket_id);
> > -				socket_ctx[socket_id].session_pool =
> sess_mp;
> > -			}
> > -		}
> > -	}
> > -
> > -
> >  	printf("\n");
> >
> >  	return 0;
> > @@ -1984,6 +1896,98 @@ port_init(uint16_t portid, uint64_t
> > req_rx_offloads, uint64_t req_tx_offloads)
> >  	printf("\n");
> >  }
> >
> > +static size_t
> > +max_session_size(void)
> > +{
> > +	size_t max_sz, sz;
> > +	void *sec_ctx;
> > +	int16_t cdev_id, port_id, n;
> > +
> > +	max_sz = 0;
> > +	n =  rte_cryptodev_count();
> > +	for (cdev_id = 0; cdev_id != n; cdev_id++) {
> > +		sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
> > +		if (sz > max_sz)
> > +			max_sz = sz;
> > +		/*
> > +		 * If crypto device is security capable, need to check the
> > +		 * size of security session as well.
> > +		 */
> > +
> > +		/* Get security context of the crypto device */
> > +		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
> > +		if (sec_ctx == NULL)
> > +			continue;
> > +
> > +		/* Get size of security session */
> > +		sz = rte_security_session_get_size(sec_ctx);
> > +		if (sz > max_sz)
> > +			max_sz = sz;
> > +	}
> > +
> > +	RTE_ETH_FOREACH_DEV(port_id) {
> > <snip>
> >
> > +		if (sa->attr.ingress) {
> > +			uint8_t rss_key[40];
> > +			struct rte_eth_rss_conf rss_conf = {
> > +				.rss_key = rss_key,
> > +				.rss_key_len = 40,
> > +			};
> > +			struct rte_eth_dev *eth_dev;
> > +			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
> > +			struct rte_flow_action_rss action_rss;
> > +			unsigned int i;
> > +			unsigned int j;
> > +
> > +			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
> > +			/* Try RSS. */
> > +			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
> > +			sa->action[1].conf = &action_rss;
> > +			eth_dev = ctx->device;
> > +			rte_eth_dev_rss_hash_conf_get(sa->portid,
> > +						&rss_conf);
> > +			for (i = 0, j = 0; i < eth_dev->data->nb_rx_queues;
> > +				++i)
> > +				if (eth_dev->data->rx_queues[i])
> > +					queue[j++] = i;
> Compilation error
> 
> /home/akhil/up/dpdk-next-crypto/examples/ipsec-secgw/ipsec.c: In function 'create_sec_session':
> /home/akhil/up/dpdk-next-crypto/examples/ipsec-secgw/ipsec.c:169:4: error: this 'for' clause does not guard... [-Werror=misleading-indentation]
>     for (i = 0, j = 0; i < eth_dev->data->nb_rx_queues;
>     ^~~
> /home/akhil/up/dpdk-next-crypto/examples/ipsec-secgw/ipsec.c:173:5: note: ...this statement, but the latter is misleadingly indented as if it is guarded by the 'for'
>      action_rss = (struct rte_flow_action_rss){
>      ^~~~~~~~~~
> 
<snip>

I will send a v4.
What compiler are you using?

Regards,

Bernard.
  
Akhil Goyal April 17, 2019, 1:01 p.m. UTC | #3
> <snip>
> 
> I will send a v4.
> What compiler are you using?

Cross compile for arm
gcc-linaro-6.1.1-2016.08-x86_64_aarch64-linux-gnu
> 
> Regards,
> 
> Bernard.
>
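
For readers unfamiliar with the diagnostic discussed above: gcc 6 and newer ship -Wmisleading-indentation (promoted to an error here by -Werror), which fires when the statement following an unbraced 'for' or 'if' body is indented as though it were guarded by it. The short standalone sketch below mimics the shape of the flagged loop using placeholder names only (collect_active_queues, rx_queues, nb_rx_queues and queue are illustrative, not the DPDK definitions) and shows the usual remedy of bracing the loop body; it is an illustration of the warning, not a claim about what the v4 respin changes.

/*
 * Minimal, self-contained illustration of -Wmisleading-indentation.
 * Build check (gcc >= 6):
 *   gcc -Wall -Werror=misleading-indentation -c indent_demo.c
 * All identifiers are placeholders, not taken from the DPDK sources.
 */
#include <stddef.h>
#include <stdint.h>

uint16_t
collect_active_queues(void *const rx_queues[], uint16_t nb_rx_queues,
		uint16_t queue[])
{
	uint16_t i, j;

	/*
	 * With an unbraced body, gcc warns as soon as the statement after
	 * the loop is indented to the same depth as "queue[j++] = i;",
	 * which is the situation reported at ipsec.c:169/173 above.
	 * Braces make the extent of the loop explicit, so nothing that
	 * follows can look misleadingly guarded.
	 */
	for (i = 0, j = 0; i != nb_rx_queues; i++) {
		if (rx_queues[i] != NULL)
			queue[j++] = i;
	}

	return j;
}

Re-indenting the trailing statement one level to the left silences the warning equally well; the point is only that the indentation and the control flow must agree.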
  

Patch

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index ffbd00b..cc8bb57 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -182,6 +182,14 @@  struct lcore_params {
 	uint8_t lcore_id;
 } __rte_cache_aligned;
 
+/*
+ * Number of enabled crypto devices
+ * This number is needed when checking crypto device capabilities
+ */
+uint8_t crypto_dev_num;
+/* array of crypto device ID's */
+uint8_t crypto_devid[RTE_CRYPTO_MAX_DEVS];
+
 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
 
 static struct lcore_params *lcore_params;
@@ -1623,13 +1631,27 @@  check_cryptodev_mask(uint8_t cdev_id)
 	return -1;
 }
 
+static void
+crypto_devid_fill(void)
+{
+	uint32_t i, n;
+
+	n = rte_cryptodev_count();
+
+	for (i = 0; i != n; i++) {
+		if (check_cryptodev_mask(i) == 0)
+			crypto_devid[crypto_dev_num++] = i;
+	}
+}
+
 static int32_t
 cryptodevs_init(void)
 {
 	struct rte_cryptodev_config dev_conf;
 	struct rte_cryptodev_qp_conf qp_conf;
 	uint16_t idx, max_nb_qps, qp, i;
-	int16_t cdev_id, port_id;
+	int16_t cdev_id;
+	uint32_t dev_max_sess;
 	struct rte_hash_parameters params = { 0 };
 
 	params.entries = CDEV_MAP_ENTRIES;
@@ -1652,45 +1674,6 @@  cryptodevs_init(void)
 
 	printf("lcore/cryptodev/qp mappings:\n");
 
-	uint32_t max_sess_sz = 0, sess_sz;
-	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
-		void *sec_ctx;
-
-		/* Get crypto priv session size */
-		sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
-		if (sess_sz > max_sess_sz)
-			max_sess_sz = sess_sz;
-
-		/*
-		 * If crypto device is security capable, need to check the
-		 * size of security session as well.
-		 */
-
-		/* Get security context of the crypto device */
-		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
-		if (sec_ctx == NULL)
-			continue;
-
-		/* Get size of security session */
-		sess_sz = rte_security_session_get_size(sec_ctx);
-		if (sess_sz > max_sess_sz)
-			max_sess_sz = sess_sz;
-	}
-	RTE_ETH_FOREACH_DEV(port_id) {
-		void *sec_ctx;
-
-		if ((enabled_port_mask & (1 << port_id)) == 0)
-			continue;
-
-		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
-		if (sec_ctx == NULL)
-			continue;
-
-		sess_sz = rte_security_session_get_size(sec_ctx);
-		if (sess_sz > max_sess_sz)
-			max_sess_sz = sess_sz;
-	}
-
 	idx = 0;
 	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
 		struct rte_cryptodev_info cdev_info;
@@ -1722,51 +1705,12 @@  cryptodevs_init(void)
 		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
 		dev_conf.nb_queue_pairs = qp;
 
-		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
+		dev_max_sess = cdev_info.sym.max_nb_sessions;
 		if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
 			rte_exit(EXIT_FAILURE,
 				"Device does not support at least %u "
 				"sessions", CDEV_MP_NB_OBJS);
 
-		if (!socket_ctx[dev_conf.socket_id].session_pool) {
-			char mp_name[RTE_MEMPOOL_NAMESIZE];
-			struct rte_mempool *sess_mp;
-
-			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
-					"sess_mp_%u", dev_conf.socket_id);
-			sess_mp = rte_cryptodev_sym_session_pool_create(
-					mp_name, CDEV_MP_NB_OBJS,
-					0, CDEV_MP_CACHE_SZ, 0,
-					dev_conf.socket_id);
-			socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
-		}
-
-		if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
-			char mp_name[RTE_MEMPOOL_NAMESIZE];
-			struct rte_mempool *sess_mp;
-
-			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
-					"sess_mp_priv_%u", dev_conf.socket_id);
-			sess_mp = rte_mempool_create(mp_name,
-					CDEV_MP_NB_OBJS,
-					max_sess_sz,
-					CDEV_MP_CACHE_SZ,
-					0, NULL, NULL, NULL,
-					NULL, dev_conf.socket_id,
-					0);
-			socket_ctx[dev_conf.socket_id].session_priv_pool =
-					sess_mp;
-		}
-
-		if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
-				!socket_ctx[dev_conf.socket_id].session_pool)
-			rte_exit(EXIT_FAILURE,
-				"Cannot create session pool on socket %d\n",
-				dev_conf.socket_id);
-		else
-			printf("Allocated session pool on socket %d\n",
-					dev_conf.socket_id);
-
 		if (rte_cryptodev_configure(cdev_id, &dev_conf))
 			rte_panic("Failed to initialize cryptodev %u\n",
 					cdev_id);
@@ -1787,38 +1731,6 @@  cryptodevs_init(void)
 					cdev_id);
 	}
 
-	/* create session pools for eth devices that implement security */
-	RTE_ETH_FOREACH_DEV(port_id) {
-		if ((enabled_port_mask & (1 << port_id)) &&
-				rte_eth_dev_get_sec_ctx(port_id)) {
-			int socket_id = rte_eth_dev_socket_id(port_id);
-
-			if (!socket_ctx[socket_id].session_pool) {
-				char mp_name[RTE_MEMPOOL_NAMESIZE];
-				struct rte_mempool *sess_mp;
-
-				snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
-						"sess_mp_%u", socket_id);
-				sess_mp = rte_mempool_create(mp_name,
-						(CDEV_MP_NB_OBJS * 2),
-						max_sess_sz,
-						CDEV_MP_CACHE_SZ,
-						0, NULL, NULL, NULL,
-						NULL, socket_id,
-						0);
-				if (sess_mp == NULL)
-					rte_exit(EXIT_FAILURE,
-						"Cannot create session pool "
-						"on socket %d\n", socket_id);
-				else
-					printf("Allocated session pool "
-						"on socket %d\n", socket_id);
-				socket_ctx[socket_id].session_pool = sess_mp;
-			}
-		}
-	}
-
-
 	printf("\n");
 
 	return 0;
@@ -1984,6 +1896,98 @@  port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 	printf("\n");
 }
 
+static size_t
+max_session_size(void)
+{
+	size_t max_sz, sz;
+	void *sec_ctx;
+	int16_t cdev_id, port_id, n;
+
+	max_sz = 0;
+	n =  rte_cryptodev_count();
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
+		if (sz > max_sz)
+			max_sz = sz;
+		/*
+		 * If crypto device is security capable, need to check the
+		 * size of security session as well.
+		 */
+
+		/* Get security context of the crypto device */
+		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
+		if (sec_ctx == NULL)
+			continue;
+
+		/* Get size of security session */
+		sz = rte_security_session_get_size(sec_ctx);
+		if (sz > max_sz)
+			max_sz = sz;
+	}
+
+	RTE_ETH_FOREACH_DEV(port_id) {
+		if ((enabled_port_mask & (1 << port_id)) == 0)
+			continue;
+
+		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+		if (sec_ctx == NULL)
+			continue;
+
+		sz = rte_security_session_get_size(sec_ctx);
+		if (sz > max_sz)
+			max_sz = sz;
+	}
+
+	return max_sz;
+}
+
+static void
+session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
+{
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+	struct rte_mempool *sess_mp;
+
+	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+			"sess_mp_%u", socket_id);
+	sess_mp = rte_cryptodev_sym_session_pool_create(
+			mp_name, CDEV_MP_NB_OBJS,
+			sess_sz, CDEV_MP_CACHE_SZ, 0,
+			socket_id);
+	ctx->session_pool = sess_mp;
+
+	if (ctx->session_pool == NULL)
+		rte_exit(EXIT_FAILURE,
+			"Cannot init session pool on socket %d\n", socket_id);
+	else
+		printf("Allocated session pool on socket %d\n",	socket_id);
+}
+
+static void
+session_priv_pool_init(struct socket_ctx *ctx, int32_t socket_id,
+	size_t sess_sz)
+{
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+	struct rte_mempool *sess_mp;
+
+	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+			"sess_mp_priv_%u", socket_id);
+	sess_mp = rte_mempool_create(mp_name,
+			CDEV_MP_NB_OBJS,
+			sess_sz,
+			CDEV_MP_CACHE_SZ,
+			0, NULL, NULL, NULL,
+			NULL, socket_id,
+			0);
+	ctx->session_priv_pool = sess_mp;
+	if (ctx->session_priv_pool == NULL)
+		rte_exit(EXIT_FAILURE,
+			"Cannot init session priv pool on socket %d\n",
+			socket_id);
+	else
+		printf("Allocated session priv pool on socket %d\n",
+			socket_id);
+}
+
 static void
 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
 {
@@ -2064,9 +2068,11 @@  main(int32_t argc, char **argv)
 {
 	int32_t ret;
 	uint32_t lcore_id;
+	uint32_t i;
 	uint8_t socket_id;
 	uint16_t portid;
 	uint64_t req_rx_offloads, req_tx_offloads;
+	size_t sess_sz;
 
 	/* init EAL */
 	ret = rte_eal_init(argc, argv);
@@ -2094,7 +2100,10 @@  main(int32_t argc, char **argv)
 
 	nb_lcores = rte_lcore_count();
 
-	/* Replicate each context per socket */
+	crypto_devid_fill();
+
+	sess_sz = max_session_size();
+
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 		if (rte_lcore_is_enabled(lcore_id) == 0)
 			continue;
@@ -2104,20 +2113,17 @@  main(int32_t argc, char **argv)
 		else
 			socket_id = 0;
 
+		/* mbuf_pool is initialised by the pool_init() function*/
 		if (socket_ctx[socket_id].mbuf_pool)
 			continue;
 
-		/* initilaze SPD */
-		sp4_init(&socket_ctx[socket_id], socket_id);
-
-		sp6_init(&socket_ctx[socket_id], socket_id);
-
-		/* initilaze SAD */
-		sa_init(&socket_ctx[socket_id], socket_id);
-
-		rt_init(&socket_ctx[socket_id], socket_id);
-
 		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
+		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
+		session_priv_pool_init(&socket_ctx[socket_id], socket_id,
+			sess_sz);
+
+		if (!numa_on)
+			break;
 	}
 
 	RTE_ETH_FOREACH_DEV(portid) {
@@ -2135,7 +2141,11 @@  main(int32_t argc, char **argv)
 		if ((enabled_port_mask & (1 << portid)) == 0)
 			continue;
 
-		/* Start device */
+		/*
+		 * Start device
+		 * note: device must be started before a flow rule
+		 * can be installed.
+		 */
 		ret = rte_eth_dev_start(portid);
 		if (ret < 0)
 			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
@@ -2153,6 +2163,19 @@  main(int32_t argc, char **argv)
 			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
 	}
 
+	/* Replicate each context per socket */
+	for (i = 0; i < NB_SOCKETS && i < rte_socket_count(); i++) {
+		socket_id = rte_socket_id_by_idx(i);
+		if ((socket_ctx[socket_id].mbuf_pool != NULL) &&
+			(socket_ctx[socket_id].sa_in == NULL) &&
+			(socket_ctx[socket_id].sa_out == NULL)) {
+			sa_init(&socket_ctx[socket_id], socket_id);
+			sp4_init(&socket_ctx[socket_id], socket_id);
+			sp6_init(&socket_ctx[socket_id], socket_id);
+			rt_init(&socket_ctx[socket_id], socket_id);
+		}
+	}
+
 	check_all_ports_link_status(enabled_port_mask);
 
 	/* launch per-lcore init on every lcore */
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 4352cb8..e31d472 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -39,42 +39,17 @@  set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
 	ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
 }
 
-int
-create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
+static int
+create_sec_session(struct ipsec_sa *sa, struct rte_mempool *pool)
 {
-	struct rte_cryptodev_info cdev_info;
-	unsigned long cdev_id_qp = 0;
+	const struct rte_security_capability *sec_cap;
+	struct rte_security_ctx *ctx;
 	int32_t ret = 0;
-	struct cdev_key key = { 0 };
-
-	key.lcore_id = (uint8_t)rte_lcore_id();
-
-	key.cipher_algo = (uint8_t)sa->cipher_algo;
-	key.auth_algo = (uint8_t)sa->auth_algo;
-	key.aead_algo = (uint8_t)sa->aead_algo;
-
-	if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
-		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
-				(void **)&cdev_id_qp);
-		if (ret < 0) {
-			RTE_LOG(ERR, IPSEC,
-				"No cryptodev: core %u, cipher_algo %u, "
-				"auth_algo %u, aead_algo %u\n",
-				key.lcore_id,
-				key.cipher_algo,
-				key.auth_algo,
-				key.aead_algo);
-			return -1;
-		}
-	}
 
-	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
-			"%u qp %u\n", sa->spi,
-			ipsec_ctx->tbl[cdev_id_qp].id,
-			ipsec_ctx->tbl[cdev_id_qp].qp);
+	if ((sa == NULL) || (pool == NULL))
+		return -EINVAL;
 
-	if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE) {
-		struct rte_security_session_conf sess_conf = {
+	struct rte_security_session_conf sess_conf = {
 			.action_type = sa->type,
 			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 			{.ipsec = {
@@ -90,247 +65,340 @@  create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 			} },
 			.crypto_xform = sa->xforms,
 			.userdata = NULL,
-
 		};
 
-		if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
-			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
-							rte_cryptodev_get_sec_ctx(
-							ipsec_ctx->tbl[cdev_id_qp].id);
-
-			/* Set IPsec parameters in conf */
-			set_ipsec_conf(sa, &(sess_conf.ipsec));
-
-			sa->sec_session = rte_security_session_create(ctx,
-					&sess_conf, ipsec_ctx->session_pool);
-			if (sa->sec_session == NULL) {
-				RTE_LOG(ERR, IPSEC,
-				"SEC Session init failed: err: %d\n", ret);
-				return -1;
-			}
-		} else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
-			struct rte_flow_error err;
-			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
-							rte_eth_dev_get_sec_ctx(
-							sa->portid);
-			const struct rte_security_capability *sec_cap;
-			int ret = 0;
-
-			sa->sec_session = rte_security_session_create(ctx,
-					&sess_conf, ipsec_ctx->session_pool);
-			if (sa->sec_session == NULL) {
-				RTE_LOG(ERR, IPSEC,
-				"SEC Session init failed: err: %d\n", ret);
-				return -1;
-			}
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
+		ctx = (struct rte_security_ctx *)
+				rte_eth_dev_get_sec_ctx(sa->portid);
 
-			sec_cap = rte_security_capabilities_get(ctx);
+		/* Set IPsec parameters in conf */
+		set_ipsec_conf(sa, &(sess_conf.ipsec));
 
-			/* iterate until ESP tunnel*/
-			while (sec_cap->action !=
-					RTE_SECURITY_ACTION_TYPE_NONE) {
+		sa->sec_session = rte_security_session_create(ctx,
+				&sess_conf, pool);
+		if (sa->sec_session == NULL) {
+			RTE_LOG(ERR, IPSEC,
+				"SEC Session init failed: err: %d\n",
+				ret);
+			return -1;
+		}
+	} else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+		struct rte_flow_error err;
+		ctx = (struct rte_security_ctx *)
+				rte_eth_dev_get_sec_ctx(sa->portid);
+		sa->sec_session = rte_security_session_create(ctx,
+				&sess_conf, pool);
+		if (sa->sec_session == NULL) {
+			RTE_LOG(ERR, IPSEC, "SEC Session init failed\n");
+			return -1;
+		}
 
-				if (sec_cap->action == sa->type &&
-				    sec_cap->protocol ==
-					RTE_SECURITY_PROTOCOL_IPSEC &&
-				    sec_cap->ipsec.mode ==
-					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
-				    sec_cap->ipsec.direction == sa->direction)
-					break;
-				sec_cap++;
-			}
+		sec_cap = rte_security_capabilities_get(ctx);
+
+		/* iterate until ESP tunnel*/
+		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
+			if (sec_cap->action == sa->type &&
+				sec_cap->protocol ==
+				RTE_SECURITY_PROTOCOL_IPSEC &&
+				sec_cap->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+				sec_cap->ipsec.direction == sa->direction)
+				break;
+			sec_cap++;
+		}
 
-			if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
-				RTE_LOG(ERR, IPSEC,
+		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+			RTE_LOG(ERR, IPSEC,
 				"No suitable security capability found\n");
 				return -1;
-			}
+		}
 
-			sa->ol_flags = sec_cap->ol_flags;
-			sa->security_ctx = ctx;
-			sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
-
-			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
-			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
-			if (sa->flags & IP6_TUNNEL) {
-				sa->pattern[1].spec = &sa->ipv6_spec;
-				memcpy(sa->ipv6_spec.hdr.dst_addr,
-					sa->dst.ip.ip6.ip6_b, 16);
-				memcpy(sa->ipv6_spec.hdr.src_addr,
-				       sa->src.ip.ip6.ip6_b, 16);
-			} else {
-				sa->pattern[1].spec = &sa->ipv4_spec;
-				sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
-				sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
-			}
+		sa->ol_flags = sec_cap->ol_flags;
+		sa->security_ctx = ctx;
+		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+
+		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
+		if (sa->flags & IP6_TUNNEL) {
+			sa->pattern[1].spec = &sa->ipv6_spec;
+			memcpy(sa->ipv6_spec.hdr.dst_addr,
+				sa->dst.ip.ip6.ip6_b, 16);
+			memcpy(sa->ipv6_spec.hdr.src_addr,
+			       sa->src.ip.ip6.ip6_b, 16);
+		} else {
+			sa->pattern[1].spec = &sa->ipv4_spec;
+			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
+			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
+		}
 
-			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
-			sa->pattern[2].spec = &sa->esp_spec;
-			sa->pattern[2].mask = &rte_flow_item_esp_mask;
-			sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
+		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
+		sa->pattern[2].spec = &sa->esp_spec;
+		sa->pattern[2].mask = &rte_flow_item_esp_mask;
+		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
 
-			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
 
-			sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
-			sa->action[0].conf = sa->sec_session;
+		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+		sa->action[0].conf = sa->sec_session;
 
-			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
 
-			sa->attr.egress = (sa->direction ==
+		sa->attr.egress = (sa->direction ==
 					RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
-			sa->attr.ingress = (sa->direction ==
+		sa->attr.ingress = (sa->direction ==
 					RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
-			if (sa->attr.ingress) {
-				uint8_t rss_key[40];
-				struct rte_eth_rss_conf rss_conf = {
-					.rss_key = rss_key,
-					.rss_key_len = 40,
-				};
-				struct rte_eth_dev *eth_dev;
-				uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
-				struct rte_flow_action_rss action_rss;
-				unsigned int i;
-				unsigned int j;
-
-				sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
-				/* Try RSS. */
-				sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
-				sa->action[1].conf = &action_rss;
-				eth_dev = ctx->device;
-				rte_eth_dev_rss_hash_conf_get(sa->portid,
-							      &rss_conf);
-				for (i = 0, j = 0;
-				     i < eth_dev->data->nb_rx_queues; ++i)
-					if (eth_dev->data->rx_queues[i])
-						queue[j++] = i;
+		if (sa->attr.ingress) {
+			uint8_t rss_key[40];
+			struct rte_eth_rss_conf rss_conf = {
+				.rss_key = rss_key,
+				.rss_key_len = 40,
+			};
+			struct rte_eth_dev *eth_dev;
+			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+			struct rte_flow_action_rss action_rss;
+			unsigned int i;
+			unsigned int j;
+
+			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
+			/* Try RSS. */
+			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+			sa->action[1].conf = &action_rss;
+			eth_dev = ctx->device;
+			rte_eth_dev_rss_hash_conf_get(sa->portid,
+						&rss_conf);
+			for (i = 0, j = 0; i < eth_dev->data->nb_rx_queues;
+				++i)
+				if (eth_dev->data->rx_queues[i])
+					queue[j++] = i;
 				action_rss = (struct rte_flow_action_rss){
 					.types = rss_conf.rss_hf,
 					.key_len = rss_conf.rss_key_len,
 					.queue_num = j,
 					.key = rss_key,
 					.queue = queue,
-				};
-				ret = rte_flow_validate(sa->portid, &sa->attr,
-							sa->pattern, sa->action,
-							&err);
-				if (!ret)
-					goto flow_create;
-				/* Try Queue. */
-				sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
-				sa->action[1].conf =
-					&(struct rte_flow_action_queue){
-					.index = 0,
-				};
-				ret = rte_flow_validate(sa->portid, &sa->attr,
-							sa->pattern, sa->action,
-							&err);
-				/* Try End. */
-				sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
-				sa->action[1].conf = NULL;
-				ret = rte_flow_validate(sa->portid, &sa->attr,
+			};
+			ret = rte_flow_validate(sa->portid, &sa->attr,
+						sa->pattern, sa->action,
+						&err);
+			if (!ret)
+				goto flow_create;
+			/* Try Queue. */
+			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+			sa->action[1].conf =
+				&(struct rte_flow_action_queue){
+				.index = 0,
+			};
+			ret = rte_flow_validate(sa->portid, &sa->attr,
+						sa->pattern, sa->action,
+						&err);
+			/* Try End. */
+			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+			sa->action[1].conf = NULL;
+			ret = rte_flow_validate(sa->portid, &sa->attr,
 							sa->pattern, sa->action,
 							&err);
-				if (ret)
-					goto flow_create_failure;
-			} else if (sa->attr.egress &&
-				   (sa->ol_flags &
-				    RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
-				sa->action[1].type =
-					RTE_FLOW_ACTION_TYPE_PASSTHRU;
-				sa->action[2].type =
-					RTE_FLOW_ACTION_TYPE_END;
-			}
+			if (ret)
+				goto flow_create_failure;
+		} else if (sa->attr.egress &&
+				(sa->ol_flags &
+				RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
+			sa->action[1].type =
+				RTE_FLOW_ACTION_TYPE_PASSTHRU;
+			sa->action[2].type =
+				RTE_FLOW_ACTION_TYPE_END;
+		}
 flow_create:
-			sa->flow = rte_flow_create(sa->portid,
-				&sa->attr, sa->pattern, sa->action, &err);
-			if (sa->flow == NULL) {
+		sa->flow = rte_flow_create(sa->portid,
+			&sa->attr, sa->pattern, sa->action, &err);
+		if (sa->flow == NULL) {
 flow_create_failure:
-				RTE_LOG(ERR, IPSEC,
-					"Failed to create ipsec flow msg: %s\n",
-					err.message);
-				return -1;
-			}
-		} else if (sa->type ==
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
-			struct rte_security_ctx *ctx =
-					(struct rte_security_ctx *)
-					rte_eth_dev_get_sec_ctx(sa->portid);
-			const struct rte_security_capability *sec_cap;
-
-			if (ctx == NULL) {
-				RTE_LOG(ERR, IPSEC,
-				"Ethernet device doesn't have security features registered\n");
-				return -1;
-			}
+			RTE_LOG(ERR, IPSEC,
+				"Failed to create ipsec flow msg: %s\n",
+				err.message);
+			return -1;
+		}
+	} else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+		struct rte_security_ctx *ctx =
+				(struct rte_security_ctx *)
+				rte_eth_dev_get_sec_ctx(sa->portid);
+		const struct rte_security_capability *sec_cap;
 
-			/* Set IPsec parameters in conf */
-			set_ipsec_conf(sa, &(sess_conf.ipsec));
-
-			/* Save SA as userdata for the security session. When
-			 * the packet is received, this userdata will be
-			 * retrieved using the metadata from the packet.
-			 *
-			 * The PMD is expected to set similar metadata for other
-			 * operations, like rte_eth_event, which are tied to
-			 * security session. In such cases, the userdata could
-			 * be obtained to uniquely identify the security
-			 * parameters denoted.
-			 */
-
-			sess_conf.userdata = (void *) sa;
-
-			sa->sec_session = rte_security_session_create(ctx,
-					&sess_conf, ipsec_ctx->session_pool);
-			if (sa->sec_session == NULL) {
-				RTE_LOG(ERR, IPSEC,
-				"SEC Session init failed: err: %d\n", ret);
-				return -1;
-			}
+		if (ctx == NULL) {
+			RTE_LOG(ERR, IPSEC,
+			"Ethernet device doesn't have security features registered\n");
+			return -1;
+		}
+
+		/* Set IPsec parameters in conf */
+		set_ipsec_conf(sa, &(sess_conf.ipsec));
+
+		/* Save SA as userdata for the security session. When
+		 * the packet is received, this userdata will be
+		 * retrieved using the metadata from the packet.
+		 *
+		 * The PMD is expected to set similar metadata for other
+		 * operations, like rte_eth_event, which are tied to
+		 * security session. In such cases, the userdata could
+		 * be obtained to uniquely identify the security
+		 * parameters denoted.
+		 */
+
+		sess_conf.userdata = (void *) sa;
+
+		sa->sec_session = rte_security_session_create(ctx,
+			&sess_conf, pool);
+		if (sa->sec_session == NULL) {
+			RTE_LOG(ERR, IPSEC,
+			"SEC Session init failed: err: %d\n", ret);
+			return -1;
+		}
 
-			sec_cap = rte_security_capabilities_get(ctx);
+		sec_cap = rte_security_capabilities_get(ctx);
 
-			if (sec_cap == NULL) {
-				RTE_LOG(ERR, IPSEC,
-				"No capabilities registered\n");
-				return -1;
-			}
+		if (sec_cap == NULL) {
+			RTE_LOG(ERR, IPSEC,
+			"No capabilities registered\n");
+			return -1;
+		}
 
-			/* iterate until ESP tunnel*/
-			while (sec_cap->action !=
+		/* iterate until ESP tunnel*/
+		while (sec_cap->action !=
 					RTE_SECURITY_ACTION_TYPE_NONE) {
 
-				if (sec_cap->action == sa->type &&
-				    sec_cap->protocol ==
-					RTE_SECURITY_PROTOCOL_IPSEC &&
-				    sec_cap->ipsec.mode ==
-					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
-				    sec_cap->ipsec.direction == sa->direction)
-					break;
-				sec_cap++;
-			}
+			if (sec_cap->action == sa->type &&
+				sec_cap->protocol ==
+				RTE_SECURITY_PROTOCOL_IPSEC &&
+				sec_cap->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+				sec_cap->ipsec.direction == sa->direction)
+				break;
+			sec_cap++;
+		}
 
-			if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
-				RTE_LOG(ERR, IPSEC,
-				"No suitable security capability found\n");
-				return -1;
-			}
+		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+			RTE_LOG(ERR, IPSEC,
+			"No suitable security capability found\n");
+			return -1;
+		}
+
+		sa->ol_flags = sec_cap->ol_flags;
+		sa->security_ctx = ctx;
+	} else
+		return -EINVAL;
+	return 0;
+}
+
+#define CDEV_IV_SIZE 12
+
+static int
+check_cryptodev_aead_capability(const struct ipsec_sa *ss, uint8_t dev_id)
+{
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *cap;
 
-			sa->ol_flags = sec_cap->ol_flags;
-			sa->security_ctx = ctx;
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+	cap_idx.algo.aead = ss->aead_algo;
+
+	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
+	if (cap == NULL)
+		return -ENOENT;
+
+	return rte_cryptodev_sym_capability_check_aead(cap,
+					ss->cipher_key_len,
+					ss->digest_len,
+					ss->aad_len,
+					CDEV_IV_SIZE);
+}
+
+static int
+check_cryptodev_capability(const struct ipsec_sa *ss, uint8_t dev_id)
+{
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	const struct rte_cryptodev_symmetric_capability *cap;
+	uint16_t auth_iv_len;
+	int rc = -1;
+
+	if (ss == NULL)
+		return rc;
+
+	if (ss->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
+		return check_cryptodev_aead_capability(ss, dev_id);
+
+	auth_iv_len = 0;
+
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	cap_idx.algo.auth = ss->auth_algo;
+	cap = rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
+	if (cap != NULL) {
+		rc = rte_cryptodev_sym_capability_check_auth(
+				cap, ss->auth_key_len, ss->digest_len,
+				auth_iv_len);
+		if (rc == 0) {
+			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+			cap_idx.algo.cipher = ss->cipher_algo;
+			cap = rte_cryptodev_sym_capability_get(dev_id,
+					&cap_idx);
+			if (cap != NULL)
+				rc = rte_cryptodev_sym_capability_check_cipher(
+						cap, ss->cipher_key_len,
+						ss->iv_len);
 		}
-	} else {
-		sa->crypto_session = rte_cryptodev_sym_session_create(
-				ipsec_ctx->session_pool);
-		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
-				sa->crypto_session, sa->xforms,
-				ipsec_ctx->session_priv_pool);
-
-		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
-				&cdev_info);
 	}
-	sa->cdev_id_qp = cdev_id_qp;
 
-	return 0;
+	return rc;
+}
+
+static int
+create_crypto_session(struct ipsec_sa *sa, struct rte_mempool *pool)
+{
+	int32_t rc;
+	uint32_t devnum, i;
+	struct rte_cryptodev_sym_session *s;
+	uint8_t devid[RTE_CRYPTO_MAX_DEVS];
+
+	/* check which cryptodevs support SA */
+	devnum = 0;
+	for (i = 0; i < crypto_dev_num; i++) {
+		rc = check_cryptodev_capability(sa, crypto_devid[i]);
+		if (rc == 0)
+			devid[devnum++] = crypto_devid[i];
+	}
+
+	if (devnum == 0)
+		return -ENODEV;
+
+	s = rte_cryptodev_sym_session_create(pool);
+	if (s == NULL)
+		return -ENOMEM;
+
+	/* initialize SA crypto session for all supported devices */
+	for (i = 0; i != devnum; i++) {
+		rc = rte_cryptodev_sym_session_init(devid[i], s, sa->xforms,
+			pool);
+		if (rc != 0)
+			break;
+	}
+
+	if (i == devnum) {
+		sa->crypto_session = s;
+		return 0;
+	}
+
+	/* failure, do cleanup */
+	while (i-- != 0)
+		rte_cryptodev_sym_session_clear(devid[i], s);
+
+	rte_cryptodev_sym_session_free(s);
+	return rc;
+}
+
+int
+create_session(struct ipsec_sa *sa, struct rte_mempool *pool)
+{
+	if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE)
+		return create_sec_session(sa, pool);
+	else
+		return create_crypto_session(sa, pool);
 }
 
 /*
@@ -393,13 +461,6 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
 
 			rte_prefetch0(&priv->sym_cop);
-
-			if ((unlikely(sa->sec_session == NULL)) &&
-					create_session(ipsec_ctx, sa)) {
-				rte_pktmbuf_free(pkts[i]);
-				continue;
-			}
-
 			sym_cop = get_sym_cop(&priv->cop);
 			sym_cop->m_src = pkts[i];
 
@@ -412,13 +473,6 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
 
 			rte_prefetch0(&priv->sym_cop);
-
-			if ((unlikely(sa->crypto_session == NULL)) &&
-					create_session(ipsec_ctx, sa)) {
-				rte_pktmbuf_free(pkts[i]);
-				continue;
-			}
-
 			rte_crypto_op_attach_sym_session(&priv->cop,
 					sa->crypto_session);
 
@@ -429,12 +483,7 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 			}
 			break;
 		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
-			if ((unlikely(sa->sec_session == NULL)) &&
-					create_session(ipsec_ctx, sa)) {
-				rte_pktmbuf_free(pkts[i]);
-				continue;
-			}
-
+			RTE_ASSERT(sa->sec_session != NULL);
 			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
 			if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
 				rte_security_set_pkt_metadata(
@@ -442,17 +491,11 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 						sa->sec_session, pkts[i], NULL);
 			continue;
 		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+			RTE_ASSERT(sa->sec_session != NULL);
 			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
 			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
 
 			rte_prefetch0(&priv->sym_cop);
-
-			if ((unlikely(sa->sec_session == NULL)) &&
-					create_session(ipsec_ctx, sa)) {
-				rte_pktmbuf_free(pkts[i]);
-				continue;
-			}
-
 			rte_security_attach_session(&priv->cop,
 					sa->sec_session);
 
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 99f49d6..804330c 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -83,6 +83,14 @@  struct app_sa_prm {
 
 extern struct app_sa_prm app_sa_prm;
 
+/*
+ * Number of enabled crypto devices
+ * This number is needed when checking crypto device capabilities
+ */
+extern uint8_t crypto_dev_num;
+/* array of crypto device ID's */
+extern uint8_t crypto_devid[RTE_CRYPTO_MAX_DEVS];
+
 struct ipsec_sa {
 	struct rte_ipsec_session ips; /* one session per sa for now */
 	uint32_t spi;
@@ -306,6 +314,6 @@  void
 enqueue_cop_burst(struct cdev_qp *cqp);
 
 int
-create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+create_session(struct ipsec_sa *sa, struct rte_mempool *pool);
 
 #endif /* __IPSEC_H__ */
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 3f9cacb..0df6969 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -86,39 +86,6 @@  enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
 	cqp->len = len;
 }
 
-static inline int
-fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
-	struct ipsec_sa *sa)
-{
-	int32_t rc;
-
-	/* setup crypto section */
-	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
-		if (sa->crypto_session == NULL) {
-			rc = create_session(ctx, sa);
-			if (rc != 0)
-				return rc;
-		}
-		ss->crypto.ses = sa->crypto_session;
-	/* setup session action type */
-	} else {
-		if (sa->sec_session == NULL) {
-			rc = create_session(ctx, sa);
-			if (rc != 0)
-				return rc;
-		}
-		ss->security.ses = sa->sec_session;
-		ss->security.ctx = sa->security_ctx;
-		ss->security.ol_flags = sa->ol_flags;
-	}
-
-	rc = rte_ipsec_session_prepare(ss);
-	if (rc != 0)
-		memset(ss, 0, sizeof(*ss));
-
-	return rc;
-}
-
 /*
  * group input packets byt the SA they belong to.
  */
@@ -219,9 +186,8 @@  ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 
 		ips = &sa->ips;
 
-		/* no valid HW session for that SA, try to create one */
-		if (sa == NULL || (ips->crypto.ses == NULL &&
-				fill_ipsec_session(ips, ctx, sa) != 0))
+		/* no valid HW session for that SA */
+		if (sa == NULL || ips->crypto.ses == NULL)
 			k = 0;
 
 		/* process packets inline */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index a7298a3..0f36f5b 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -774,14 +774,14 @@  check_eth_dev_caps(uint16_t portid, uint32_t inbound)
 	return 0;
 }
 
-
 static int
 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
-		uint32_t nb_entries, uint32_t inbound)
+	uint32_t nb_entries, uint32_t inbound, struct socket_ctx *skt_ctx)
 {
 	struct ipsec_sa *sa;
 	uint32_t i, idx;
 	uint16_t iv_length, aad_length;
+	int32_t rc;
 
 	/* for ESN upper 32 bits of SQN also need to be part of AAD */
 	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
@@ -902,6 +902,12 @@  sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 
 			print_one_sa_rule(sa, inbound);
 		}
+		rc = create_session(sa, skt_ctx->session_pool);
+		if (rc != 0) {
+			RTE_LOG(ERR, IPSEC_ESP,
+					"create_session() failed\n");
+			return -EINVAL;
+		}
 	}
 
 	return 0;
@@ -909,16 +915,16 @@  sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 
 static inline int
 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
-		uint32_t nb_entries)
+		uint32_t nb_entries, struct socket_ctx *skt_ctx)
 {
-	return sa_add_rules(sa_ctx, entries, nb_entries, 0);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
 }
 
 static inline int
 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
-		uint32_t nb_entries)
+		uint32_t nb_entries, struct socket_ctx *skt_ctx)
 {
-	return sa_add_rules(sa_ctx, entries, nb_entries, 1);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
 }
 
 /*
@@ -1012,10 +1018,12 @@  fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
 	return 0;
 }
 
-static void
+static int
 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
 	const struct ipsec_sa *lsa)
 {
+	int32_t rc = 0;
+
 	ss->sa = sa;
 	ss->type = lsa->type;
 
@@ -1028,6 +1036,12 @@  fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
 		ss->security.ctx = lsa->security_ctx;
 		ss->security.ol_flags = lsa->ol_flags;
 	}
+
+	rc = rte_ipsec_session_prepare(ss);
+	if (rc != 0)
+		memset(ss, 0, sizeof(*ss));
+
+	return rc;
 }
 
 /*
@@ -1062,8 +1076,8 @@  ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
 	if (rc < 0)
 		return rc;
 
-	fill_ipsec_session(&lsa->ips, sa, lsa);
-	return 0;
+	rc = fill_ipsec_session(&lsa->ips, sa, lsa);
+	return rc;
 }
 
 /*
@@ -1141,7 +1155,10 @@  sa_init(struct socket_ctx *ctx, int32_t socket_id)
 				"context %s in socket %d\n", rte_errno,
 				name, socket_id);
 
-		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
+		rc = sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
+		if (rc != 0)
+			rte_exit(EXIT_FAILURE,
+				"failed to add inbound rules\n");
 
 		if (app_sa_prm.enable != 0) {
 			rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
@@ -1161,7 +1178,10 @@  sa_init(struct socket_ctx *ctx, int32_t socket_id)
 				"context %s in socket %d\n", rte_errno,
 				name, socket_id);
 
-		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
+		rc = sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
+		if (rc != 0)
+			rte_exit(EXIT_FAILURE,
+				"failed to add outbound rules\n");
 
 		if (app_sa_prm.enable != 0) {
 			rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,