[v7,2/4] examples/ipsec-secgw: add fallback session feature

Message ID 20191014134842.3084-3-marcinx.smoczynski@intel.com (mailing list archive)
State Accepted, archived
Delegated to: akhil goyal
Series: add fallback session

Checks

Context               Check    Description
ci/Intel-compilation  success  Compilation OK
ci/checkpatch         success  coding style OK

Commit Message

Marcin Smoczynski Oct. 14, 2019, 1:48 p.m. UTC
Inline processing is limited to a specified subset of traffic. It is
often unable to handle more complicated situations, such as fragmented
traffic. When using inline processing, such traffic is dropped.

Introduce a fallback session for inline crypto processing, allowing
handling of packets that would normally be dropped. A fallback session
is configured by adding the 'fallback' keyword with the 'lookaside-none'
parameter to an SA configuration. Only the combination of
'inline-crypto-offload' as the primary session and 'lookaside-none' as
the fallback session is supported by this patch.
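
As an illustration only (not part of this patch), a hypothetical ingress
SA rule using the new keyword could look like the following; the key
values are placeholders:

    sa in 117 cipher_algo aes-128-cbc cipher_key <16-byte key> \
    auth_algo sha1-hmac auth_key <20-byte key> \
    mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5 \
    type inline-crypto-offload port_id 0 fallback lookaside-none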

The fallback session feature is not available in legacy mode.
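
In the diff below, the fallback marking is implemented by setting the
least significant bit of the SA pointer during inbound lookup (see
IPSEC_SA_OFFLOAD_FALLBACK_FLAG and ipsec_mask_saptr() in ipsec.h). The
following is a minimal standalone sketch of that pointer-tagging idea,
independent of the DPDK code; the names (tag_fallback, untag,
needs_fallback) are illustrative only, and the struct is assumed to be
at least 2-byte aligned so the LSB of its address is free:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FALLBACK_FLAG ((uintptr_t)1)

    struct sa { uint32_t spi; };

    /* tag: record "needs fallback" in the otherwise unused LSB */
    static void *tag_fallback(struct sa *sa)
    {
        return (void *)((uintptr_t)sa | FALLBACK_FLAG);
    }

    /* untag: clear the marker bit to get a dereferenceable pointer back */
    static struct sa *untag(void *p)
    {
        return (struct sa *)((uintptr_t)p & ~FALLBACK_FLAG);
    }

    /* check whether the fallback marker is set */
    static int needs_fallback(void *p)
    {
        return ((uintptr_t)p & FALLBACK_FLAG) != 0;
    }

    int main(void)
    {
        struct sa sa = { .spi = 117 };
        void *tagged = tag_fallback(&sa);

        printf("fallback=%d spi=%" PRIu32 "\n",
            needs_fallback(tagged), untag(tagged)->spi);
        return 0;
    }

Because tagged and untagged pointers differ in value, the grouping done
by sa_group() in ipsec_process.c naturally places packets that need
fallback processing in their own group, which the processing loop then
steers to the lookaside path.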

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Tested-by: Bernard Iremonger <bernard.iremonger@intel.com>
Signed-off-by: Marcin Smoczynski <marcinx.smoczynski@intel.com>
---
 doc/guides/rel_notes/release_19_11.rst   |   8 ++
 doc/guides/sample_app_ug/ipsec_secgw.rst |  22 ++++-
 examples/ipsec-secgw/esp.c               |   4 +-
 examples/ipsec-secgw/ipsec-secgw.c       |  16 ++--
 examples/ipsec-secgw/ipsec.c             |  10 +--
 examples/ipsec-secgw/ipsec.h             |  40 +++++++--
 examples/ipsec-secgw/ipsec_process.c     |  85 ++++++++++++------
 examples/ipsec-secgw/sa.c                | 105 +++++++++++++++++++----
 8 files changed, 226 insertions(+), 64 deletions(-)
  

Patch

diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index 85953b962..7e1205be7 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -115,6 +115,14 @@  New Features
   Added eBPF JIT support for arm64 architecture to improve the eBPF program
   performance.
 
+* **Updated the IPSec library and IPsec Security Gateway application.**
+
+  Added the following features to ``librte_ipsec`` and the ``ipsec-secgw``
+  sample application:
+
+  * Support fragmented packets in inline crypto processing mode with fallback
+    ``lookaside-none`` session.
+
 
 Removed Items
 -------------
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index ad2d79e75..cb6fadb35 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -401,7 +401,7 @@  The SA rule syntax is shown as follows:
 .. code-block:: console
 
     sa <dir> <spi> <cipher_algo> <cipher_key> <auth_algo> <auth_key>
-    <mode> <src_ip> <dst_ip> <action_type> <port_id>
+    <mode> <src_ip> <dst_ip> <action_type> <port_id> <fallback>
 
 where each options means:
 
@@ -573,6 +573,26 @@  where each options means:
 
    * *port_id X* X is a valid device number in decimal
 
+ ``<fallback>``
+
+ * Action type for ingress IPsec packets that the inline processor failed
+   to process. Only a combination of *inline-crypto-offload* as a primary
+   session and *lookaside-none* as a fallback session is supported at the
+   moment.
+
+   If used in conjunction with the IPsec window, the window width needs to
+   be increased because the different processing times of inline and
+   lookaside modes result in packet reordering.
+
+ * Optional: Yes.
+
+ * Available options:
+
+   * *lookaside-none*: use automatically chosen cryptodev to process packets
+
+ * Syntax:
+
+   * *fallback lookaside-none*
 
 Example SA rules:
 
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index c1b49da1e..bfa7ff721 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -155,7 +155,7 @@  esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 	RTE_ASSERT(sa != NULL);
 	RTE_ASSERT(cop != NULL);
 
-	ips = ipsec_get_session(sa);
+	ips = ipsec_get_primary_session(sa);
 
 	if ((ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
 			(ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
@@ -234,7 +234,7 @@  esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(sa != NULL);
 
-	ips = ipsec_get_session(sa);
+	ips = ipsec_get_primary_session(sa);
 	ip_hdr_len = 0;
 
 	ip4 = rte_pktmbuf_mtod(m, struct ip *);
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index f52ca5297..904ee65d9 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -189,6 +189,7 @@  static uint32_t mtu_size = RTE_ETHER_MTU;
 
 /* application wide librte_ipsec/SA parameters */
 struct app_sa_prm app_sa_prm = {.enable = 0};
+static const char *cfgfile;
 
 struct lcore_rx_queue {
 	uint16_t port_id;
@@ -1465,12 +1466,7 @@  parse_args(int32_t argc, char **argv)
 				print_usage(prgname);
 				return -1;
 			}
-			if (parse_cfg_file(optarg) < 0) {
-				printf("parsing file \"%s\" failed\n",
-					optarg);
-				print_usage(prgname);
-				return -1;
-			}
+			cfgfile = optarg;
 			f_present = 1;
 			break;
 		case 'j':
@@ -2418,6 +2414,14 @@  main(int32_t argc, char **argv)
 	if (ret < 0)
 		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
 
+	/* parse configuration file */
+	if (parse_cfg_file(cfgfile) < 0) {
+		printf("parsing file \"%s\" failed\n",
+			cfgfile);
+		print_usage(argv[0]);
+		return -1;
+	}
+
 	if ((unprotected_port_mask & enabled_port_mask) !=
 			unprotected_port_mask)
 		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 5b63a13fd..d7761e966 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -430,7 +430,7 @@  enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
 
 static inline void
 ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
-		struct rte_mbuf *pkts[], struct ipsec_sa *sas[],
+		struct rte_mbuf *pkts[], void *sas[],
 		uint16_t nb_pkts)
 {
 	int32_t ret = 0, i;
@@ -449,9 +449,9 @@  ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 		rte_prefetch0(pkts[i]);
 
 		priv = get_priv(pkts[i]);
-		sa = sas[i];
+		sa = ipsec_mask_saptr(sas[i]);
 		priv->sa = sa;
-		ips = ipsec_get_session(sa);
+		ips = ipsec_get_primary_session(sa);
 
 		switch (ips->type) {
 		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
@@ -618,7 +618,7 @@  uint16_t
 ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
 		uint16_t nb_pkts, uint16_t len)
 {
-	struct ipsec_sa *sas[nb_pkts];
+	void *sas[nb_pkts];
 
 	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);
 
@@ -638,7 +638,7 @@  uint16_t
 ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
 		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
 {
-	struct ipsec_sa *sas[nb_pkts];
+	void *sas[nb_pkts];
 
 	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);
 
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index a4ad81b0e..8e075216c 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -81,12 +81,32 @@  struct app_sa_prm {
 
 extern struct app_sa_prm app_sa_prm;
 
+enum {
+	IPSEC_SESSION_PRIMARY = 0,
+	IPSEC_SESSION_FALLBACK = 1,
+	IPSEC_SESSION_MAX
+};
+
+#define IPSEC_SA_OFFLOAD_FALLBACK_FLAG (1)
+
+static inline struct ipsec_sa *
+ipsec_mask_saptr(void *ptr)
+{
+	uintptr_t i = (uintptr_t)ptr;
+	static const uintptr_t mask = IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
+
+	i &= ~mask;
+
+	return (struct ipsec_sa *)i;
+}
+
 struct ipsec_sa {
-	struct rte_ipsec_session ips; /* one session per sa for now */
+	struct rte_ipsec_session sessions[IPSEC_SESSION_MAX];
 	uint32_t spi;
 	uint32_t cdev_id_qp;
 	uint64_t seq;
 	uint32_t salt;
+	uint32_t fallback_sessions;
 	enum rte_crypto_cipher_algorithm cipher_algo;
 	enum rte_crypto_auth_algorithm auth_algo;
 	enum rte_crypto_aead_algorithm aead_algo;
@@ -210,7 +230,7 @@  struct cnt_blk {
 struct traffic_type {
 	const uint8_t *data[MAX_PKT_BURST * 2];
 	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
-	struct ipsec_sa *saptr[MAX_PKT_BURST * 2];
+	void *saptr[MAX_PKT_BURST * 2];
 	uint32_t res[MAX_PKT_BURST * 2];
 	uint32_t num;
 };
@@ -278,16 +298,22 @@  get_sym_cop(struct rte_crypto_op *cop)
 }
 
 static inline struct rte_ipsec_session *
-ipsec_get_session(struct ipsec_sa *sa)
+ipsec_get_primary_session(struct ipsec_sa *sa)
+{
+	return &sa->sessions[IPSEC_SESSION_PRIMARY];
+}
+
+static inline struct rte_ipsec_session *
+ipsec_get_fallback_session(struct ipsec_sa *sa)
 {
-	return &sa->ips;
+	return &sa->sessions[IPSEC_SESSION_FALLBACK];
 }
 
 static inline enum rte_security_session_action_type
 ipsec_get_action_type(struct ipsec_sa *sa)
 {
 	struct rte_ipsec_session *ips;
-	ips = ipsec_get_session(sa);
+	ips = ipsec_get_primary_session(sa);
 	return ips->type;
 }
 
@@ -296,11 +322,11 @@  inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx);
 
 void
 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
-		struct ipsec_sa *sa[], uint16_t nb_pkts);
+		void *sa[], uint16_t nb_pkts);
 
 void
 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
-		struct ipsec_sa *sa[], uint16_t nb_pkts);
+		void *sa[], uint16_t nb_pkts);
 
 void
 sp4_init(struct socket_ctx *ctx, int32_t socket_id);
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 239d81ef6..2eb5c8b34 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -18,7 +18,6 @@ 
 	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
 	((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
 
-
 /* helper routine to free bulk of packets */
 static inline void
 free_pkts(struct rte_mbuf *mb[], uint32_t n)
@@ -118,7 +117,7 @@  fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
  * group input packets byt the SA they belong to.
  */
 static uint32_t
-sa_group(struct ipsec_sa *sa_ptr[], struct rte_mbuf *pkts[],
+sa_group(void *sa_ptr[], struct rte_mbuf *pkts[],
 	struct rte_ipsec_group grp[], uint32_t num)
 {
 	uint32_t i, n, spi;
@@ -185,6 +184,37 @@  copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
 	out->num += num;
 }
 
+static uint32_t
+ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
+		struct rte_ipsec_session *ips, struct rte_mbuf **m,
+		unsigned int cnt)
+{
+	struct cdev_qp *cqp;
+	struct rte_crypto_op *cop[cnt];
+	uint32_t j, k;
+	struct ipsec_mbuf_metadata *priv;
+
+	cqp = &ctx->tbl[sa->cdev_id_qp];
+
+	/* for that app each mbuf has its own crypto op */
+	for (j = 0; j != cnt; j++) {
+		priv = get_priv(m[j]);
+		cop[j] = &priv->cop;
+		/*
+		 * this is just to satisfy inbound_sa_check()
+		 * should be removed in future.
+		 */
+		priv->sa = sa;
+	}
+
+	/* prepare and enqueue crypto ops */
+	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
+	if (k != 0)
+		enqueue_cop_bulk(cqp, cop, k);
+
+	return k;
+}
+
 /*
  * Process ipsec packets.
  * If packet belong to SA that is subject of inline-crypto,
@@ -201,18 +231,15 @@  ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 	struct ipsec_mbuf_metadata *priv;
 	struct rte_ipsec_group *pg;
 	struct rte_ipsec_session *ips;
-	struct cdev_qp *cqp;
-	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
 	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
 
 	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);
 
 	for (i = 0; i != n; i++) {
-
 		pg = grp + i;
-		sa = pg->id.ptr;
+		sa = ipsec_mask_saptr(pg->id.ptr);
 
-		ips = ipsec_get_session(sa);
+		ips = ipsec_get_primary_session(sa);
 
 		/* no valid HW session for that SA, try to create one */
 		if (sa == NULL || (ips->crypto.ses == NULL &&
@@ -224,6 +251,7 @@  ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 				ips->type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
 
+			/* get SA type */
 			satp = rte_ipsec_sa_type(ips->sa);
 
 			/*
@@ -236,30 +264,33 @@  ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 				priv->sa = sa;
 			}
 
-			k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
-			copy_to_trf(trf, satp, pg->m, k);
+			/* fall back to cryptodev for RX packets which the
+			 * inline processor was unable to process
+			 */
+			if (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) {
+				/* offload packets to cryptodev */
+				struct rte_ipsec_session *fallback;
+
+				fallback = ipsec_get_fallback_session(sa);
+				if (fallback->crypto.ses == NULL &&
+					fill_ipsec_session(fallback, ctx, sa)
+					!= 0)
+					k = 0;
+				else
+					k = ipsec_prepare_crypto_group(ctx, sa,
+						fallback, pg->m, pg->cnt);
+			} else {
+				/* finish processing of packets successfully
+				 * decrypted by an inline processor
+				 */
+				k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
+				copy_to_trf(trf, satp, pg->m, k);
 
+			}
 		/* enqueue packets to crypto dev */
 		} else {
-
-			cqp = &ctx->tbl[sa->cdev_id_qp];
-
-			/* for that app each mbuf has it's own crypto op */
-			for (j = 0; j != pg->cnt; j++) {
-				priv = get_priv(pg->m[j]);
-				cop[j] = &priv->cop;
-				/*
-				 * this is just to satisfy inbound_sa_check()
-				 * should be removed in future.
-				 */
-				priv->sa = sa;
-			}
-
-			/* prepare and enqueue crypto ops */
-			k = rte_ipsec_pkt_crypto_prepare(ips, pg->m, cop,
+			k = ipsec_prepare_crypto_group(ctx, sa, ips, pg->m,
 				pg->cnt);
-			if (k != 0)
-				enqueue_cop_bulk(cqp, cop, k);
 		}
 
 		/* drop packets that cannot be enqueued/processed */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 52752e15f..4cb90857c 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -31,6 +31,8 @@ 
 
 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
 
+#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
+
 struct supported_cipher_algo {
 	const char *keyword;
 	enum rte_crypto_cipher_algorithm algo;
@@ -235,6 +237,7 @@  parse_sa_tokens(char **tokens, uint32_t n_tokens,
 	uint32_t mode_p = 0;
 	uint32_t type_p = 0;
 	uint32_t portid_p = 0;
+	uint32_t fallback_p = 0;
 
 	if (strcmp(tokens[0], "in") == 0) {
 		ri = &nb_sa_in;
@@ -245,6 +248,7 @@  parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			return;
 
 		rule = &sa_in[*ri];
+		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
 	} else {
 		ri = &nb_sa_out;
 
@@ -254,6 +258,7 @@  parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			return;
 
 		rule = &sa_out[*ri];
+		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
 	}
 
 	/* spi number */
@@ -263,7 +268,7 @@  parse_sa_tokens(char **tokens, uint32_t n_tokens,
 	if (atoi(tokens[1]) == INVALID_SPI)
 		return;
 	rule->spi = atoi(tokens[1]);
-	ips = ipsec_get_session(rule);
+	ips = ipsec_get_primary_session(rule);
 
 	for (ti = 2; ti < n_tokens; ti++) {
 		if (strcmp(tokens[ti], "mode") == 0) {
@@ -596,6 +601,45 @@  parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			continue;
 		}
 
+		if (strcmp(tokens[ti], "fallback") == 0) {
+			struct rte_ipsec_session *fb;
+
+			APP_CHECK(app_sa_prm.enable, status, "Fallback session "
+				"not allowed for legacy mode.");
+			if (status->status < 0)
+				return;
+			APP_CHECK(ips->type ==
+				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
+				"Fallback session allowed if primary session "
+				"is of type inline-crypto-offload only.");
+			if (status->status < 0)
+				return;
+			APP_CHECK(rule->direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
+				"Fallback session not allowed for egress "
+				"rule");
+			if (status->status < 0)
+				return;
+			APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
+			if (status->status < 0)
+				return;
+			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+			if (status->status < 0)
+				return;
+			fb = ipsec_get_fallback_session(rule);
+			if (strcmp(tokens[ti], "lookaside-none") == 0) {
+				fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
+			} else {
+				APP_CHECK(0, status, "unrecognized fallback "
+					"type %s.", tokens[ti]);
+				return;
+			}
+
+			rule->fallback_sessions = 1;
+			fallback_p = 1;
+			continue;
+		}
+
 		/* unrecognizeable input */
 		APP_CHECK(0, status, "unrecognized input \"%s\"",
 			tokens[ti]);
@@ -643,6 +687,7 @@  print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
 	uint32_t i;
 	uint8_t a, b, c, d;
 	const struct rte_ipsec_session *ips;
+	const struct rte_ipsec_session *fallback_ips;
 
 	printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
 
@@ -699,7 +744,7 @@  print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
 		break;
 	}
 
-	ips = &sa->ips;
+	ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
 	printf(" type:");
 	switch (ips->type) {
 	case RTE_SECURITY_ACTION_TYPE_NONE:
@@ -715,6 +760,15 @@  print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
 		printf("lookaside-protocol-offload ");
 		break;
 	}
+
+	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
+	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
+		printf("inline fallback: ");
+		if (fallback_ips->type == RTE_SECURITY_ACTION_TYPE_NONE)
+			printf("lookaside-none");
+		else
+			printf("invalid");
+	}
 	printf("\n");
 }
 
@@ -904,7 +958,7 @@  sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		}
 		*sa = entries[i];
 		sa->seq = 0;
-		ips = ipsec_get_session(sa);
+		ips = ipsec_get_primary_session(sa);
 
 		if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
 			ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
@@ -912,9 +966,6 @@  sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				return -EINVAL;
 		}
 
-		sa->direction = (inbound == 1) ?
-				RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
-				RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
 
 		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
 		case IP4_TUNNEL:
@@ -954,7 +1005,7 @@  sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 
 			sa->xforms = &sa_ctx->xf[idx].a;
 
-			ips = ipsec_get_session(sa);
+			ips = ipsec_get_primary_session(sa);
 			if (ips->type ==
 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
 				ips->type ==
@@ -1168,9 +1219,15 @@  ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
 	if (rc < 0)
 		return rc;
 
-	/* init processing session */
-	ips = ipsec_get_session(lsa);
+	/* init primary processing session */
+	ips = ipsec_get_primary_session(lsa);
 	rc = fill_ipsec_session(ips, sa);
+	if (rc != 0)
+		return rc;
+
+	/* init inline fallback processing session */
+	if (lsa->fallback_sessions == 1)
+		rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
 
 	return rc;
 }
@@ -1326,13 +1383,14 @@  inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
 
 static inline void
 single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
-		struct ipsec_sa **sa_ret)
+		void **sa_ret)
 {
 	struct rte_esp_hdr *esp;
 	struct ip *ip;
 	uint32_t *src4_addr;
 	uint8_t *src6_addr;
 	struct ipsec_sa *sa;
+	void *result_sa;
 
 	*sa_ret = NULL;
 
@@ -1342,33 +1400,48 @@  single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
 	if (esp->spi == INVALID_SPI)
 		return;
 
-	sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
+	result_sa = sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
 	if (rte_be_to_cpu_32(esp->spi) != sa->spi)
 		return;
 
+	/*
+	 * Mark the need for inline offload fallback on the LSB of the SA
+	 * pointer. Thanks to the packet grouping mechanism used by
+	 * ipsec_process, packets marked for fallback processing will form
+	 * a separate group.
+	 *
+	 * Because the tagged pointer must not be used directly, it is cast
+	 * to void *; use ipsec_mask_saptr to get a valid struct pointer.
+	 */
+	if (MBUF_NO_SEC_OFFLOAD(pkt) && sa->fallback_sessions > 0) {
+		uintptr_t intsa = (uintptr_t)sa;
+		intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
+		result_sa = (void *)intsa;
+	}
+
 	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
 	case IP4_TUNNEL:
 		src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
 		if ((ip->ip_v == IPVERSION) &&
 				(sa->src.ip.ip4 == *src4_addr) &&
 				(sa->dst.ip.ip4 == *(src4_addr + 1)))
-			*sa_ret = sa;
+			*sa_ret = result_sa;
 		break;
 	case IP6_TUNNEL:
 		src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
 		if ((ip->ip_v == IP6_VERSION) &&
 				!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
 				!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
-			*sa_ret = sa;
+			*sa_ret = result_sa;
 		break;
 	case TRANSPORT:
-		*sa_ret = sa;
+		*sa_ret = result_sa;
 	}
 }
 
 void
 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
-		struct ipsec_sa *sa[], uint16_t nb_pkts)
+		void *sa[], uint16_t nb_pkts)
 {
 	uint32_t i;
 
@@ -1378,7 +1451,7 @@  inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
 
 void
 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
-		struct ipsec_sa *sa[], uint16_t nb_pkts)
+		void *sa[], uint16_t nb_pkts)
 {
 	uint32_t i;