[v6,10/10] ipsec: add ol_flags support

Message ID 20210917091747.1528262-11-radu.nicolau@intel.com (mailing list archive)
State Superseded, archived
Delegated to: akhil goyal
Series: new features for ipsec and security libraries

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/github-robot: build success github build: passed
ci/Intel-compilation success Compilation OK
ci/intel-Testing fail Testing issues
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing warning Testing issues

Commit Message

Radu Nicolau Sept. 17, 2021, 9:17 a.m. UTC
  Update the IPsec library to set mbuf->ol_flags and use the configured
L3 header length when setting the mbuf->tx_offload fields

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Daniel Martin Buckley <daniel.m.buckley@intel.com>
---
 lib/ipsec/esp_inb.c      | 17 ++++++++++++--
 lib/ipsec/esp_outb.c     | 48 ++++++++++++++++++++++++++++++---------
 lib/ipsec/rte_ipsec_sa.h |  3 ++-
 lib/ipsec/sa.c           | 49 ++++++++++++++++++++++++++++++++++++++--
 lib/ipsec/sa.h           |  8 +++++++
 5 files changed, 109 insertions(+), 16 deletions(-)
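
For reference, the "configured L3 header length" mentioned in the commit message is carried by the new prm->tun.hdr_l3_len field added below. A minimal sketch of how an application might fill the tunnel parameters for an IPv4 outer header; the helper name, l2_len handling and next_proto choice are illustrative assumptions, not part of the patch:

#include <netinet/in.h>
#include <rte_ip.h>
#include <rte_ipsec.h>

/* Hypothetical helper: fill the tunnel part of rte_ipsec_sa_prm for an
 * IPv4 outer header preceded by l2_len bytes of L2 header. */
static void
fill_tun_prm(struct rte_ipsec_sa_prm *prm, const void *hdr_templ,
	uint8_t l2_len)
{
	prm->tun.hdr = hdr_templ;                          /* outer header template */
	prm->tun.hdr_l3_off = l2_len;                      /* offset of outer L3 header */
	prm->tun.hdr_l3_len = sizeof(struct rte_ipv4_hdr); /* new field: outer L3 header length */
	prm->tun.hdr_len = l2_len + sizeof(struct rte_ipv4_hdr);
	prm->tun.next_proto = IPPROTO_IPIP;                /* inner packet is IPv4 */
}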
  

Comments

Fan Zhang Sept. 22, 2021, 1:18 p.m. UTC | #1
> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Friday, September 17, 2021 10:18 AM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Iremonger,
> Bernard <bernard.iremonger@intel.com>; Medvedkin, Vladimir
> <vladimir.medvedkin@intel.com>
> Cc: dev@dpdk.org; mdr@ashroe.eu; Richardson, Bruce
> <bruce.richardson@intel.com>; Zhang, Roy Fan <roy.fan.zhang@intel.com>;
> hemant.agrawal@nxp.com; gakhil@marvell.com; anoobj@marvell.com;
> Doherty, Declan <declan.doherty@intel.com>; Sinha, Abhijit
> <abhijit.sinha@intel.com>; Buckley, Daniel M <daniel.m.buckley@intel.com>;
> marchana@marvell.com; ktejasree@marvell.com; matan@nvidia.com;
> Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v6 10/10] ipsec: add ol_flags support
> 
> Update the IPsec library to set mbuf->ol_flags and use the configured
> L3 header length when setting the mbuf->tx_offload fields
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
> Signed-off-by: Daniel Martin Buckley <daniel.m.buckley@intel.com>

Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
  
Ananyev, Konstantin Sept. 24, 2021, 11:39 a.m. UTC | #2
> Update the IPsec library to set mbuf->ol_flags and use the configured
> L3 header length when setting the mbuf->tx_offload fields

You stated what the patch does, but didn't explain why it is needed.

> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
> Signed-off-by: Daniel Martin Buckley <daniel.m.buckley@intel.com>
> ---
>  lib/ipsec/esp_inb.c      | 17 ++++++++++++--
>  lib/ipsec/esp_outb.c     | 48 ++++++++++++++++++++++++++++++---------
>  lib/ipsec/rte_ipsec_sa.h |  3 ++-
>  lib/ipsec/sa.c           | 49 ++++++++++++++++++++++++++++++++++++++--
>  lib/ipsec/sa.h           |  8 +++++++
>  5 files changed, 109 insertions(+), 16 deletions(-)
> 
> diff --git a/lib/ipsec/esp_inb.c b/lib/ipsec/esp_inb.c
> index 8cb4c16302..5fcb41297e 100644
> --- a/lib/ipsec/esp_inb.c
> +++ b/lib/ipsec/esp_inb.c
> @@ -559,7 +559,8 @@ trs_process_step3(struct rte_mbuf *mb)
>   * - tx_offload
>   */
>  static inline void
> -tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
> +tun_process_step3(struct rte_mbuf *mb, uint8_t is_ipv4, uint64_t txof_msk,
> +	uint64_t txof_val)
>  {
>  	/* reset mbuf metatdata: L2/L3 len, packet type */
>  	mb->packet_type = RTE_PTYPE_UNKNOWN;
> @@ -567,6 +568,14 @@ tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
> 
>  	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
>  	mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
> +
> +	if (is_ipv4) {
> +		mb->l3_len = sizeof(struct rte_ipv4_hdr);
> +		mb->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
> +	} else {
> +		mb->l3_len = sizeof(struct rte_ipv6_hdr);
> +		mb->ol_flags |= PKT_TX_IPV6;
> +	}

Those are TX-related flags.
Why do you set them for inbound traffic?

>  }
> 
>  /*
> @@ -618,8 +627,12 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
>  			update_tun_inb_l3hdr(sa, outh, inh);
> 
>  			/* update mbuf's metadata */
> -			tun_process_step3(mb[i], sa->tx_offload.msk,
> +			tun_process_step3(mb[i],
> +				(sa->type & RTE_IPSEC_SATP_IPV_MASK) ==
> +					RTE_IPSEC_SATP_IPV4 ? 1 : 0,
> +				sa->tx_offload.msk,
>  				sa->tx_offload.val);
> +
>  			k++;
>  		} else
>  			dr[i - k] = i;
> diff --git a/lib/ipsec/esp_outb.c b/lib/ipsec/esp_outb.c
> index 8a6d09558f..d8e261e6fb 100644
> --- a/lib/ipsec/esp_outb.c
> +++ b/lib/ipsec/esp_outb.c
> @@ -19,7 +19,7 @@
> 
>  typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>  	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
> -	union sym_op_data *icv, uint8_t sqh_len);
> +	union sym_op_data *icv, uint8_t sqh_len, uint8_t icrypto);

Sigh, what does icrypto mean and why is it needed?
It really would help if you bothered to put at least some comments explaining your changes.
If that's for inline-crypto-specific add-ons, then why does it have to be there, in a generic function?
Why not in inline_outb_tun_pkt_process()?

> 
>  /*
>   * helper function to fill crypto_sym op for cipher+auth algorithms.
> @@ -140,9 +140,9 @@ outb_cop_prepare(struct rte_crypto_op *cop,
>  static inline int32_t
>  outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>  	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
> -	union sym_op_data *icv, uint8_t sqh_len)
> +	union sym_op_data *icv, uint8_t sqh_len, uint8_t icrypto)
>  {
> -	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
> +	uint32_t clen, hlen, l2len, l3len, pdlen, pdofs, plen, tlen;
>  	struct rte_mbuf *ml;
>  	struct rte_esp_hdr *esph;
>  	struct rte_esp_tail *espt;
> @@ -154,6 +154,8 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
> 
>  	/* size of ipsec protected data */
>  	l2len = mb->l2_len;
> +	l3len = mb->l3_len;
> +
>  	plen = mb->pkt_len - l2len;
> 
>  	/* number of bytes to encrypt */
> @@ -190,8 +192,26 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>  	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
> 
>  	/* update pkt l2/l3 len */
> -	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
> -		sa->tx_offload.val;
> +	if (icrypto) {
> +		mb->tx_offload =
> +			(mb->tx_offload & sa->inline_crypto.tx_offload.msk) |
> +			sa->inline_crypto.tx_offload.val;
> +		mb->l3_len = l3len;
> +
> +		mb->ol_flags |= sa->inline_crypto.tx_ol_flags;
> +
> +		/* set ip checksum offload for inner */
> +		if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)
> +			mb->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
> +		else if ((sa->type & RTE_IPSEC_SATP_IPV_MASK)
> +				== RTE_IPSEC_SATP_IPV6)
> +			mb->ol_flags |= PKT_TX_IPV6;
> +	} else {
> +		mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
> +			sa->tx_offload.val;
> +
> +		mb->ol_flags |= sa->tx_ol_flags;
> +	}
> 
>  	/* copy tunnel pkt header */
>  	rte_memcpy(ph, sa->hdr, sa->hdr_len);
> @@ -311,7 +331,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> 
>  		/* try to update the packet itself */
>  		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
> -					  sa->sqh_len);
> +					  sa->sqh_len, 0);
>  		/* success, setup crypto op */
>  		if (rc >= 0) {
>  			outb_pkt_xprepare(sa, sqc, &icv);
> @@ -338,7 +358,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>  static inline int32_t
>  outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>  	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
> -	union sym_op_data *icv, uint8_t sqh_len)
> +	union sym_op_data *icv, uint8_t sqh_len, uint8_t icrypto __rte_unused)
>  {
>  	uint8_t np;
>  	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
> @@ -394,10 +414,16 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
>  	/* shift L2/L3 headers */
>  	insert_esph(ph, ph + hlen, uhlen);
> 
> +	if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)
> +		mb->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
> +	else if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV6)
> +		mb->ol_flags |= PKT_TX_IPV6;

Why does the ipsec lib now have to set up these flags unconditionally?
If that's a change in functionality - it should be documented properly
with a clear explanation of why.
If you believe it is a bug in the current ipsec implementation - then it should be a
separate 'fix' patch.
But so far, I don't see any good reason for that.

>  	/* update ip  header fields */
>  	np = update_trs_l34hdrs(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
>  			l3len, IPPROTO_ESP, tso);
> 
> +

Empty line.

>  	/* update spi, seqn and iv */
>  	esph = (struct rte_esp_hdr *)(ph + uhlen);
>  	iv = (uint64_t *)(esph + 1);
> @@ -463,7 +489,7 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> 
>  		/* try to update the packet itself */
>  		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
> -				  sa->sqh_len);
> +				  sa->sqh_len, 0);
>  		/* success, setup crypto op */
>  		if (rc >= 0) {
>  			outb_pkt_xprepare(sa, sqc, &icv);
> @@ -560,7 +586,7 @@ cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
>  		gen_iv(ivbuf[k], sqc);
> 
>  		/* try to update the packet itself */
> -		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);
> +		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);
> 
>  		/* success, proceed with preparations */
>  		if (rc >= 0) {
> @@ -741,7 +767,7 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
>  		gen_iv(iv, sqc);
> 
>  		/* try to update the packet itself */
> -		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
> +		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0, 1);
> 
>  		k += (rc >= 0);
> 
> @@ -808,7 +834,7 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
>  		gen_iv(iv, sqc);
> 
>  		/* try to update the packet itself */
> -		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
> +		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0, 0);
> 
>  		k += (rc >= 0);
> 
> diff --git a/lib/ipsec/rte_ipsec_sa.h b/lib/ipsec/rte_ipsec_sa.h
> index 40d1e70d45..3c36dcaa77 100644
> --- a/lib/ipsec/rte_ipsec_sa.h
> +++ b/lib/ipsec/rte_ipsec_sa.h
> @@ -38,7 +38,8 @@ struct rte_ipsec_sa_prm {
>  	union {
>  		struct {
>  			uint8_t hdr_len;     /**< tunnel header len */
> -			uint8_t hdr_l3_off;  /**< offset for IPv4/IPv6 header */
> +			uint8_t hdr_l3_off;   /**< tunnel l3 header len */
> +			uint8_t hdr_l3_len;   /**< tunnel l3 header len */
>  			uint8_t next_proto;  /**< next header protocol */
>  			const void *hdr;     /**< tunnel header template */
>  		} tun; /**< tunnel mode related parameters */
> diff --git a/lib/ipsec/sa.c b/lib/ipsec/sa.c
> index d94684cf96..149ed5dd4f 100644
> --- a/lib/ipsec/sa.c
> +++ b/lib/ipsec/sa.c
> @@ -17,6 +17,8 @@
> 
>  #define MBUF_MAX_L2_LEN		RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
>  #define MBUF_MAX_L3_LEN		RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)
> +#define MBUF_MAX_TSO_LEN	RTE_LEN2MASK(RTE_MBUF_TSO_SEGSZ_BITS, uint64_t)
> +#define MBUF_MAX_OL3_LEN	RTE_LEN2MASK(RTE_MBUF_OUTL3_LEN_BITS, uint64_t)
> 
>  /* some helper structures */
>  struct crypto_xform {
> @@ -348,6 +350,11 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen, uint64_t sqn)
>  	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
>  	sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
>  			(sa->ctp.cipher.offset + sa->ctp.cipher.length);
> +
> +	if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4)
> +		sa->tx_ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
> +	else if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV6)
> +		sa->tx_ol_flags |= PKT_TX_IPV6;

Same question: why do these flags have to be *always* and unconditionally set?
This is a change in behaviour and there should be a really good reason for it.
And if we decide to proceed with it - it should be clearly outlined in both the docs
and the commit message.

>  }
> 
>  /*
> @@ -362,9 +369,43 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
>  	sa->hdr_len = prm->tun.hdr_len;
>  	sa->hdr_l3_off = prm->tun.hdr_l3_off;
> 
> +
> +	/* update l2_len and l3_len fields for outbound mbuf */
> +	sa->inline_crypto.tx_offload.val = rte_mbuf_tx_offload(
> +			0,			/* iL2_LEN */
> +			0,			/* iL3_LEN */
> +			0,			/* iL4_LEN */
> +			0,			/* TSO_SEG_SZ */
> +			prm->tun.hdr_l3_len,	/* oL3_LEN */
> +			prm->tun.hdr_l3_off,	/* oL2_LEN */
> +			0);
> +	sa->inline_crypto.tx_ol_flags |= PKT_TX_TUNNEL_ESP;
> +
> +	if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4)
> +		sa->inline_crypto.tx_ol_flags |= PKT_TX_OUTER_IPV4;
> +	else if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV6)
> +		sa->inline_crypto.tx_ol_flags |= PKT_TX_OUTER_IPV6;
> +
> +	if (sa->inline_crypto.tx_ol_flags & PKT_TX_OUTER_IPV4)
> +		sa->inline_crypto.tx_ol_flags |= PKT_TX_OUTER_IP_CKSUM;
> +	if (sa->tx_ol_flags & PKT_TX_IPV4)
> +		sa->inline_crypto.tx_ol_flags |= PKT_TX_IP_CKSUM;
> +
>  	/* update l2_len and l3_len fields for outbound mbuf */
> -	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
> -		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);
> +	sa->tx_offload.val = rte_mbuf_tx_offload(
> +			prm->tun.hdr_l3_off,	/* iL2_LEN */
> +			prm->tun.hdr_l3_len,	/* iL3_LEN */

Sigh, again that's a change in the current behaviour.
What would happen to old apps that don't set hdr_l3_len properly?

> +			0,			/* iL4_LEN */
> +			0,			/* TSO_SEG_SZ */
> +			0,			/* oL3_LEN */
> +			0,			/* oL2_LEN */
> +			0);
> +
> +	if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4)
> +		sa->tx_ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
> +	else if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV6)
> +		sa->tx_ol_flags |= PKT_TX_IPV6;
> 
>  	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
> 
> @@ -473,6 +514,10 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
>  	sa->salt = prm->ipsec_xform.salt;
> 
>  	/* preserve all values except l2_len and l3_len */
> +	sa->inline_crypto.tx_offload.msk =
> +		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
> +				0, 0, MBUF_MAX_OL3_LEN, 0, 0);
> +
>  	sa->tx_offload.msk =
>  		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
>  				0, 0, 0, 0, 0);
> diff --git a/lib/ipsec/sa.h b/lib/ipsec/sa.h
> index b9b7ebec5b..172d094c4b 100644
> --- a/lib/ipsec/sa.h
> +++ b/lib/ipsec/sa.h
> @@ -101,6 +101,14 @@ struct rte_ipsec_sa {
>  		uint64_t msk;
>  		uint64_t val;
>  	} tx_offload;
> +	uint64_t tx_ol_flags;
> +	struct {
> +		uint64_t tx_ol_flags;
> +		struct {
> +			uint64_t msk;
> +			uint64_t val;
> +		} tx_offload;
> +	} inline_crypto;

I don't see any reason why we need two tx_offload fields (and tx_ol_flags) -
one generic, a second for inline.
I believe both can be squeezed into just one value.
For different methods (inline/lookaside) these values will just be different.
Probably these fields need to be moved from struct rte_ipsec_sa to rte_ipsec_session.
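
A rough sketch of the single-value approach suggested here - the struct name, placement and flag choices are assumptions, not an agreed design:

#include <stdint.h>
#include <rte_mbuf.h>

/*
 * Hypothetical consolidation: one per-session TX metadata blob, filled once
 * at session prepare time, instead of two copies inside struct rte_ipsec_sa.
 * For a lookaside session ol_flags would hold e.g. PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
 * for an inline-crypto tunnel session PKT_TX_TUNNEL_ESP | PKT_TX_OUTER_IPV4, etc.
 */
struct ipsec_ses_tx_meta {
	uint64_t ol_flags;      /* PKT_TX_* bits to OR into mb->ol_flags */
	struct {
		uint64_t msk;   /* mb->tx_offload bits to preserve */
		uint64_t val;   /* mb->tx_offload bits to set */
	} tx_offload;
};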

>  	struct {
>  		uint16_t sport;
>  		uint16_t dport;
> --
> 2.25.1
  

Patch

diff --git a/lib/ipsec/esp_inb.c b/lib/ipsec/esp_inb.c
index 8cb4c16302..5fcb41297e 100644
--- a/lib/ipsec/esp_inb.c
+++ b/lib/ipsec/esp_inb.c
@@ -559,7 +559,8 @@  trs_process_step3(struct rte_mbuf *mb)
  * - tx_offload
  */
 static inline void
-tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
+tun_process_step3(struct rte_mbuf *mb, uint8_t is_ipv4, uint64_t txof_msk,
+	uint64_t txof_val)
 {
 	/* reset mbuf metatdata: L2/L3 len, packet type */
 	mb->packet_type = RTE_PTYPE_UNKNOWN;
@@ -567,6 +568,14 @@  tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
 
 	/* clear the PKT_RX_SEC_OFFLOAD flag if set */
 	mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD;
+
+	if (is_ipv4) {
+		mb->l3_len = sizeof(struct rte_ipv4_hdr);
+		mb->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
+	} else {
+		mb->l3_len = sizeof(struct rte_ipv6_hdr);
+		mb->ol_flags |= PKT_TX_IPV6;
+	}
 }
 
 /*
@@ -618,8 +627,12 @@  tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
 			update_tun_inb_l3hdr(sa, outh, inh);
 
 			/* update mbuf's metadata */
-			tun_process_step3(mb[i], sa->tx_offload.msk,
+			tun_process_step3(mb[i],
+				(sa->type & RTE_IPSEC_SATP_IPV_MASK) ==
+					RTE_IPSEC_SATP_IPV4 ? 1 : 0,
+				sa->tx_offload.msk,
 				sa->tx_offload.val);
+
 			k++;
 		} else
 			dr[i - k] = i;
diff --git a/lib/ipsec/esp_outb.c b/lib/ipsec/esp_outb.c
index 8a6d09558f..d8e261e6fb 100644
--- a/lib/ipsec/esp_outb.c
+++ b/lib/ipsec/esp_outb.c
@@ -19,7 +19,7 @@ 
 
 typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
-	union sym_op_data *icv, uint8_t sqh_len);
+	union sym_op_data *icv, uint8_t sqh_len, uint8_t icrypto);
 
 /*
  * helper function to fill crypto_sym op for cipher+auth algorithms.
@@ -140,9 +140,9 @@  outb_cop_prepare(struct rte_crypto_op *cop,
 static inline int32_t
 outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
-	union sym_op_data *icv, uint8_t sqh_len)
+	union sym_op_data *icv, uint8_t sqh_len, uint8_t icrypto)
 {
-	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
+	uint32_t clen, hlen, l2len, l3len, pdlen, pdofs, plen, tlen;
 	struct rte_mbuf *ml;
 	struct rte_esp_hdr *esph;
 	struct rte_esp_tail *espt;
@@ -154,6 +154,8 @@  outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 
 	/* size of ipsec protected data */
 	l2len = mb->l2_len;
+	l3len = mb->l3_len;
+
 	plen = mb->pkt_len - l2len;
 
 	/* number of bytes to encrypt */
@@ -190,8 +192,26 @@  outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
 
 	/* update pkt l2/l3 len */
-	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
-		sa->tx_offload.val;
+	if (icrypto) {
+		mb->tx_offload =
+			(mb->tx_offload & sa->inline_crypto.tx_offload.msk) |
+			sa->inline_crypto.tx_offload.val;
+		mb->l3_len = l3len;
+
+		mb->ol_flags |= sa->inline_crypto.tx_ol_flags;
+
+		/* set ip checksum offload for inner */
+		if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)
+			mb->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
+		else if ((sa->type & RTE_IPSEC_SATP_IPV_MASK)
+				== RTE_IPSEC_SATP_IPV6)
+			mb->ol_flags |= PKT_TX_IPV6;
+	} else {
+		mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
+			sa->tx_offload.val;
+
+		mb->ol_flags |= sa->tx_ol_flags;
+	}
 
 	/* copy tunnel pkt header */
 	rte_memcpy(ph, sa->hdr, sa->hdr_len);
@@ -311,7 +331,7 @@  esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 
 		/* try to update the packet itself */
 		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
-					  sa->sqh_len);
+					  sa->sqh_len, 0);
 		/* success, setup crypto op */
 		if (rc >= 0) {
 			outb_pkt_xprepare(sa, sqc, &icv);
@@ -338,7 +358,7 @@  esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 static inline int32_t
 outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
-	union sym_op_data *icv, uint8_t sqh_len)
+	union sym_op_data *icv, uint8_t sqh_len, uint8_t icrypto __rte_unused)
 {
 	uint8_t np;
 	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
@@ -394,10 +414,16 @@  outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	/* shift L2/L3 headers */
 	insert_esph(ph, ph + hlen, uhlen);
 
+	if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)
+		mb->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
+	else if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV6)
+		mb->ol_flags |= PKT_TX_IPV6;
+
 	/* update ip  header fields */
 	np = update_trs_l34hdrs(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
 			l3len, IPPROTO_ESP, tso);
 
+
 	/* update spi, seqn and iv */
 	esph = (struct rte_esp_hdr *)(ph + uhlen);
 	iv = (uint64_t *)(esph + 1);
@@ -463,7 +489,7 @@  esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 
 		/* try to update the packet itself */
 		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
-				  sa->sqh_len);
+				  sa->sqh_len, 0);
 		/* success, setup crypto op */
 		if (rc >= 0) {
 			outb_pkt_xprepare(sa, sqc, &icv);
@@ -560,7 +586,7 @@  cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
 		gen_iv(ivbuf[k], sqc);
 
 		/* try to update the packet itself */
-		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);
+		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);
 
 		/* success, proceed with preparations */
 		if (rc >= 0) {
@@ -741,7 +767,7 @@  inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
 		gen_iv(iv, sqc);
 
 		/* try to update the packet itself */
-		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
+		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0, 1);
 
 		k += (rc >= 0);
 
@@ -808,7 +834,7 @@  inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
 		gen_iv(iv, sqc);
 
 		/* try to update the packet itself */
-		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
+		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0, 0);
 
 		k += (rc >= 0);
 
diff --git a/lib/ipsec/rte_ipsec_sa.h b/lib/ipsec/rte_ipsec_sa.h
index 40d1e70d45..3c36dcaa77 100644
--- a/lib/ipsec/rte_ipsec_sa.h
+++ b/lib/ipsec/rte_ipsec_sa.h
@@ -38,7 +38,8 @@  struct rte_ipsec_sa_prm {
 	union {
 		struct {
 			uint8_t hdr_len;     /**< tunnel header len */
-			uint8_t hdr_l3_off;  /**< offset for IPv4/IPv6 header */
+			uint8_t hdr_l3_off;   /**< tunnel l3 header len */
+			uint8_t hdr_l3_len;   /**< tunnel l3 header len */
 			uint8_t next_proto;  /**< next header protocol */
 			const void *hdr;     /**< tunnel header template */
 		} tun; /**< tunnel mode related parameters */
diff --git a/lib/ipsec/sa.c b/lib/ipsec/sa.c
index d94684cf96..149ed5dd4f 100644
--- a/lib/ipsec/sa.c
+++ b/lib/ipsec/sa.c
@@ -17,6 +17,8 @@ 
 
 #define MBUF_MAX_L2_LEN		RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
 #define MBUF_MAX_L3_LEN		RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)
+#define MBUF_MAX_TSO_LEN	RTE_LEN2MASK(RTE_MBUF_TSO_SEGSZ_BITS, uint64_t)
+#define MBUF_MAX_OL3_LEN	RTE_LEN2MASK(RTE_MBUF_OUTL3_LEN_BITS, uint64_t)
 
 /* some helper structures */
 struct crypto_xform {
@@ -348,6 +350,11 @@  esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen, uint64_t sqn)
 	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
 	sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
 			(sa->ctp.cipher.offset + sa->ctp.cipher.length);
+
+	if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4)
+		sa->tx_ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
+	else if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV6)
+		sa->tx_ol_flags |= PKT_TX_IPV6;
 }
 
 /*
@@ -362,9 +369,43 @@  esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
 	sa->hdr_len = prm->tun.hdr_len;
 	sa->hdr_l3_off = prm->tun.hdr_l3_off;
 
+
+	/* update l2_len and l3_len fields for outbound mbuf */
+	sa->inline_crypto.tx_offload.val = rte_mbuf_tx_offload(
+			0,			/* iL2_LEN */
+			0,			/* iL3_LEN */
+			0,			/* iL4_LEN */
+			0,			/* TSO_SEG_SZ */
+			prm->tun.hdr_l3_len,	/* oL3_LEN */
+			prm->tun.hdr_l3_off,	/* oL2_LEN */
+			0);
+
+	sa->inline_crypto.tx_ol_flags |= PKT_TX_TUNNEL_ESP;
+
+	if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4)
+		sa->inline_crypto.tx_ol_flags |= PKT_TX_OUTER_IPV4;
+	else if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV6)
+		sa->inline_crypto.tx_ol_flags |= PKT_TX_OUTER_IPV6;
+
+	if (sa->inline_crypto.tx_ol_flags & PKT_TX_OUTER_IPV4)
+		sa->inline_crypto.tx_ol_flags |= PKT_TX_OUTER_IP_CKSUM;
+	if (sa->tx_ol_flags & PKT_TX_IPV4)
+		sa->inline_crypto.tx_ol_flags |= PKT_TX_IP_CKSUM;
+
 	/* update l2_len and l3_len fields for outbound mbuf */
-	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
-		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);
+	sa->tx_offload.val = rte_mbuf_tx_offload(
+			prm->tun.hdr_l3_off,	/* iL2_LEN */
+			prm->tun.hdr_l3_len,	/* iL3_LEN */
+			0,			/* iL4_LEN */
+			0,			/* TSO_SEG_SZ */
+			0,			/* oL3_LEN */
+			0,			/* oL2_LEN */
+			0);
+
+	if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4)
+		sa->tx_ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM);
+	else if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV6)
+		sa->tx_ol_flags |= PKT_TX_IPV6;
 
 	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
 
@@ -473,6 +514,10 @@  esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 	sa->salt = prm->ipsec_xform.salt;
 
 	/* preserve all values except l2_len and l3_len */
+	sa->inline_crypto.tx_offload.msk =
+		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
+				0, 0, MBUF_MAX_OL3_LEN, 0, 0);
+
 	sa->tx_offload.msk =
 		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
 				0, 0, 0, 0, 0);
diff --git a/lib/ipsec/sa.h b/lib/ipsec/sa.h
index b9b7ebec5b..172d094c4b 100644
--- a/lib/ipsec/sa.h
+++ b/lib/ipsec/sa.h
@@ -101,6 +101,14 @@  struct rte_ipsec_sa {
 		uint64_t msk;
 		uint64_t val;
 	} tx_offload;
+	uint64_t tx_ol_flags;
+	struct {
+		uint64_t tx_ol_flags;
+		struct {
+			uint64_t msk;
+			uint64_t val;
+		} tx_offload;
+	} inline_crypto;
 	struct {
 		uint16_t sport;
 		uint16_t dport;