[08/10] ipsec: add rte_security cpu_crypto action support

Message ID: 20190906131330.40185-9-roy.fan.zhang@intel.com
State: Changes Requested, archived
Delegated to: Akhil Goyal
Series: security: add software synchronous crypto process

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Fan Zhang Sept. 6, 2019, 1:13 p.m. UTC
  This patch updates the ipsec library to handle the newly introduced
RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO action.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 lib/librte_ipsec/esp_inb.c  | 174 +++++++++++++++++++++++++-
 lib/librte_ipsec/esp_outb.c | 290 +++++++++++++++++++++++++++++++++++++++++++-
 lib/librte_ipsec/sa.c       |  53 ++++++--
 lib/librte_ipsec/sa.h       |  29 +++++
 lib/librte_ipsec/ses.c      |   4 +-
 5 files changed, 539 insertions(+), 11 deletions(-)
  

Comments

Ananyev, Konstantin Sept. 26, 2019, 11:20 p.m. UTC | #1
Hi Fan,

...
> diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
> index 55799a867..097cb663f 100644
> --- a/lib/librte_ipsec/esp_outb.c
> +++ b/lib/librte_ipsec/esp_outb.c
> @@ -403,6 +403,292 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>  	return k;
>  }
> 
> +
> +static inline int
> +outb_sync_crypto_proc_prepare(struct rte_mbuf *m, const struct rte_ipsec_sa *sa,
> +		const uint64_t ivp[IPSEC_MAX_IV_QWORD],
> +		const union sym_op_data *icv, uint32_t hlen, uint32_t plen,
> +		struct rte_security_vec *buf, struct iovec *cur_vec, void *iv,
> +		void **aad, void **digest)
> +{
> +	struct rte_mbuf *ms;
> +	struct aead_gcm_iv *gcm;
> +	struct aesctr_cnt_blk *ctr;
> +	struct iovec *vec = cur_vec;
> +	uint32_t left, off = 0, n_seg = 0;

Please separate variable definitions from value assignments.
Mixing them makes the code hard to read; besides, we don't do that in the
rest of the library, so better to follow the existing code style.
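I.e. a minimal example of the expected style, using the names from this patch:

	struct iovec *vec;
	uint32_t left, off, n_seg;

	vec = cur_vec;
	off = 0;
	n_seg = 0;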

> +	uint32_t algo;
> +
> +	algo = sa->algo_type;
> +
> +	switch (algo) {
> +	case ALGO_TYPE_AES_GCM:
> +		gcm = iv;
> +		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
> +		*aad = (void *)(icv->va + sa->icv_len);

Why do we want to allocate the aad inside the packet at all?
Why not just do that on the stack instead?
In that case you probably wouldn't need to pass this icv stuff to that
function at all.

> +		off = sa->ctp.cipher.offset + hlen;
> +		break;
> +	case ALGO_TYPE_AES_CBC:
> +	case ALGO_TYPE_3DES_CBC:
> +		off = sa->ctp.auth.offset + hlen;
> +		break;
> +	case ALGO_TYPE_AES_CTR:
> +		ctr = iv;
> +		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
> +		break;
> +	case ALGO_TYPE_NULL:
> +		break;

For the last two cases, why is off zero?
Shouldn't it at least be 'hlen'?
In fact, I think it needs to be: sa->ctp.auth.offset + hlen;

> +	}
> +
> +	*digest = (void *)icv->va;

Could be done in the upper layer function, together with aad assignment, I think.

Looking at this function, it seems to consist of 2 separate parts:
1. calculate the offset and generate the IV;
2. set up the iovec[].
Probably worth splitting it into 2 separate functions along those lines.
That would be much easier to read/understand.
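As a sketch, part 1 could become its own helper (the name is mine, and I am
assuming the CTR/NULL offsets get fixed to sa->ctp.auth.offset + hlen as
suggested above):

static inline uint32_t
outb_sync_iv_fill(const struct rte_ipsec_sa *sa,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], uint32_t hlen, void *iv)
{
	uint32_t off;

	off = 0;
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		aead_gcm_iv_fill(iv, ivp[0], sa->salt);
		off = sa->ctp.cipher.offset + hlen;
		break;
	case ALGO_TYPE_AES_CTR:
		aes_ctr_cnt_blk_fill(iv, ivp[0], sa->salt);
		/* fall-through */
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
	case ALGO_TYPE_NULL:
		off = sa->ctp.auth.offset + hlen;
		break;
	}

	return off;
}

Part 2 would then only deal with filling the iovec[] (see the helper
suggested below).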

> +
> +	left = sa->ctp.cipher.length + plen;
> +
> +	ms = mbuf_get_seg_ofs(m, &off);
> +	if (!ms)
> +		return -1;

outb_tun_pkt_prepare() should already check that we have a valid packet.
I don't think there is a need to check for any failure here.
Another thing: our ESP header will be in the first segment for sure,
so do we need mbuf_get_seg_ofs() here at all?

> +
> +	while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {

I don't think this is right; we shouldn't impose additional limitations
on the number of segments in the packet.

> +		uint32_t len = RTE_MIN(left, ms->data_len - off);
> +
> +		vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
> +		vec->iov_len = len;
> +
> +		left -= len;
> +		vec++;
> +		n_seg++;
> +		ms = ms->next;
> +		off = 0;


The whole construction seems a bit over-complicated here...
Why not have a separate function that fills iovec[] from an mbuf
and returns an error if there are not enough iovec[] entries?
Something like:

static inline int
mbuf_to_iovec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct iovec vec[], uint32_t num)
{
	uint32_t i;
	const struct rte_mbuf *ms;

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].iov_base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iov_len = mb->data_len - ofs;

	for (i = 1, ms = mb->next; ms != NULL; ms = ms->next, i++) {
		vec[i].iov_base = rte_pktmbuf_mtod(ms, void *);
		vec[i].iov_len = ms->data_len;
	}

	/* trim the last entry down to the requested total length */
	vec[i - 1].iov_len -= mb->pkt_len - ofs - len;
	return i;
}

Then we can use that function to fill our iovec[] in a loop.
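E.g. (a sketch; 'off' and 'clen' stand for the computed data offset and
length, 'vec_num' for the total size of vec[]):

		rc = mbuf_to_iovec(mb[i], off, clen, &vec[vec_idx],
				vec_num - vec_idx);
		if (rc < 0) {
			/* not enough iovec[] entries, death-row the packet */
			dr[i - k] = i;
			continue;
		}

		buf[k].vec = &vec[vec_idx];
		buf[k].num = rc;
		vec_idx += rc;
		k++;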

> +	}
> +
> +	if (left)
> +		return -1;
> +
> +	buf->vec = cur_vec;
> +	buf->num = n_seg;
> +
> +	return n_seg;
> +}
> +
> +/**
> + * Local post process function prototype that same as process function prototype
> + * as rte_ipsec_sa_pkt_func's process().
> + */
> +typedef uint16_t (*sync_crypto_post_process)(const struct rte_ipsec_session *ss,
> +				struct rte_mbuf *mb[],
> +				uint16_t num);

Style nit: typedef names should follow the newtype_t convention.
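I.e. something like (the _t name is just an example):

typedef uint16_t (*sync_crypto_post_process_t)(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num);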

> +static uint16_t
> +esp_outb_tun_sync_crypto_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num,
> +		sync_crypto_post_process post_process)
> +{
> +	uint64_t sqn;
> +	rte_be64_t sqc;
> +	struct rte_ipsec_sa *sa;
> +	struct rte_security_ctx *ctx;
> +	struct rte_security_session *rss;
> +	union sym_op_data icv;
> +	struct rte_security_vec buf[num];
> +	struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
> +	uint32_t vec_idx = 0;
> +	void *aad[num];
> +	void *digest[num];
> +	void *iv[num];
> +	uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
> +	uint64_t ivp[IPSEC_MAX_IV_QWORD];

Why do we need both ivs and ivp?

> +	int status[num];
> +	uint32_t dr[num];
> +	uint32_t i, n, k;
> +	int32_t rc;
> +
> +	sa = ss->sa;
> +	ctx = ss->security.ctx;
> +	rss = ss->security.ses;
> +
> +	k = 0;
> +	n = num;
> +	sqn = esn_outb_update_sqn(sa, &n);
> +	if (n != num)
> +		rte_errno = EOVERFLOW;
> +
> +	for (i = 0; i != n; i++) {
> +		sqc = rte_cpu_to_be_64(sqn + i);
> +		gen_iv(ivp, sqc);
> +
> +		/* try to update the packet itself */
> +		rc = outb_tun_pkt_prepare(sa, sqc, ivp, mb[i], &icv,
> +				sa->sqh_len);
> +
> +		/* success, setup crypto op */
> +		if (rc >= 0) {
> +			outb_pkt_xprepare(sa, sqc, &icv);

We probably need something like outb_pkt_sync_xprepare(sa, sqc, &aad[i]); here,
to avoid using space in the packet for the aad.
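A sketch of such a helper, assuming the current body of outb_pkt_xprepare()
and the existing aead_gcm_aad_fill()/sqn_hi32() helpers:

static inline void
outb_pkt_sync_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv, struct aead_gcm_aad *aad)
{
	uint32_t *psqh;

	/* insert SQN.hi between ESP trailer and ICV, same as today */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/* AAD goes into the caller-provided (stack) buffer,
	 * not into the packet */
	if (sa->aad_len != 0)
		aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
}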

> +
> +			iv[k] = (void *)ivs[k];

Do we really need type conversion here?

> +			rc = outb_sync_crypto_proc_prepare(mb[i], sa, ivp, &icv,
> +					0, rc, &buf[k], &vec[vec_idx], iv[k],
> +					&aad[k], &digest[k]);



> +			if (rc < 0) {
> +				dr[i - k] = i;
> +				rte_errno = -rc;
> +				continue;
> +			}
> +
> +			vec_idx += rc;
> +			k++;
> +		/* failure, put packet into the death-row */
> +		} else {
> +			dr[i - k] = i;
> +			rte_errno = -rc;
> +		}
> +	}
> +
> +	 /* copy not prepared mbufs beyond good ones */
> +	if (k != n && k != 0)
> +		move_bad_mbufs(mb, dr, n, n - k);
> +
> +	if (unlikely(k == 0)) {

I don't think 'unlikely' will make any difference here.

> +		rte_errno = EBADMSG;
> +		return 0;
> +	}
> +
> +	/* process the packets */
> +	n = 0;
> +	rte_security_process_cpu_crypto_bulk(ctx, rss, buf, iv, aad, digest,
> +			status, k);

Looking at the code below, I think it would be worthwhile to make
rte_security_process_cpu_crypto_bulk() return the number of failures
(or the number of successes).
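With such (hypothetical) return semantics the tail of this function could
shrink to:

	/* n is the number of failed buffers */
	n = rte_security_process_cpu_crypto_bulk(ctx, rss, buf, iv, aad,
			digest, status, k);
	if (n != 0) {
		rte_errno = EBADMSG;
		for (i = 0, n = 0; i != k; i++) {
			if (status[i] != 0)
				dr[n++] = i;
		}
		if (n != k)
			move_bad_mbufs(mb, dr, k, n);
	}

	return post_process(ss, mb, k - n);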

> +	/* move failed process packets to dr */
> +	for (i = 0; i < n; i++) {

That loop will never be executed.
Should be i < k.

> +		if (status[i])
> +			dr[n++] = i;

Forgot to set rte_errno.

> +	}
> +
> +	if (n)

if (n != 0 && n != k)

> +		move_bad_mbufs(mb, dr, k, n);
> +
> +	return post_process(ss, mb, k - n);
> +}
> +
> +static uint16_t
> +esp_outb_trs_sync_crypto_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num,
> +		sync_crypto_post_process post_process)
> +
> +{
> +	uint64_t sqn;
> +	rte_be64_t sqc;
> +	struct rte_ipsec_sa *sa;
> +	struct rte_security_ctx *ctx;
> +	struct rte_security_session *rss;
> +	union sym_op_data icv;
> +	struct rte_security_vec buf[num];
> +	struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
> +	uint32_t vec_idx = 0;
> +	void *aad[num];
> +	void *digest[num];
> +	uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
> +	void *iv[num];
> +	int status[num];
> +	uint64_t ivp[IPSEC_MAX_IV_QWORD];
> +	uint32_t dr[num];
> +	uint32_t i, n, k;
> +	uint32_t l2, l3;
> +	int32_t rc;
> +
> +	sa = ss->sa;
> +	ctx = ss->security.ctx;
> +	rss = ss->security.ses;
> +
> +	k = 0;
> +	n = num;
> +	sqn = esn_outb_update_sqn(sa, &n);
> +	if (n != num)
> +		rte_errno = EOVERFLOW;
> +
> +	for (i = 0; i != n; i++) {
> +		l2 = mb[i]->l2_len;
> +		l3 = mb[i]->l3_len;
> +
> +		sqc = rte_cpu_to_be_64(sqn + i);
> +		gen_iv(ivp, sqc);
> +
> +		/* try to update the packet itself */
> +		rc = outb_trs_pkt_prepare(sa, sqc, ivp, mb[i], l2, l3, &icv,
> +				sa->sqh_len);
> +
> +		/* success, setup crypto op */
> +		if (rc >= 0) {
> +			outb_pkt_xprepare(sa, sqc, &icv);
> +
> +			iv[k] = (void *)ivs[k];
> +
> +			rc = outb_sync_crypto_proc_prepare(mb[i], sa, ivp, &icv,
> +					l2 + l3, rc, &buf[k], &vec[vec_idx],
> +					iv[k], &aad[k], &digest[k]);
> +			if (rc < 0) {
> +				dr[i - k] = i;
> +				rte_errno = -rc;
> +				continue;
> +			}
> +
> +			vec_idx += rc;
> +			k++;
> +		/* failure, put packet into the death-row */
> +		} else {
> +			dr[i - k] = i;
> +			rte_errno = -rc;
> +		}
> +	}
> +
> +	 /* copy not prepared mbufs beyond good ones */
> +	if (k != n && k != 0)
> +		move_bad_mbufs(mb, dr, n, n - k);


You don't really need to do it here.
Just one such call at the very end should be enough.

> +
> +	/* process the packets */
> +	n = 0;
> +	rte_security_process_cpu_crypto_bulk(ctx, rss, buf, iv, aad, digest,
> +			status, k);
> +	/* move failed process packets to dr */
> +	for (i = 0; i < k; i++) {
> +		if (status[i])
> +			dr[n++] = i;
> +	}
> +
> +	if (n)
> +		move_bad_mbufs(mb, dr, k, n);
> +
> +	return post_process(ss, mb, k - n);
> +}
> +
> +uint16_t
> +esp_outb_tun_sync_crpyto_sqh_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num)
> +{
> +	return esp_outb_tun_sync_crypto_process(ss, mb, num,
> +			esp_outb_sqh_process);

esp_outb_sqh_process() relies on PKT_RX_SEC_OFFLOAD_FAILED being set
in mb->ol_flags for failed packets.
First, in the _sync_ case no-one will set it for you.
Second, for _sync_ you don't really need it; it is just extra overhead here.
So I think you can't reuse this function without some modifications.
Probably easier to make a new one (and extract some common code into
another helper function that esp_outb_sqh_process() and the new one can call).
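E.g. a sketch of the common part, assuming remove_sqh() and the current
esp_outb_sqh_process() body:

static inline void
outb_sqh_fix_packet(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb)
{
	uint32_t icv_len, *icv;
	struct rte_mbuf *ml;

	icv_len = sa->icv_len;

	/* cut SQN.hi and move ICV bytes back to their proper place */
	ml = rte_pktmbuf_lastseg(mb);
	mb->pkt_len -= sa->sqh_len;
	ml->data_len -= sa->sqh_len;
	icv = rte_pktmbuf_mtod_offset(ml, uint32_t *,
		ml->data_len - icv_len);
	remove_sqh(icv, icv_len);
}

esp_outb_sqh_process() would then call it only for packets without
PKT_RX_SEC_OFFLOAD_FAILED, while the new _sync_ variant could call it
for all remaining packets.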

> +}
> +
> +uint16_t
> +esp_outb_tun_sync_crpyto_flag_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num)
> +{
> +	return esp_outb_tun_sync_crypto_process(ss, mb, num,
> +			esp_outb_pkt_flag_process);

Same as above, plus the fact that you made esp_outb_pkt_flag_process()
non-static, so the compiler won't be able to inline it.

> +}
> +
> +uint16_t
> +esp_outb_trs_sync_crpyto_sqh_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num)
> +{
> +	return esp_outb_trs_sync_crypto_process(ss, mb, num,
> +			esp_outb_sqh_process);
> +}
> +
> +uint16_t
> +esp_outb_trs_sync_crpyto_flag_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num)
> +{
> +	return esp_outb_trs_sync_crypto_process(ss, mb, num,
> +			esp_outb_pkt_flag_process);
> +}
> +
>  /*
>   * process outbound packets for SA with ESN support,
>   * for algorithms that require SQN.hibits to be implictly included
> @@ -410,8 +696,8 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>   * In that case we have to move ICV bytes back to their proper place.
>   */
>  uint16_t
> -esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> -	uint16_t num)
> +esp_outb_sqh_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num)

Any purpose for that change?

>  {
>  	uint32_t i, k, icv_len, *icv;
>  	struct rte_mbuf *ml;
> diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
> index 23d394b46..31ffbce2c 100644
> --- a/lib/librte_ipsec/sa.c
> +++ b/lib/librte_ipsec/sa.c
> @@ -544,9 +544,9 @@ lksd_proto_prepare(const struct rte_ipsec_session *ss,
>   * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
>   * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
>   */
> -static uint16_t
> -pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> -	uint16_t num)
> +uint16_t
> +esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num)


Why rename this function?
As the comment above it states, the function is used for both the inbound
and outbound code paths.
Such renaming seems misleading to me.

>  {
>  	uint32_t i, k;
>  	uint32_t dr[num];
> @@ -599,12 +599,48 @@ lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
>  	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
>  		pf->prepare = esp_outb_tun_prepare;
>  		pf->process = (sa->sqh_len != 0) ?
> -			esp_outb_sqh_process : pkt_flag_process;
> +			esp_outb_sqh_process : esp_outb_pkt_flag_process;
>  		break;
>  	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
>  		pf->prepare = esp_outb_trs_prepare;
>  		pf->process = (sa->sqh_len != 0) ?
> -			esp_outb_sqh_process : pkt_flag_process;
> +			esp_outb_sqh_process : esp_outb_pkt_flag_process;
> +		break;
> +	default:
> +		rc = -ENOTSUP;
> +	}
> +
> +	return rc;
> +}
> +
> +static int
> +lksd_sync_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
> +		struct rte_ipsec_sa_pkt_func *pf)

As a nit: probably no point in having the lksd_ prefix for _sync_ functions.

> +{
> +	int32_t rc;
> +
> +	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
> +			RTE_IPSEC_SATP_MODE_MASK;
> +
> +	rc = 0;
> +	switch (sa->type & msk) {
> +	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
> +	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
> +		pf->process = esp_inb_tun_sync_crypto_pkt_process;
> +		break;
> +	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
> +		pf->process = esp_inb_trs_sync_crypto_pkt_process;
> +		break;
> +	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
> +	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
> +		pf->process = (sa->sqh_len != 0) ?
> +			esp_outb_tun_sync_crpyto_sqh_process :
> +			esp_outb_tun_sync_crpyto_flag_process;
> +		break;
> +	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
> +		pf->process = (sa->sqh_len != 0) ?
> +			esp_outb_trs_sync_crpyto_sqh_process :
> +			esp_outb_trs_sync_crpyto_flag_process;
>  		break;
>  	default:
>  		rc = -ENOTSUP;
> @@ -672,13 +708,16 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
>  	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
>  		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
>  				RTE_IPSEC_SATP_DIR_IB)
> -			pf->process = pkt_flag_process;
> +			pf->process = esp_outb_pkt_flag_process;
>  		else
>  			pf->process = inline_proto_outb_pkt_process;
>  		break;
>  	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
>  		pf->prepare = lksd_proto_prepare;
> -		pf->process = pkt_flag_process;
> +		pf->process = esp_outb_pkt_flag_process;
> +		break;
> +	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
> +		rc = lksd_sync_crypto_pkt_func_select(sa, pf);
>  		break;
>  	default:
>  		rc = -ENOTSUP;
> diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
> index 51e69ad05..02c7abc60 100644
> --- a/lib/librte_ipsec/sa.h
> +++ b/lib/librte_ipsec/sa.h
> @@ -156,6 +156,14 @@ uint16_t
>  inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num);
> 
> +uint16_t
> +esp_inb_tun_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num);
> +
> +uint16_t
> +esp_inb_trs_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num);
> +
>  /* outbound processing */
> 
>  uint16_t
> @@ -170,6 +178,10 @@ uint16_t
>  esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
>  	uint16_t num);
> 
> +uint16_t
> +esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
> +	struct rte_mbuf *mb[], uint16_t num);
> +
>  uint16_t
>  inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num);
> @@ -182,4 +194,21 @@ uint16_t
>  inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
>  	struct rte_mbuf *mb[], uint16_t num);
> 
> +uint16_t
> +esp_outb_tun_sync_crpyto_sqh_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num);
> +
> +uint16_t
> +esp_outb_tun_sync_crpyto_flag_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num);
> +
> +uint16_t
> +esp_outb_trs_sync_crpyto_sqh_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num);
> +
> +uint16_t
> +esp_outb_trs_sync_crpyto_flag_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num);
> +
> +
>  #endif /* _SA_H_ */
  
Ananyev, Konstantin Sept. 27, 2019, 10:38 a.m. UTC | #2
Hi Fan,

> 
> This patch updates the ipsec library to handle the newly introduced
> RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO action.
> 
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> ---
>  lib/librte_ipsec/esp_inb.c  | 174 +++++++++++++++++++++++++-
>  lib/librte_ipsec/esp_outb.c | 290 +++++++++++++++++++++++++++++++++++++++++++-
>  lib/librte_ipsec/sa.c       |  53 ++++++--
>  lib/librte_ipsec/sa.h       |  29 +++++
>  lib/librte_ipsec/ses.c      |   4 +-
>  5 files changed, 539 insertions(+), 11 deletions(-)
> 
> diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
> index 8e3ecbc64..6077dcb1e 100644
> --- a/lib/librte_ipsec/esp_inb.c
> +++ b/lib/librte_ipsec/esp_inb.c
> @@ -105,6 +105,73 @@ inb_cop_prepare(struct rte_crypto_op *cop,
>  	}
>  }
> 
> +static inline int
> +inb_sync_crypto_proc_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
> +	const union sym_op_data *icv, uint32_t pofs, uint32_t plen,
> +	struct rte_security_vec *buf, struct iovec *cur_vec,
> +	void *iv, void **aad, void **digest)
> +{
> +	struct rte_mbuf *ms;
> +	struct iovec *vec = cur_vec;
> +	struct aead_gcm_iv *gcm;
> +	struct aesctr_cnt_blk *ctr;
> +	uint64_t *ivp;
> +	uint32_t algo, left, off = 0, n_seg = 0;

Same thing as for outbound: please keep definitions and assignments separated.

> +
> +	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
> +		pofs + sizeof(struct rte_esp_hdr));
> +	algo = sa->algo_type;
> +
> +	switch (algo) {
> +	case ALGO_TYPE_AES_GCM:
> +		gcm = (struct aead_gcm_iv *)iv;
> +		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
> +		*aad = icv->va + sa->icv_len;
> +		off = sa->ctp.cipher.offset + pofs;
> +		break;
> +	case ALGO_TYPE_AES_CBC:
> +	case ALGO_TYPE_3DES_CBC:
> +		off = sa->ctp.auth.offset + pofs;
> +		break;
> +	case ALGO_TYPE_AES_CTR:
> +		off = sa->ctp.auth.offset + pofs;
> +		ctr = (struct aesctr_cnt_blk *)iv;
> +		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
> +		break;
> +	case ALGO_TYPE_NULL:
> +		break;
> +	}
> +
> +	*digest = icv->va;
> +
> +	left = plen - sa->ctp.cipher.length;
> +
> +	ms = mbuf_get_seg_ofs(mb, &off);
> +	if (!ms)
> +		return -1;

Same as for outbound: I think there is no need to check/return failure here.
This function could be split into two.

> +
> +	while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {


Same thing: we shouldn't limit ourselves to 5 segments per packet.
Pretty much the same comments about code restructuring as for the outbound
case apply.

> +		uint32_t len = RTE_MIN(left, ms->data_len - off);
> +
> +		vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
> +		vec->iov_len = len;
> +
> +		left -= len;
> +		vec++;
> +		n_seg++;
> +		ms = ms->next;
> +		off = 0;
> +	}
> +
> +	if (left)
> +		return -1;
> +
> +	buf->vec = cur_vec;
> +	buf->num = n_seg;
> +
> +	return n_seg;
> +}
> +
>  /*
>   * Helper function for prepare() to deal with situation when
>   * ICV is spread by two segments. Tries to move ICV completely into the
> @@ -512,7 +579,6 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
>  	return k;
>  }
> 
> -
>  /*
>   * *process* function for tunnel packets
>   */
> @@ -625,6 +691,112 @@ esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
>  	return n;
>  }
> 
> +/*
> + * process packets using sync crypto engine
> + */
> +static uint16_t
> +esp_inb_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num, uint8_t sqh_len,
> +		esp_inb_process_t process)
> +{
> +	int32_t rc;
> +	uint32_t i, k, hl, n, p;
> +	struct rte_ipsec_sa *sa;
> +	struct replay_sqn *rsn;
> +	union sym_op_data icv;
> +	uint32_t sqn[num];
> +	uint32_t dr[num];
> +	struct rte_security_vec buf[num];
> +	struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
> +	uint32_t vec_idx = 0;
> +	uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
> +	void *iv[num];
> +	void *aad[num];
> +	void *digest[num];
> +	int status[num];
> +
> +	sa = ss->sa;
> +	rsn = rsn_acquire(sa);
> +
> +	k = 0;
> +	for (i = 0; i != num; i++) {
> +		hl = mb[i]->l2_len + mb[i]->l3_len;
> +		rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
> +		if (rc >= 0) {
> +			iv[k] = (void *)ivs[k];
> +			rc = inb_sync_crypto_proc_prepare(sa, mb[i], &icv, hl,
> +					rc, &buf[k], &vec[vec_idx], iv[k],
> +					&aad[k], &digest[k]);
> +			if (rc < 0) {
> +				dr[i - k] = i;
> +				continue;
> +			}
> +
> +			vec_idx += rc;
> +			k++;
> +		} else
> +			dr[i - k] = i;
> +	}
> +
> +	/* copy not prepared mbufs beyond good ones */
> +	if (k != num) {
> +		rte_errno = EBADMSG;
> +
> +		if (unlikely(k == 0))
> +			return 0;
> +
> +		move_bad_mbufs(mb, dr, num, num - k);
> +	}
> +
> +	/* process the packets */
> +	n = 0;
> +	rte_security_process_cpu_crypto_bulk(ss->security.ctx,
> +			ss->security.ses, buf, iv, aad, digest, status,
> +			k);
> +	/* move failed process packets to dr */
> +	for (i = 0; i < k; i++) {
> +		if (status[i]) {
> +			dr[n++] = i;
> +			rte_errno = EBADMSG;
> +		}
> +	}
> +
> +	/* move bad packets to the back */
> +	if (n)
> +		move_bad_mbufs(mb, dr, k, n);

I don't think you need to set dr[] here and call that function; see below.

> +
> +	/* process packets */
> +	p = process(sa, mb, sqn, dr, k - n, sqh_len);

tun_process(), etc. expect PKT_RX_SEC_OFFLOAD_FAILED to be set in
mb->ol_flags for failed packets.
So you either need to set this flag in ol_flags based on status[],
tweak the existing process functions, or introduce new ones.
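The first option could be a simple loop before calling process() (sketch):

	/* propagate status[] into ol_flags for the process() callback */
	for (i = 0; i != k; i++) {
		if (status[i] != 0) {
			mb[i]->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
			rte_errno = EBADMSG;
		}
	}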


> +
> +	if (p != k - n && p != 0)
> +		move_bad_mbufs(mb, dr, k - n, k - n - p);
> +
> +	if (p != num)
> +		rte_errno = EBADMSG;
> +
> +	return p;
> +}
> +
> +uint16_t
> +esp_inb_tun_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num)
> +{
> +	struct rte_ipsec_sa *sa = ss->sa;
> +
> +	return esp_inb_sync_crypto_pkt_process(ss, mb, num, sa->sqh_len,
> +			tun_process);
> +}
> +
> +uint16_t
> +esp_inb_trs_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
> +		struct rte_mbuf *mb[], uint16_t num)
> +{
> +	struct rte_ipsec_sa *sa = ss->sa;
> +
> +	return esp_inb_sync_crypto_pkt_process(ss, mb, num, sa->sqh_len,
> +			trs_process);
> +}
> +
>  /*
>   * process group of ESP inbound tunnel packets.
>   */
  

Patch

diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
index 8e3ecbc64..6077dcb1e 100644
--- a/lib/librte_ipsec/esp_inb.c
+++ b/lib/librte_ipsec/esp_inb.c
@@ -105,6 +105,73 @@  inb_cop_prepare(struct rte_crypto_op *cop,
 	}
 }
 
+static inline int
+inb_sync_crypto_proc_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+	const union sym_op_data *icv, uint32_t pofs, uint32_t plen,
+	struct rte_security_vec *buf, struct iovec *cur_vec,
+	void *iv, void **aad, void **digest)
+{
+	struct rte_mbuf *ms;
+	struct iovec *vec = cur_vec;
+	struct aead_gcm_iv *gcm;
+	struct aesctr_cnt_blk *ctr;
+	uint64_t *ivp;
+	uint32_t algo, left, off = 0, n_seg = 0;
+
+	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+		pofs + sizeof(struct rte_esp_hdr));
+	algo = sa->algo_type;
+
+	switch (algo) {
+	case ALGO_TYPE_AES_GCM:
+		gcm = (struct aead_gcm_iv *)iv;
+		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+		*aad = icv->va + sa->icv_len;
+		off = sa->ctp.cipher.offset + pofs;
+		break;
+	case ALGO_TYPE_AES_CBC:
+	case ALGO_TYPE_3DES_CBC:
+		off = sa->ctp.auth.offset + pofs;
+		break;
+	case ALGO_TYPE_AES_CTR:
+		off = sa->ctp.auth.offset + pofs;
+		ctr = (struct aesctr_cnt_blk *)iv;
+		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+		break;
+	case ALGO_TYPE_NULL:
+		break;
+	}
+
+	*digest = icv->va;
+
+	left = plen - sa->ctp.cipher.length;
+
+	ms = mbuf_get_seg_ofs(mb, &off);
+	if (!ms)
+		return -1;
+
+	while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {
+		uint32_t len = RTE_MIN(left, ms->data_len - off);
+
+		vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
+		vec->iov_len = len;
+
+		left -= len;
+		vec++;
+		n_seg++;
+		ms = ms->next;
+		off = 0;
+	}
+
+	if (left)
+		return -1;
+
+	buf->vec = cur_vec;
+	buf->num = n_seg;
+
+	return n_seg;
+}
+
 /*
  * Helper function for prepare() to deal with situation when
  * ICV is spread by two segments. Tries to move ICV completely into the
@@ -512,7 +579,6 @@  tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
 	return k;
 }
 
-
 /*
  * *process* function for tunnel packets
  */
@@ -625,6 +691,112 @@  esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
 	return n;
 }
 
+/*
+ * process packets using sync crypto engine
+ */
+static uint16_t
+esp_inb_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num, uint8_t sqh_len,
+		esp_inb_process_t process)
+{
+	int32_t rc;
+	uint32_t i, k, hl, n, p;
+	struct rte_ipsec_sa *sa;
+	struct replay_sqn *rsn;
+	union sym_op_data icv;
+	uint32_t sqn[num];
+	uint32_t dr[num];
+	struct rte_security_vec buf[num];
+	struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+	uint32_t vec_idx = 0;
+	uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
+	void *iv[num];
+	void *aad[num];
+	void *digest[num];
+	int status[num];
+
+	sa = ss->sa;
+	rsn = rsn_acquire(sa);
+
+	k = 0;
+	for (i = 0; i != num; i++) {
+		hl = mb[i]->l2_len + mb[i]->l3_len;
+		rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
+		if (rc >= 0) {
+			iv[k] = (void *)ivs[k];
+			rc = inb_sync_crypto_proc_prepare(sa, mb[i], &icv, hl,
+					rc, &buf[k], &vec[vec_idx], iv[k],
+					&aad[k], &digest[k]);
+			if (rc < 0) {
+				dr[i - k] = i;
+				continue;
+			}
+
+			vec_idx += rc;
+			k++;
+		} else
+			dr[i - k] = i;
+	}
+
+	/* copy not prepared mbufs beyond good ones */
+	if (k != num) {
+		rte_errno = EBADMSG;
+
+		if (unlikely(k == 0))
+			return 0;
+
+		move_bad_mbufs(mb, dr, num, num - k);
+	}
+
+	/* process the packets */
+	n = 0;
+	rte_security_process_cpu_crypto_bulk(ss->security.ctx,
+			ss->security.ses, buf, iv, aad, digest, status,
+			k);
+	/* move failed process packets to dr */
+	for (i = 0; i < k; i++) {
+		if (status[i]) {
+			dr[n++] = i;
+			rte_errno = EBADMSG;
+		}
+	}
+
+	/* move bad packets to the back */
+	if (n)
+		move_bad_mbufs(mb, dr, k, n);
+
+	/* process packets */
+	p = process(sa, mb, sqn, dr, k - n, sqh_len);
+
+	if (p != k - n && p != 0)
+		move_bad_mbufs(mb, dr, k - n, k - n - p);
+
+	if (p != num)
+		rte_errno = EBADMSG;
+
+	return p;
+}
+
+uint16_t
+esp_inb_tun_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	struct rte_ipsec_sa *sa = ss->sa;
+
+	return esp_inb_sync_crypto_pkt_process(ss, mb, num, sa->sqh_len,
+			tun_process);
+}
+
+uint16_t
+esp_inb_trs_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	struct rte_ipsec_sa *sa = ss->sa;
+
+	return esp_inb_sync_crypto_pkt_process(ss, mb, num, sa->sqh_len,
+			trs_process);
+}
+
 /*
  * process group of ESP inbound tunnel packets.
  */
diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
index 55799a867..097cb663f 100644
--- a/lib/librte_ipsec/esp_outb.c
+++ b/lib/librte_ipsec/esp_outb.c
@@ -403,6 +403,292 @@  esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	return k;
 }
 
+
+static inline int
+outb_sync_crypto_proc_prepare(struct rte_mbuf *m, const struct rte_ipsec_sa *sa,
+		const uint64_t ivp[IPSEC_MAX_IV_QWORD],
+		const union sym_op_data *icv, uint32_t hlen, uint32_t plen,
+		struct rte_security_vec *buf, struct iovec *cur_vec, void *iv,
+		void **aad, void **digest)
+{
+	struct rte_mbuf *ms;
+	struct aead_gcm_iv *gcm;
+	struct aesctr_cnt_blk *ctr;
+	struct iovec *vec = cur_vec;
+	uint32_t left, off = 0, n_seg = 0;
+	uint32_t algo;
+
+	algo = sa->algo_type;
+
+	switch (algo) {
+	case ALGO_TYPE_AES_GCM:
+		gcm = iv;
+		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+		*aad = (void *)(icv->va + sa->icv_len);
+		off = sa->ctp.cipher.offset + hlen;
+		break;
+	case ALGO_TYPE_AES_CBC:
+	case ALGO_TYPE_3DES_CBC:
+		off = sa->ctp.auth.offset + hlen;
+		break;
+	case ALGO_TYPE_AES_CTR:
+		ctr = iv;
+		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+		break;
+	case ALGO_TYPE_NULL:
+		break;
+	}
+
+	*digest = (void *)icv->va;
+
+	left = sa->ctp.cipher.length + plen;
+
+	ms = mbuf_get_seg_ofs(m, &off);
+	if (!ms)
+		return -1;
+
+	while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {
+		uint32_t len = RTE_MIN(left, ms->data_len - off);
+
+		vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
+		vec->iov_len = len;
+
+		left -= len;
+		vec++;
+		n_seg++;
+		ms = ms->next;
+		off = 0;
+	}
+
+	if (left)
+		return -1;
+
+	buf->vec = cur_vec;
+	buf->num = n_seg;
+
+	return n_seg;
+}
+
+/**
+ * Local post process function prototype that same as process function prototype
+ * as rte_ipsec_sa_pkt_func's process().
+ */
+typedef uint16_t (*sync_crypto_post_process)(const struct rte_ipsec_session *ss,
+				struct rte_mbuf *mb[],
+				uint16_t num);
+static uint16_t
+esp_outb_tun_sync_crypto_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num,
+		sync_crypto_post_process post_process)
+{
+	uint64_t sqn;
+	rte_be64_t sqc;
+	struct rte_ipsec_sa *sa;
+	struct rte_security_ctx *ctx;
+	struct rte_security_session *rss;
+	union sym_op_data icv;
+	struct rte_security_vec buf[num];
+	struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+	uint32_t vec_idx = 0;
+	void *aad[num];
+	void *digest[num];
+	void *iv[num];
+	uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
+	uint64_t ivp[IPSEC_MAX_IV_QWORD];
+	int status[num];
+	uint32_t dr[num];
+	uint32_t i, n, k;
+	int32_t rc;
+
+	sa = ss->sa;
+	ctx = ss->security.ctx;
+	rss = ss->security.ses;
+
+	k = 0;
+	n = num;
+	sqn = esn_outb_update_sqn(sa, &n);
+	if (n != num)
+		rte_errno = EOVERFLOW;
+
+	for (i = 0; i != n; i++) {
+		sqc = rte_cpu_to_be_64(sqn + i);
+		gen_iv(ivp, sqc);
+
+		/* try to update the packet itself */
+		rc = outb_tun_pkt_prepare(sa, sqc, ivp, mb[i], &icv,
+				sa->sqh_len);
+
+		/* success, setup crypto op */
+		if (rc >= 0) {
+			outb_pkt_xprepare(sa, sqc, &icv);
+
+			iv[k] = (void *)ivs[k];
+			rc = outb_sync_crypto_proc_prepare(mb[i], sa, ivp, &icv,
+					0, rc, &buf[k], &vec[vec_idx], iv[k],
+					&aad[k], &digest[k]);
+			if (rc < 0) {
+				dr[i - k] = i;
+				rte_errno = -rc;
+				continue;
+			}
+
+			vec_idx += rc;
+			k++;
+		/* failure, put packet into the death-row */
+		} else {
+			dr[i - k] = i;
+			rte_errno = -rc;
+		}
+	}
+
+	 /* copy not prepared mbufs beyond good ones */
+	if (k != n && k != 0)
+		move_bad_mbufs(mb, dr, n, n - k);
+
+	if (unlikely(k == 0)) {
+		rte_errno = EBADMSG;
+		return 0;
+	}
+
+	/* process the packets */
+	n = 0;
+	rte_security_process_cpu_crypto_bulk(ctx, rss, buf, iv, aad, digest,
+			status, k);
+	/* move failed process packets to dr */
+	for (i = 0; i < n; i++) {
+		if (status[i])
+			dr[n++] = i;
+	}
+
+	if (n)
+		move_bad_mbufs(mb, dr, k, n);
+
+	return post_process(ss, mb, k - n);
+}
+
+static uint16_t
+esp_outb_trs_sync_crypto_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num,
+		sync_crypto_post_process post_process)
+
+{
+	uint64_t sqn;
+	rte_be64_t sqc;
+	struct rte_ipsec_sa *sa;
+	struct rte_security_ctx *ctx;
+	struct rte_security_session *rss;
+	union sym_op_data icv;
+	struct rte_security_vec buf[num];
+	struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+	uint32_t vec_idx = 0;
+	void *aad[num];
+	void *digest[num];
+	uint8_t ivs[num][IPSEC_MAX_IV_SIZE];
+	void *iv[num];
+	int status[num];
+	uint64_t ivp[IPSEC_MAX_IV_QWORD];
+	uint32_t dr[num];
+	uint32_t i, n, k;
+	uint32_t l2, l3;
+	int32_t rc;
+
+	sa = ss->sa;
+	ctx = ss->security.ctx;
+	rss = ss->security.ses;
+
+	k = 0;
+	n = num;
+	sqn = esn_outb_update_sqn(sa, &n);
+	if (n != num)
+		rte_errno = EOVERFLOW;
+
+	for (i = 0; i != n; i++) {
+		l2 = mb[i]->l2_len;
+		l3 = mb[i]->l3_len;
+
+		sqc = rte_cpu_to_be_64(sqn + i);
+		gen_iv(ivp, sqc);
+
+		/* try to update the packet itself */
+		rc = outb_trs_pkt_prepare(sa, sqc, ivp, mb[i], l2, l3, &icv,
+				sa->sqh_len);
+
+		/* success, setup crypto op */
+		if (rc >= 0) {
+			outb_pkt_xprepare(sa, sqc, &icv);
+
+			iv[k] = (void *)ivs[k];
+
+			rc = outb_sync_crypto_proc_prepare(mb[i], sa, ivp, &icv,
+					l2 + l3, rc, &buf[k], &vec[vec_idx],
+					iv[k], &aad[k], &digest[k]);
+			if (rc < 0) {
+				dr[i - k] = i;
+				rte_errno = -rc;
+				continue;
+			}
+
+			vec_idx += rc;
+			k++;
+		/* failure, put packet into the death-row */
+		} else {
+			dr[i - k] = i;
+			rte_errno = -rc;
+		}
+	}
+
+	 /* copy not prepared mbufs beyond good ones */
+	if (k != n && k != 0)
+		move_bad_mbufs(mb, dr, n, n - k);
+
+	/* process the packets */
+	n = 0;
+	rte_security_process_cpu_crypto_bulk(ctx, rss, buf, iv, aad, digest,
+			status, k);
+	/* move failed process packets to dr */
+	for (i = 0; i < k; i++) {
+		if (status[i])
+			dr[n++] = i;
+	}
+
+	if (n)
+		move_bad_mbufs(mb, dr, k, n);
+
+	return post_process(ss, mb, k - n);
+}
+
+uint16_t
+esp_outb_tun_sync_crpyto_sqh_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	return esp_outb_tun_sync_crypto_process(ss, mb, num,
+			esp_outb_sqh_process);
+}
+
+uint16_t
+esp_outb_tun_sync_crpyto_flag_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	return esp_outb_tun_sync_crypto_process(ss, mb, num,
+			esp_outb_pkt_flag_process);
+}
+
+uint16_t
+esp_outb_trs_sync_crpyto_sqh_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	return esp_outb_trs_sync_crypto_process(ss, mb, num,
+			esp_outb_sqh_process);
+}
+
+uint16_t
+esp_outb_trs_sync_crpyto_flag_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	return esp_outb_trs_sync_crypto_process(ss, mb, num,
+			esp_outb_pkt_flag_process);
+}
+
 /*
  * process outbound packets for SA with ESN support,
  * for algorithms that require SQN.hibits to be implictly included
@@ -410,8 +696,8 @@  esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
  * In that case we have to move ICV bytes back to their proper place.
  */
 uint16_t
-esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
-	uint16_t num)
+esp_outb_sqh_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num)
 {
 	uint32_t i, k, icv_len, *icv;
 	struct rte_mbuf *ml;
diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
index 23d394b46..31ffbce2c 100644
--- a/lib/librte_ipsec/sa.c
+++ b/lib/librte_ipsec/sa.c
@@ -544,9 +544,9 @@  lksd_proto_prepare(const struct rte_ipsec_session *ss,
  * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
  * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
  */
-static uint16_t
-pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
-	uint16_t num)
+uint16_t
+esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
 {
 	uint32_t i, k;
 	uint32_t dr[num];
@@ -599,12 +599,48 @@  lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
 		pf->prepare = esp_outb_tun_prepare;
 		pf->process = (sa->sqh_len != 0) ?
-			esp_outb_sqh_process : pkt_flag_process;
+			esp_outb_sqh_process : esp_outb_pkt_flag_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
 		pf->prepare = esp_outb_trs_prepare;
 		pf->process = (sa->sqh_len != 0) ?
-			esp_outb_sqh_process : pkt_flag_process;
+			esp_outb_sqh_process : esp_outb_pkt_flag_process;
+		break;
+	default:
+		rc = -ENOTSUP;
+	}
+
+	return rc;
+}
+
+static int
+lksd_sync_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+		struct rte_ipsec_sa_pkt_func *pf)
+{
+	int32_t rc;
+
+	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+			RTE_IPSEC_SATP_MODE_MASK;
+
+	rc = 0;
+	switch (sa->type & msk) {
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+		pf->process = esp_inb_tun_sync_crypto_pkt_process;
+		break;
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+		pf->process = esp_inb_trs_sync_crypto_pkt_process;
+		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+		pf->process = (sa->sqh_len != 0) ?
+			esp_outb_tun_sync_crpyto_sqh_process :
+			esp_outb_tun_sync_crpyto_flag_process;
+		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+		pf->process = (sa->sqh_len != 0) ?
+			esp_outb_trs_sync_crpyto_sqh_process :
+			esp_outb_trs_sync_crpyto_flag_process;
 		break;
 	default:
 		rc = -ENOTSUP;
@@ -672,13 +708,16 @@  ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
 	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
 				RTE_IPSEC_SATP_DIR_IB)
-			pf->process = pkt_flag_process;
+			pf->process = esp_outb_pkt_flag_process;
 		else
 			pf->process = inline_proto_outb_pkt_process;
 		break;
 	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
 		pf->prepare = lksd_proto_prepare;
-		pf->process = pkt_flag_process;
+		pf->process = esp_outb_pkt_flag_process;
+		break;
+	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+		rc = lksd_sync_crypto_pkt_func_select(sa, pf);
 		break;
 	default:
 		rc = -ENOTSUP;
diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
index 51e69ad05..02c7abc60 100644
--- a/lib/librte_ipsec/sa.h
+++ b/lib/librte_ipsec/sa.h
@@ -156,6 +156,14 @@  uint16_t
 inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num);
 
+uint16_t
+esp_inb_tun_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_inb_trs_sync_crypto_pkt_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num);
+
 /* outbound processing */
 
 uint16_t
@@ -170,6 +178,10 @@  uint16_t
 esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	uint16_t num);
 
+uint16_t
+esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
 uint16_t
 inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num);
@@ -182,4 +194,21 @@  uint16_t
 inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num);
 
+uint16_t
+esp_outb_tun_sync_crpyto_sqh_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_tun_sync_crpyto_flag_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_trs_sync_crpyto_sqh_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_trs_sync_crpyto_flag_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num);
+
+
 #endif /* _SA_H_ */
diff --git a/lib/librte_ipsec/ses.c b/lib/librte_ipsec/ses.c
index 82c765a33..eaa8c17b7 100644
--- a/lib/librte_ipsec/ses.c
+++ b/lib/librte_ipsec/ses.c
@@ -19,7 +19,9 @@  session_check(struct rte_ipsec_session *ss)
 			return -EINVAL;
 		if ((ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
 				ss->type ==
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) &&
+				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+				ss->type ==
+				RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) &&
 				ss->security.ctx == NULL)
 			return -EINVAL;
 	}