get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
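
The record below can also be fetched programmatically. What follows is a minimal sketch in Python using the requests library (an assumption; any HTTP client that can send an Accept: application/json header will do). It retrieves this patch's metadata and then downloads its mbox so the patch can be applied with git am; the output filename is illustrative only.

import requests

API_URL = "http://patches.dpdk.org/api/patches/51416/"

# Ask for JSON explicitly; the same endpoint also honours ?format=api for
# the browsable view shown below (note the "Vary: Accept" response header).
resp = requests.get(API_URL, headers={"Accept": "application/json"})
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[v2,5/7] ipsec: move inbound and outbound code ..."
print(patch["state"])  # "superseded"

# The "mbox" field points at the raw patch email, suitable for `git am`.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("patch-51416.mbox", "wb") as fh:  # hypothetical local filename
    fh.write(mbox.content)
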

GET /api/patches/51416/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 51416,
    "url": "http://patches.dpdk.org/api/patches/51416/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1553102679-23576-6-git-send-email-konstantin.ananyev@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1553102679-23576-6-git-send-email-konstantin.ananyev@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1553102679-23576-6-git-send-email-konstantin.ananyev@intel.com",
    "date": "2019-03-20T17:24:37",
    "name": "[v2,5/7] ipsec: move inbound and outbound code into different files",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "aa330ede46f599a7bd31683c255268a91c4170d0",
    "submitter": {
        "id": 33,
        "url": "http://patches.dpdk.org/api/people/33/?format=api",
        "name": "Ananyev, Konstantin",
        "email": "konstantin.ananyev@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1553102679-23576-6-git-send-email-konstantin.ananyev@intel.com/mbox/",
    "series": [
        {
            "id": 3831,
            "url": "http://patches.dpdk.org/api/series/3831/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=3831",
            "date": "2019-03-20T17:24:37",
            "name": null,
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/3831/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/51416/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/51416/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 144A31B274;\n\tWed, 20 Mar 2019 18:25:07 +0100 (CET)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n\tby dpdk.org (Postfix) with ESMTP id 07EAD1B1F9\n\tfor <dev@dpdk.org>; Wed, 20 Mar 2019 18:24:57 +0100 (CET)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t20 Mar 2019 10:24:55 -0700",
            "from sivswdev08.ir.intel.com (HELO localhost.localdomain)\n\t([10.237.217.47])\n\tby orsmga002.jf.intel.com with ESMTP; 20 Mar 2019 10:24:53 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.60,249,1549958400\"; d=\"scan'208\";a=\"143689994\"",
        "From": "Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "akhil.goyal@nxp.com, olivier.matz@6wind.com,\n\tKonstantin Ananyev <konstantin.ananyev@intel.com>",
        "Date": "Wed, 20 Mar 2019 17:24:37 +0000",
        "Message-Id": "<1553102679-23576-6-git-send-email-konstantin.ananyev@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": "<1551381661-21078-1-git-send-email-konstantin.ananyev@intel.com>",
        "References": "<1551381661-21078-1-git-send-email-konstantin.ananyev@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 5/7] ipsec: move inbound and outbound code\n\tinto different files",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "sa.c becomes too big, so decided to split it into 3 chunks:\n - sa.c - control path related functions (init/fini, etc.)\n - esp_inb.c - ESP inbound packet processing\n - esp_outb.c - ESP outbound packet processing\n\nPlus few changes in internal function names to follow the same\ncode convention.\nNo functional changes introduced.\n\nSigned-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\n---\n lib/librte_ipsec/Makefile    |    2 +\n lib/librte_ipsec/crypto.h    |   17 +\n lib/librte_ipsec/esp_inb.c   |  408 ++++++++++++++\n lib/librte_ipsec/esp_outb.c  |  535 ++++++++++++++++++\n lib/librte_ipsec/ipsec_sqn.h |   30 -\n lib/librte_ipsec/meson.build |    2 +-\n lib/librte_ipsec/misc.h      |   41 ++\n lib/librte_ipsec/sa.c        | 1011 ++--------------------------------\n lib/librte_ipsec/sa.h        |   40 ++\n 9 files changed, 1086 insertions(+), 1000 deletions(-)\n create mode 100644 lib/librte_ipsec/esp_inb.c\n create mode 100644 lib/librte_ipsec/esp_outb.c\n create mode 100644 lib/librte_ipsec/misc.h",
    "diff": "diff --git a/lib/librte_ipsec/Makefile b/lib/librte_ipsec/Makefile\nindex 77506d6ad..e80926baa 100644\n--- a/lib/librte_ipsec/Makefile\n+++ b/lib/librte_ipsec/Makefile\n@@ -16,6 +16,8 @@ EXPORT_MAP := rte_ipsec_version.map\n LIBABIVER := 1\n \n # all source are stored in SRCS-y\n+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += esp_inb.c\n+SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += esp_outb.c\n SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += sa.c\n SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += ses.c\n \ndiff --git a/lib/librte_ipsec/crypto.h b/lib/librte_ipsec/crypto.h\nindex b5f264831..5a7b11f56 100644\n--- a/lib/librte_ipsec/crypto.h\n+++ b/lib/librte_ipsec/crypto.h\n@@ -145,4 +145,21 @@ remove_sqh(void *picv, uint32_t icv_len)\n \t\ticv[i] = icv[i + 1];\n }\n \n+/*\n+ * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.\n+ */\n+static inline void\n+lksd_none_cop_prepare(struct rte_crypto_op *cop,\n+\tstruct rte_cryptodev_sym_session *cs, struct rte_mbuf *mb)\n+{\n+\tstruct rte_crypto_sym_op *sop;\n+\n+\tsop = cop->sym;\n+\tcop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;\n+\tcop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;\n+\tcop->sess_type = RTE_CRYPTO_OP_WITH_SESSION;\n+\tsop->m_src = mb;\n+\t__rte_crypto_sym_op_attach_sym_session(sop, cs);\n+}\n+\n #endif /* _CRYPTO_H_ */\ndiff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c\nnew file mode 100644\nindex 000000000..562185ebe\n--- /dev/null\n+++ b/lib/librte_ipsec/esp_inb.c\n@@ -0,0 +1,408 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Intel Corporation\n+ */\n+\n+#include <rte_ipsec.h>\n+#include <rte_esp.h>\n+#include <rte_ip.h>\n+#include <rte_errno.h>\n+#include <rte_cryptodev.h>\n+\n+#include \"sa.h\"\n+#include \"ipsec_sqn.h\"\n+#include \"crypto.h\"\n+#include \"iph.h\"\n+#include \"misc.h\"\n+#include \"pad.h\"\n+\n+/*\n+ * setup crypto op and crypto sym op for ESP inbound tunnel packet.\n+ */\n+static inline int32_t\n+inb_cop_prepare(struct rte_crypto_op *cop,\n+\tconst struct rte_ipsec_sa *sa, struct rte_mbuf *mb,\n+\tconst union sym_op_data *icv, uint32_t pofs, uint32_t plen)\n+{\n+\tstruct rte_crypto_sym_op *sop;\n+\tstruct aead_gcm_iv *gcm;\n+\tuint64_t *ivc, *ivp;\n+\tuint32_t clen;\n+\n+\tclen = plen - sa->ctp.cipher.length;\n+\tif ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)\n+\t\treturn -EINVAL;\n+\n+\t/* fill sym op fields */\n+\tsop = cop->sym;\n+\n+\t/* AEAD (AES_GCM) case */\n+\tif (sa->aad_len != 0) {\n+\t\tsop->aead.data.offset = pofs + sa->ctp.cipher.offset;\n+\t\tsop->aead.data.length = clen;\n+\t\tsop->aead.digest.data = icv->va;\n+\t\tsop->aead.digest.phys_addr = icv->pa;\n+\t\tsop->aead.aad.data = icv->va + sa->icv_len;\n+\t\tsop->aead.aad.phys_addr = icv->pa + sa->icv_len;\n+\n+\t\t/* fill AAD IV (located inside crypto op) */\n+\t\tgcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,\n+\t\t\tsa->iv_ofs);\n+\t\tivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,\n+\t\t\tpofs + sizeof(struct esp_hdr));\n+\t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n+\t/* CRYPT+AUTH case */\n+\t} else {\n+\t\tsop->cipher.data.offset = pofs + sa->ctp.cipher.offset;\n+\t\tsop->cipher.data.length = clen;\n+\t\tsop->auth.data.offset = pofs + sa->ctp.auth.offset;\n+\t\tsop->auth.data.length = plen - sa->ctp.auth.length;\n+\t\tsop->auth.digest.data = icv->va;\n+\t\tsop->auth.digest.phys_addr = icv->pa;\n+\n+\t\t/* copy iv from the input packet to the cop */\n+\t\tivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);\n+\t\tivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,\n+\t\t\tpofs + sizeof(struct 
esp_hdr));\n+\t\tcopy_iv(ivc, ivp, sa->iv_len);\n+\t}\n+\treturn 0;\n+}\n+\n+/*\n+ * for pure cryptodev (lookaside none) depending on SA settings,\n+ * we might have to write some extra data to the packet.\n+ */\n+static inline void\n+inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,\n+\tconst union sym_op_data *icv)\n+{\n+\tstruct aead_gcm_aad *aad;\n+\n+\t/* insert SQN.hi between ESP trailer and ICV */\n+\tif (sa->sqh_len != 0)\n+\t\tinsert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);\n+\n+\t/*\n+\t * fill AAD fields, if any (aad fields are placed after icv),\n+\t * right now we support only one AEAD algorithm: AES-GCM.\n+\t */\n+\tif (sa->aad_len != 0) {\n+\t\taad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);\n+\t\taead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));\n+\t}\n+}\n+\n+/*\n+ * setup/update packet data and metadata for ESP inbound tunnel case.\n+ */\n+static inline int32_t\n+inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,\n+\tstruct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)\n+{\n+\tint32_t rc;\n+\tuint64_t sqn;\n+\tuint32_t icv_ofs, plen;\n+\tstruct rte_mbuf *ml;\n+\tstruct esp_hdr *esph;\n+\n+\tesph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);\n+\n+\t/*\n+\t * retrieve and reconstruct SQN, then check it, then\n+\t * convert it back into network byte order.\n+\t */\n+\tsqn = rte_be_to_cpu_32(esph->seq);\n+\tif (IS_ESN(sa))\n+\t\tsqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);\n+\n+\trc = esn_inb_check_sqn(rsn, sa, sqn);\n+\tif (rc != 0)\n+\t\treturn rc;\n+\n+\tsqn = rte_cpu_to_be_64(sqn);\n+\n+\t/* start packet manipulation */\n+\tplen = mb->pkt_len;\n+\tplen = plen - hlen;\n+\n+\tml = rte_pktmbuf_lastseg(mb);\n+\ticv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;\n+\n+\t/* we have to allocate space for AAD somewhere,\n+\t * right now - just use free trailing space at the last segment.\n+\t * Would probably be more convenient to reserve space for AAD\n+\t * inside rte_crypto_op itself\n+\t * (again for IV space is already reserved inside cop).\n+\t */\n+\tif (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))\n+\t\treturn -ENOSPC;\n+\n+\ticv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);\n+\ticv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);\n+\n+\tinb_pkt_xprepare(sa, sqn, icv);\n+\treturn plen;\n+}\n+\n+/*\n+ * setup/update packets and crypto ops for ESP inbound case.\n+ */\n+uint16_t\n+esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n+\tstruct rte_crypto_op *cop[], uint16_t num)\n+{\n+\tint32_t rc;\n+\tuint32_t i, k, hl;\n+\tstruct rte_ipsec_sa *sa;\n+\tstruct rte_cryptodev_sym_session *cs;\n+\tstruct replay_sqn *rsn;\n+\tunion sym_op_data icv;\n+\tuint32_t dr[num];\n+\n+\tsa = ss->sa;\n+\tcs = ss->crypto.ses;\n+\trsn = rsn_acquire(sa);\n+\n+\tk = 0;\n+\tfor (i = 0; i != num; i++) {\n+\n+\t\thl = mb[i]->l2_len + mb[i]->l3_len;\n+\t\trc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);\n+\t\tif (rc >= 0) {\n+\t\t\tlksd_none_cop_prepare(cop[k], cs, mb[i]);\n+\t\t\trc = inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);\n+\t\t}\n+\n+\t\tk += (rc == 0);\n+\t\tif (rc != 0) {\n+\t\t\tdr[i - k] = i;\n+\t\t\trte_errno = -rc;\n+\t\t}\n+\t}\n+\n+\trsn_release(sa, rsn);\n+\n+\t/* copy not prepared mbufs beyond good ones */\n+\tif (k != num && k != 0)\n+\t\tmbuf_bad_move(mb, dr, num, num - k);\n+\n+\treturn k;\n+}\n+\n+/*\n+ * process ESP inbound tunnel packet.\n+ */\n+static inline int\n+inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,\n+\tuint32_t 
*sqn)\n+{\n+\tuint32_t hlen, icv_len, tlen;\n+\tstruct esp_hdr *esph;\n+\tstruct esp_tail *espt;\n+\tstruct rte_mbuf *ml;\n+\tchar *pd;\n+\n+\tif (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)\n+\t\treturn -EBADMSG;\n+\n+\ticv_len = sa->icv_len;\n+\n+\tml = rte_pktmbuf_lastseg(mb);\n+\tespt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,\n+\t\tml->data_len - icv_len - sizeof(*espt));\n+\n+\t/*\n+\t * check padding and next proto.\n+\t * return an error if something is wrong.\n+\t */\n+\tpd = (char *)espt - espt->pad_len;\n+\tif (espt->next_proto != sa->proto ||\n+\t\t\tmemcmp(pd, esp_pad_bytes, espt->pad_len))\n+\t\treturn -EINVAL;\n+\n+\t/* cut of ICV, ESP tail and padding bytes */\n+\ttlen = icv_len + sizeof(*espt) + espt->pad_len;\n+\tml->data_len -= tlen;\n+\tmb->pkt_len -= tlen;\n+\n+\t/* cut of L2/L3 headers, ESP header and IV */\n+\thlen = mb->l2_len + mb->l3_len;\n+\tesph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);\n+\trte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);\n+\n+\t/* retrieve SQN for later check */\n+\t*sqn = rte_be_to_cpu_32(esph->seq);\n+\n+\t/* reset mbuf metatdata: L2/L3 len, packet type */\n+\tmb->packet_type = RTE_PTYPE_UNKNOWN;\n+\tmb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |\n+\t\tsa->tx_offload.val;\n+\n+\t/* clear the PKT_RX_SEC_OFFLOAD flag if set */\n+\tmb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);\n+\treturn 0;\n+}\n+\n+/*\n+ * process ESP inbound transport packet.\n+ */\n+static inline int\n+inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,\n+\tuint32_t *sqn)\n+{\n+\tuint32_t hlen, icv_len, l2len, l3len, tlen;\n+\tstruct esp_hdr *esph;\n+\tstruct esp_tail *espt;\n+\tstruct rte_mbuf *ml;\n+\tchar *np, *op, *pd;\n+\n+\tif (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)\n+\t\treturn -EBADMSG;\n+\n+\ticv_len = sa->icv_len;\n+\n+\tml = rte_pktmbuf_lastseg(mb);\n+\tespt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,\n+\t\tml->data_len - icv_len - sizeof(*espt));\n+\n+\t/* check padding, return an error if something is wrong. 
*/\n+\tpd = (char *)espt - espt->pad_len;\n+\tif (memcmp(pd, esp_pad_bytes, espt->pad_len))\n+\t\treturn -EINVAL;\n+\n+\t/* cut of ICV, ESP tail and padding bytes */\n+\ttlen = icv_len + sizeof(*espt) + espt->pad_len;\n+\tml->data_len -= tlen;\n+\tmb->pkt_len -= tlen;\n+\n+\t/* retrieve SQN for later check */\n+\tl2len = mb->l2_len;\n+\tl3len = mb->l3_len;\n+\thlen = l2len + l3len;\n+\top = rte_pktmbuf_mtod(mb, char *);\n+\tesph = (struct esp_hdr *)(op + hlen);\n+\t*sqn = rte_be_to_cpu_32(esph->seq);\n+\n+\t/* cut off ESP header and IV, update L3 header */\n+\tnp = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);\n+\tremove_esph(np, op, hlen);\n+\tupdate_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,\n+\t\t\tespt->next_proto);\n+\n+\t/* reset mbuf packet type */\n+\tmb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);\n+\n+\t/* clear the PKT_RX_SEC_OFFLOAD flag if set */\n+\tmb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);\n+\treturn 0;\n+}\n+\n+/*\n+ * for group of ESP inbound packets perform SQN check and update.\n+ */\n+static inline uint16_t\n+esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],\n+\tuint32_t dr[], uint16_t num)\n+{\n+\tuint32_t i, k;\n+\tstruct replay_sqn *rsn;\n+\n+\trsn = rsn_update_start(sa);\n+\n+\tk = 0;\n+\tfor (i = 0; i != num; i++) {\n+\t\tif (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)\n+\t\t\tk++;\n+\t\telse\n+\t\t\tdr[i - k] = i;\n+\t}\n+\n+\trsn_update_finish(sa, rsn);\n+\treturn k;\n+}\n+\n+/*\n+ * process group of ESP inbound tunnel packets.\n+ */\n+uint16_t\n+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num)\n+{\n+\tuint32_t i, k, n;\n+\tstruct rte_ipsec_sa *sa;\n+\tuint32_t sqn[num];\n+\tuint32_t dr[num];\n+\n+\tsa = ss->sa;\n+\n+\t/* process packets, extract seq numbers */\n+\n+\tk = 0;\n+\tfor (i = 0; i != num; i++) {\n+\t\t/* good packet */\n+\t\tif (inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)\n+\t\t\tk++;\n+\t\t/* bad packet, will drop from furhter processing */\n+\t\telse\n+\t\t\tdr[i - k] = i;\n+\t}\n+\n+\t/* handle unprocessed mbufs */\n+\tif (k != num && k != 0)\n+\t\tmbuf_bad_move(mb, dr, num, num - k);\n+\n+\t/* update SQN and replay winow */\n+\tn = esp_inb_rsn_update(sa, sqn, dr, k);\n+\n+\t/* handle mbufs with wrong SQN */\n+\tif (n != k && n != 0)\n+\t\tmbuf_bad_move(mb, dr, k, k - n);\n+\n+\tif (n != num)\n+\t\trte_errno = EBADMSG;\n+\n+\treturn n;\n+}\n+\n+/*\n+ * process group of ESP inbound transport packets.\n+ */\n+uint16_t\n+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num)\n+{\n+\tuint32_t i, k, n;\n+\tuint32_t sqn[num];\n+\tstruct rte_ipsec_sa *sa;\n+\tuint32_t dr[num];\n+\n+\tsa = ss->sa;\n+\n+\t/* process packets, extract seq numbers */\n+\n+\tk = 0;\n+\tfor (i = 0; i != num; i++) {\n+\t\t/* good packet */\n+\t\tif (inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)\n+\t\t\tk++;\n+\t\t/* bad packet, will drop from furhter processing */\n+\t\telse\n+\t\t\tdr[i - k] = i;\n+\t}\n+\n+\t/* handle unprocessed mbufs */\n+\tif (k != num && k != 0)\n+\t\tmbuf_bad_move(mb, dr, num, num - k);\n+\n+\t/* update SQN and replay winow */\n+\tn = esp_inb_rsn_update(sa, sqn, dr, k);\n+\n+\t/* handle mbufs with wrong SQN */\n+\tif (n != k && n != 0)\n+\t\tmbuf_bad_move(mb, dr, k, k - n);\n+\n+\tif (n != num)\n+\t\trte_errno = EBADMSG;\n+\n+\treturn n;\n+}\ndiff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c\nnew file mode 100644\nindex 000000000..dce0fe051\n--- /dev/null\n+++ 
b/lib/librte_ipsec/esp_outb.c\n@@ -0,0 +1,535 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Intel Corporation\n+ */\n+\n+#include <rte_ipsec.h>\n+#include <rte_esp.h>\n+#include <rte_ip.h>\n+#include <rte_errno.h>\n+#include <rte_cryptodev.h>\n+\n+#include \"sa.h\"\n+#include \"ipsec_sqn.h\"\n+#include \"crypto.h\"\n+#include \"iph.h\"\n+#include \"misc.h\"\n+#include \"pad.h\"\n+\n+/*\n+ * setup crypto op and crypto sym op for ESP outbound packet.\n+ */\n+static inline void\n+outb_cop_prepare(struct rte_crypto_op *cop,\n+\tconst struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],\n+\tconst union sym_op_data *icv, uint32_t hlen, uint32_t plen)\n+{\n+\tstruct rte_crypto_sym_op *sop;\n+\tstruct aead_gcm_iv *gcm;\n+\n+\t/* fill sym op fields */\n+\tsop = cop->sym;\n+\n+\t/* AEAD (AES_GCM) case */\n+\tif (sa->aad_len != 0) {\n+\t\tsop->aead.data.offset = sa->ctp.cipher.offset + hlen;\n+\t\tsop->aead.data.length = sa->ctp.cipher.length + plen;\n+\t\tsop->aead.digest.data = icv->va;\n+\t\tsop->aead.digest.phys_addr = icv->pa;\n+\t\tsop->aead.aad.data = icv->va + sa->icv_len;\n+\t\tsop->aead.aad.phys_addr = icv->pa + sa->icv_len;\n+\n+\t\t/* fill AAD IV (located inside crypto op) */\n+\t\tgcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,\n+\t\t\tsa->iv_ofs);\n+\t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n+\t/* CRYPT+AUTH case */\n+\t} else {\n+\t\tsop->cipher.data.offset = sa->ctp.cipher.offset + hlen;\n+\t\tsop->cipher.data.length = sa->ctp.cipher.length + plen;\n+\t\tsop->auth.data.offset = sa->ctp.auth.offset + hlen;\n+\t\tsop->auth.data.length = sa->ctp.auth.length + plen;\n+\t\tsop->auth.digest.data = icv->va;\n+\t\tsop->auth.digest.phys_addr = icv->pa;\n+\t}\n+}\n+\n+/*\n+ * setup/update packet data and metadata for ESP outbound tunnel case.\n+ */\n+static inline int32_t\n+outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,\n+\tconst uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,\n+\tunion sym_op_data *icv)\n+{\n+\tuint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;\n+\tstruct rte_mbuf *ml;\n+\tstruct esp_hdr *esph;\n+\tstruct esp_tail *espt;\n+\tchar *ph, *pt;\n+\tuint64_t *iv;\n+\n+\t/* calculate extra header space required */\n+\thlen = sa->hdr_len + sa->iv_len + sizeof(*esph);\n+\n+\t/* size of ipsec protected data */\n+\tl2len = mb->l2_len;\n+\tplen = mb->pkt_len - l2len;\n+\n+\t/* number of bytes to encrypt */\n+\tclen = plen + sizeof(*espt);\n+\tclen = RTE_ALIGN_CEIL(clen, sa->pad_align);\n+\n+\t/* pad length + esp tail */\n+\tpdlen = clen - plen;\n+\ttlen = pdlen + sa->icv_len;\n+\n+\t/* do append and prepend */\n+\tml = rte_pktmbuf_lastseg(mb);\n+\tif (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))\n+\t\treturn -ENOSPC;\n+\n+\t/* prepend header */\n+\tph = rte_pktmbuf_prepend(mb, hlen - l2len);\n+\tif (ph == NULL)\n+\t\treturn -ENOSPC;\n+\n+\t/* append tail */\n+\tpdofs = ml->data_len;\n+\tml->data_len += tlen;\n+\tmb->pkt_len += tlen;\n+\tpt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);\n+\n+\t/* update pkt l2/l3 len */\n+\tmb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |\n+\t\tsa->tx_offload.val;\n+\n+\t/* copy tunnel pkt header */\n+\trte_memcpy(ph, sa->hdr, sa->hdr_len);\n+\n+\t/* update original and new ip header fields */\n+\tupdate_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,\n+\t\t\tsqn_low16(sqc));\n+\n+\t/* update spi, seqn and iv */\n+\tesph = (struct esp_hdr *)(ph + sa->hdr_len);\n+\tiv = (uint64_t *)(esph + 1);\n+\tcopy_iv(iv, ivp, 
sa->iv_len);\n+\n+\tesph->spi = sa->spi;\n+\tesph->seq = sqn_low32(sqc);\n+\n+\t/* offset for ICV */\n+\tpdofs += pdlen + sa->sqh_len;\n+\n+\t/* pad length */\n+\tpdlen -= sizeof(*espt);\n+\n+\t/* copy padding data */\n+\trte_memcpy(pt, esp_pad_bytes, pdlen);\n+\n+\t/* update esp trailer */\n+\tespt = (struct esp_tail *)(pt + pdlen);\n+\tespt->pad_len = pdlen;\n+\tespt->next_proto = sa->proto;\n+\n+\ticv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);\n+\ticv->pa = rte_pktmbuf_iova_offset(ml, pdofs);\n+\n+\treturn clen;\n+}\n+\n+/*\n+ * for pure cryptodev (lookaside none) depending on SA settings,\n+ * we might have to write some extra data to the packet.\n+ */\n+static inline void\n+outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,\n+\tconst union sym_op_data *icv)\n+{\n+\tuint32_t *psqh;\n+\tstruct aead_gcm_aad *aad;\n+\n+\t/* insert SQN.hi between ESP trailer and ICV */\n+\tif (sa->sqh_len != 0) {\n+\t\tpsqh = (uint32_t *)(icv->va - sa->sqh_len);\n+\t\tpsqh[0] = sqn_hi32(sqc);\n+\t}\n+\n+\t/*\n+\t * fill IV and AAD fields, if any (aad fields are placed after icv),\n+\t * right now we support only one AEAD algorithm: AES-GCM .\n+\t */\n+\tif (sa->aad_len != 0) {\n+\t\taad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);\n+\t\taead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));\n+\t}\n+}\n+\n+/*\n+ * setup/update packets and crypto ops for ESP outbound tunnel case.\n+ */\n+uint16_t\n+esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n+\tstruct rte_crypto_op *cop[], uint16_t num)\n+{\n+\tint32_t rc;\n+\tuint32_t i, k, n;\n+\tuint64_t sqn;\n+\trte_be64_t sqc;\n+\tstruct rte_ipsec_sa *sa;\n+\tstruct rte_cryptodev_sym_session *cs;\n+\tunion sym_op_data icv;\n+\tuint64_t iv[IPSEC_MAX_IV_QWORD];\n+\tuint32_t dr[num];\n+\n+\tsa = ss->sa;\n+\tcs = ss->crypto.ses;\n+\n+\tn = num;\n+\tsqn = esn_outb_update_sqn(sa, &n);\n+\tif (n != num)\n+\t\trte_errno = EOVERFLOW;\n+\n+\tk = 0;\n+\tfor (i = 0; i != n; i++) {\n+\n+\t\tsqc = rte_cpu_to_be_64(sqn + i);\n+\t\tgen_iv(iv, sqc);\n+\n+\t\t/* try to update the packet itself */\n+\t\trc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);\n+\n+\t\t/* success, setup crypto op */\n+\t\tif (rc >= 0) {\n+\t\t\toutb_pkt_xprepare(sa, sqc, &icv);\n+\t\t\tlksd_none_cop_prepare(cop[k], cs, mb[i]);\n+\t\t\toutb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);\n+\t\t\tk++;\n+\t\t/* failure, put packet into the death-row */\n+\t\t} else {\n+\t\t\tdr[i - k] = i;\n+\t\t\trte_errno = -rc;\n+\t\t}\n+\t}\n+\n+\t /* copy not prepared mbufs beyond good ones */\n+\tif (k != n && k != 0)\n+\t\tmbuf_bad_move(mb, dr, n, n - k);\n+\n+\treturn k;\n+}\n+\n+/*\n+ * setup/update packet data and metadata for ESP outbound transport case.\n+ */\n+static inline int32_t\n+outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,\n+\tconst uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,\n+\tuint32_t l2len, uint32_t l3len, union sym_op_data *icv)\n+{\n+\tuint8_t np;\n+\tuint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;\n+\tstruct rte_mbuf *ml;\n+\tstruct esp_hdr *esph;\n+\tstruct esp_tail *espt;\n+\tchar *ph, *pt;\n+\tuint64_t *iv;\n+\n+\tuhlen = l2len + l3len;\n+\tplen = mb->pkt_len - uhlen;\n+\n+\t/* calculate extra header space required */\n+\thlen = sa->iv_len + sizeof(*esph);\n+\n+\t/* number of bytes to encrypt */\n+\tclen = plen + sizeof(*espt);\n+\tclen = RTE_ALIGN_CEIL(clen, sa->pad_align);\n+\n+\t/* pad length + esp tail */\n+\tpdlen = clen - plen;\n+\ttlen = pdlen + sa->icv_len;\n+\n+\t/* do append and insert */\n+\tml = 
rte_pktmbuf_lastseg(mb);\n+\tif (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))\n+\t\treturn -ENOSPC;\n+\n+\t/* prepend space for ESP header */\n+\tph = rte_pktmbuf_prepend(mb, hlen);\n+\tif (ph == NULL)\n+\t\treturn -ENOSPC;\n+\n+\t/* append tail */\n+\tpdofs = ml->data_len;\n+\tml->data_len += tlen;\n+\tmb->pkt_len += tlen;\n+\tpt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);\n+\n+\t/* shift L2/L3 headers */\n+\tinsert_esph(ph, ph + hlen, uhlen);\n+\n+\t/* update ip  header fields */\n+\tnp = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,\n+\t\t\tIPPROTO_ESP);\n+\n+\t/* update spi, seqn and iv */\n+\tesph = (struct esp_hdr *)(ph + uhlen);\n+\tiv = (uint64_t *)(esph + 1);\n+\tcopy_iv(iv, ivp, sa->iv_len);\n+\n+\tesph->spi = sa->spi;\n+\tesph->seq = sqn_low32(sqc);\n+\n+\t/* offset for ICV */\n+\tpdofs += pdlen + sa->sqh_len;\n+\n+\t/* pad length */\n+\tpdlen -= sizeof(*espt);\n+\n+\t/* copy padding data */\n+\trte_memcpy(pt, esp_pad_bytes, pdlen);\n+\n+\t/* update esp trailer */\n+\tespt = (struct esp_tail *)(pt + pdlen);\n+\tespt->pad_len = pdlen;\n+\tespt->next_proto = np;\n+\n+\ticv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);\n+\ticv->pa = rte_pktmbuf_iova_offset(ml, pdofs);\n+\n+\treturn clen;\n+}\n+\n+/*\n+ * setup/update packets and crypto ops for ESP outbound transport case.\n+ */\n+uint16_t\n+esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n+\tstruct rte_crypto_op *cop[], uint16_t num)\n+{\n+\tint32_t rc;\n+\tuint32_t i, k, n, l2, l3;\n+\tuint64_t sqn;\n+\trte_be64_t sqc;\n+\tstruct rte_ipsec_sa *sa;\n+\tstruct rte_cryptodev_sym_session *cs;\n+\tunion sym_op_data icv;\n+\tuint64_t iv[IPSEC_MAX_IV_QWORD];\n+\tuint32_t dr[num];\n+\n+\tsa = ss->sa;\n+\tcs = ss->crypto.ses;\n+\n+\tn = num;\n+\tsqn = esn_outb_update_sqn(sa, &n);\n+\tif (n != num)\n+\t\trte_errno = EOVERFLOW;\n+\n+\tk = 0;\n+\tfor (i = 0; i != n; i++) {\n+\n+\t\tl2 = mb[i]->l2_len;\n+\t\tl3 = mb[i]->l3_len;\n+\n+\t\tsqc = rte_cpu_to_be_64(sqn + i);\n+\t\tgen_iv(iv, sqc);\n+\n+\t\t/* try to update the packet itself */\n+\t\trc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv);\n+\n+\t\t/* success, setup crypto op */\n+\t\tif (rc >= 0) {\n+\t\t\toutb_pkt_xprepare(sa, sqc, &icv);\n+\t\t\tlksd_none_cop_prepare(cop[k], cs, mb[i]);\n+\t\t\toutb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);\n+\t\t\tk++;\n+\t\t/* failure, put packet into the death-row */\n+\t\t} else {\n+\t\t\tdr[i - k] = i;\n+\t\t\trte_errno = -rc;\n+\t\t}\n+\t}\n+\n+\t/* copy not prepared mbufs beyond good ones */\n+\tif (k != n && k != 0)\n+\t\tmbuf_bad_move(mb, dr, n, n - k);\n+\n+\treturn k;\n+}\n+\n+/*\n+ * process outbound packets for SA with ESN support,\n+ * for algorithms that require SQN.hibits to be implictly included\n+ * into digest computation.\n+ * In that case we have to move ICV bytes back to their proper place.\n+ */\n+uint16_t\n+esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n+\tuint16_t num)\n+{\n+\tuint32_t i, k, icv_len, *icv;\n+\tstruct rte_mbuf *ml;\n+\tstruct rte_ipsec_sa *sa;\n+\tuint32_t dr[num];\n+\n+\tsa = ss->sa;\n+\n+\tk = 0;\n+\ticv_len = sa->icv_len;\n+\n+\tfor (i = 0; i != num; i++) {\n+\t\tif ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {\n+\t\t\tml = rte_pktmbuf_lastseg(mb[i]);\n+\t\t\ticv = rte_pktmbuf_mtod_offset(ml, void *,\n+\t\t\t\tml->data_len - icv_len);\n+\t\t\tremove_sqh(icv, icv_len);\n+\t\t\tk++;\n+\t\t} else\n+\t\t\tdr[i - k] = i;\n+\t}\n+\n+\t/* handle unprocessed mbufs */\n+\tif (k != num) 
{\n+\t\trte_errno = EBADMSG;\n+\t\tif (k != 0)\n+\t\t\tmbuf_bad_move(mb, dr, num, num - k);\n+\t}\n+\n+\treturn k;\n+}\n+\n+/*\n+ * prepare packets for inline ipsec processing:\n+ * set ol_flags and attach metadata.\n+ */\n+static inline void\n+inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num)\n+{\n+\tuint32_t i, ol_flags;\n+\n+\tol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;\n+\tfor (i = 0; i != num; i++) {\n+\n+\t\tmb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;\n+\t\tif (ol_flags != 0)\n+\t\t\trte_security_set_pkt_metadata(ss->security.ctx,\n+\t\t\t\tss->security.ses, mb[i], NULL);\n+\t}\n+}\n+\n+/*\n+ * process group of ESP outbound tunnel packets destined for\n+ * INLINE_CRYPTO type of device.\n+ */\n+uint16_t\n+inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num)\n+{\n+\tint32_t rc;\n+\tuint32_t i, k, n;\n+\tuint64_t sqn;\n+\trte_be64_t sqc;\n+\tstruct rte_ipsec_sa *sa;\n+\tunion sym_op_data icv;\n+\tuint64_t iv[IPSEC_MAX_IV_QWORD];\n+\tuint32_t dr[num];\n+\n+\tsa = ss->sa;\n+\n+\tn = num;\n+\tsqn = esn_outb_update_sqn(sa, &n);\n+\tif (n != num)\n+\t\trte_errno = EOVERFLOW;\n+\n+\tk = 0;\n+\tfor (i = 0; i != n; i++) {\n+\n+\t\tsqc = rte_cpu_to_be_64(sqn + i);\n+\t\tgen_iv(iv, sqc);\n+\n+\t\t/* try to update the packet itself */\n+\t\trc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);\n+\n+\t\tk += (rc >= 0);\n+\n+\t\t/* failure, put packet into the death-row */\n+\t\tif (rc < 0) {\n+\t\t\tdr[i - k] = i;\n+\t\t\trte_errno = -rc;\n+\t\t}\n+\t}\n+\n+\t/* copy not processed mbufs beyond good ones */\n+\tif (k != n && k != 0)\n+\t\tmbuf_bad_move(mb, dr, n, n - k);\n+\n+\tinline_outb_mbuf_prepare(ss, mb, k);\n+\treturn k;\n+}\n+\n+/*\n+ * process group of ESP outbound transport packets destined for\n+ * INLINE_CRYPTO type of device.\n+ */\n+uint16_t\n+inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num)\n+{\n+\tint32_t rc;\n+\tuint32_t i, k, n, l2, l3;\n+\tuint64_t sqn;\n+\trte_be64_t sqc;\n+\tstruct rte_ipsec_sa *sa;\n+\tunion sym_op_data icv;\n+\tuint64_t iv[IPSEC_MAX_IV_QWORD];\n+\tuint32_t dr[num];\n+\n+\tsa = ss->sa;\n+\n+\tn = num;\n+\tsqn = esn_outb_update_sqn(sa, &n);\n+\tif (n != num)\n+\t\trte_errno = EOVERFLOW;\n+\n+\tk = 0;\n+\tfor (i = 0; i != n; i++) {\n+\n+\t\tl2 = mb[i]->l2_len;\n+\t\tl3 = mb[i]->l3_len;\n+\n+\t\tsqc = rte_cpu_to_be_64(sqn + i);\n+\t\tgen_iv(iv, sqc);\n+\n+\t\t/* try to update the packet itself */\n+\t\trc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],\n+\t\t\t\tl2, l3, &icv);\n+\n+\t\tk += (rc >= 0);\n+\n+\t\t/* failure, put packet into the death-row */\n+\t\tif (rc < 0) {\n+\t\t\tdr[i - k] = i;\n+\t\t\trte_errno = -rc;\n+\t\t}\n+\t}\n+\n+\t/* copy not processed mbufs beyond good ones */\n+\tif (k != n && k != 0)\n+\t\tmbuf_bad_move(mb, dr, n, n - k);\n+\n+\tinline_outb_mbuf_prepare(ss, mb, k);\n+\treturn k;\n+}\n+\n+/*\n+ * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:\n+ * actual processing is done by HW/PMD, just set flags and metadata.\n+ */\n+uint16_t\n+inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num)\n+{\n+\tinline_outb_mbuf_prepare(ss, mb, num);\n+\treturn num;\n+}\ndiff --git a/lib/librte_ipsec/ipsec_sqn.h b/lib/librte_ipsec/ipsec_sqn.h\nindex a3ae7e2de..4ba079d75 100644\n--- a/lib/librte_ipsec/ipsec_sqn.h\n+++ b/lib/librte_ipsec/ipsec_sqn.h\n@@ -56,21 +56,6 @@ sqn_low16(rte_be64_t sqn)\n #endif\n }\n \n-/*\n- * for 
given size, calculate required number of buckets.\n- */\n-static uint32_t\n-replay_num_bucket(uint32_t wsz)\n-{\n-\tuint32_t nb;\n-\n-\tnb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /\n-\t\tWINDOW_BUCKET_SIZE);\n-\tnb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);\n-\n-\treturn nb;\n-}\n-\n /*\n  * According to RFC4303 A2.1, determine the high-order bit of sequence number.\n  * use 32bit arithmetic inside, return uint64_t.\n@@ -222,21 +207,6 @@ esn_inb_update_sqn(struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,\n  * between writer and readers.\n  */\n \n-/**\n- * Based on number of buckets calculated required size for the\n- * structure that holds replay window and sequence number (RSN) information.\n- */\n-static size_t\n-rsn_size(uint32_t nb_bucket)\n-{\n-\tsize_t sz;\n-\tstruct replay_sqn *rsn;\n-\n-\tsz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);\n-\tsz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);\n-\treturn sz;\n-}\n-\n /**\n  * Copy replay window and SQN.\n  */\ndiff --git a/lib/librte_ipsec/meson.build b/lib/librte_ipsec/meson.build\nindex d2427b809..18fb2a143 100644\n--- a/lib/librte_ipsec/meson.build\n+++ b/lib/librte_ipsec/meson.build\n@@ -3,7 +3,7 @@\n \n allow_experimental_apis = true\n \n-sources=files('sa.c', 'ses.c')\n+sources=files('esp_inb.c', 'esp_outb.c', 'sa.c', 'ses.c')\n \n install_headers = files('rte_ipsec.h', 'rte_ipsec_group.h', 'rte_ipsec_sa.h')\n \ndiff --git a/lib/librte_ipsec/misc.h b/lib/librte_ipsec/misc.h\nnew file mode 100644\nindex 000000000..8988b4903\n--- /dev/null\n+++ b/lib/librte_ipsec/misc.h\n@@ -0,0 +1,41 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Intel Corporation\n+ */\n+\n+#ifndef _MISC_H_\n+#define _MISC_H_\n+\n+/**\n+ * @file misc.h\n+ * Contains miscelaneous functions/structures/macros used internally\n+ * by ipsec library.\n+ */\n+\n+/*\n+ * Move bad (unprocessed) mbufs beyond the good (processed) ones.\n+ * dr[] contains the indexes of bad mbufs insinde the mb[].\n+ */\n+static inline void\n+mbuf_bad_move(struct rte_mbuf *mb[], const uint32_t dr[], uint32_t num,\n+\tuint32_t drn)\n+{\n+\tuint32_t i, j, k;\n+\tstruct rte_mbuf *drb[drn];\n+\n+\tj = 0;\n+\tk = 0;\n+\n+\t/* copy bad ones into a temp place */\n+\tfor (i = 0; i != num; i++) {\n+\t\tif (j != drn && i == dr[j])\n+\t\t\tdrb[j++] = mb[i];\n+\t\telse\n+\t\t\tmb[k++] = mb[i];\n+\t}\n+\n+\t/* copy bad ones after the good ones */\n+\tfor (i = 0; i != drn; i++)\n+\t\tmb[k + i] = drb[i];\n+}\n+\n+#endif /* _MISC_H_ */\ndiff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c\nindex e4e3d7fc3..be6109768 100644\n--- a/lib/librte_ipsec/sa.c\n+++ b/lib/librte_ipsec/sa.c\n@@ -12,6 +12,7 @@\n #include \"ipsec_sqn.h\"\n #include \"crypto.h\"\n #include \"iph.h\"\n+#include \"misc.h\"\n #include \"pad.h\"\n \n #define MBUF_MAX_L2_LEN\t\tRTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)\n@@ -82,6 +83,36 @@ rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)\n \treturn sa->type;\n }\n \n+/**\n+ * Based on number of buckets calculated required size for the\n+ * structure that holds replay window and sequence number (RSN) information.\n+ */\n+static size_t\n+rsn_size(uint32_t nb_bucket)\n+{\n+\tsize_t sz;\n+\tstruct replay_sqn *rsn;\n+\n+\tsz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);\n+\tsz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);\n+\treturn sz;\n+}\n+\n+/*\n+ * for given size, calculate required number of buckets.\n+ */\n+static uint32_t\n+replay_num_bucket(uint32_t wsz)\n+{\n+\tuint32_t nb;\n+\n+\tnb = 
rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /\n+\t\tWINDOW_BUCKET_SIZE);\n+\tnb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);\n+\n+\treturn nb;\n+}\n+\n static int32_t\n ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)\n {\n@@ -411,569 +442,6 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,\n \treturn sz;\n }\n \n-/*\n- * Move bad (unprocessed) mbufs beyond the good (processed) ones.\n- * dr[] contains the indexes of bad mbufs insinde the mb[].\n- */\n-static void\n-mbuf_bad_move(struct rte_mbuf *mb[], const uint32_t dr[], uint32_t num,\n-\tuint32_t drn)\n-{\n-\tuint32_t i, j, k;\n-\tstruct rte_mbuf *drb[drn];\n-\n-\tj = 0;\n-\tk = 0;\n-\n-\t/* copy bad ones into a temp place */\n-\tfor (i = 0; i != num; i++) {\n-\t\tif (j != drn && i == dr[j])\n-\t\t\tdrb[j++] = mb[i];\n-\t\telse\n-\t\t\tmb[k++] = mb[i];\n-\t}\n-\n-\t/* copy bad ones after the good ones */\n-\tfor (i = 0; i != drn; i++)\n-\t\tmb[k + i] = drb[i];\n-}\n-\n-/*\n- * setup crypto ops for LOOKASIDE_NONE (pure crypto) type of devices.\n- */\n-static inline void\n-lksd_none_cop_prepare(struct rte_crypto_op *cop,\n-\tstruct rte_cryptodev_sym_session *cs, struct rte_mbuf *mb)\n-{\n-\tstruct rte_crypto_sym_op *sop;\n-\n-\tsop = cop->sym;\n-\tcop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;\n-\tcop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;\n-\tcop->sess_type = RTE_CRYPTO_OP_WITH_SESSION;\n-\tsop->m_src = mb;\n-\t__rte_crypto_sym_op_attach_sym_session(sop, cs);\n-}\n-\n-/*\n- * setup crypto op and crypto sym op for ESP outbound packet.\n- */\n-static inline void\n-esp_outb_cop_prepare(struct rte_crypto_op *cop,\n-\tconst struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],\n-\tconst union sym_op_data *icv, uint32_t hlen, uint32_t plen)\n-{\n-\tstruct rte_crypto_sym_op *sop;\n-\tstruct aead_gcm_iv *gcm;\n-\n-\t/* fill sym op fields */\n-\tsop = cop->sym;\n-\n-\t/* AEAD (AES_GCM) case */\n-\tif (sa->aad_len != 0) {\n-\t\tsop->aead.data.offset = sa->ctp.cipher.offset + hlen;\n-\t\tsop->aead.data.length = sa->ctp.cipher.length + plen;\n-\t\tsop->aead.digest.data = icv->va;\n-\t\tsop->aead.digest.phys_addr = icv->pa;\n-\t\tsop->aead.aad.data = icv->va + sa->icv_len;\n-\t\tsop->aead.aad.phys_addr = icv->pa + sa->icv_len;\n-\n-\t\t/* fill AAD IV (located inside crypto op) */\n-\t\tgcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,\n-\t\t\tsa->iv_ofs);\n-\t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n-\t/* CRYPT+AUTH case */\n-\t} else {\n-\t\tsop->cipher.data.offset = sa->ctp.cipher.offset + hlen;\n-\t\tsop->cipher.data.length = sa->ctp.cipher.length + plen;\n-\t\tsop->auth.data.offset = sa->ctp.auth.offset + hlen;\n-\t\tsop->auth.data.length = sa->ctp.auth.length + plen;\n-\t\tsop->auth.digest.data = icv->va;\n-\t\tsop->auth.digest.phys_addr = icv->pa;\n-\t}\n-}\n-\n-/*\n- * setup/update packet data and metadata for ESP outbound tunnel case.\n- */\n-static inline int32_t\n-esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,\n-\tconst uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,\n-\tunion sym_op_data *icv)\n-{\n-\tuint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;\n-\tstruct rte_mbuf *ml;\n-\tstruct esp_hdr *esph;\n-\tstruct esp_tail *espt;\n-\tchar *ph, *pt;\n-\tuint64_t *iv;\n-\n-\t/* calculate extra header space required */\n-\thlen = sa->hdr_len + sa->iv_len + sizeof(*esph);\n-\n-\t/* size of ipsec protected data */\n-\tl2len = mb->l2_len;\n-\tplen = mb->pkt_len - l2len;\n-\n-\t/* number of bytes to encrypt */\n-\tclen = plen + 
sizeof(*espt);\n-\tclen = RTE_ALIGN_CEIL(clen, sa->pad_align);\n-\n-\t/* pad length + esp tail */\n-\tpdlen = clen - plen;\n-\ttlen = pdlen + sa->icv_len;\n-\n-\t/* do append and prepend */\n-\tml = rte_pktmbuf_lastseg(mb);\n-\tif (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))\n-\t\treturn -ENOSPC;\n-\n-\t/* prepend header */\n-\tph = rte_pktmbuf_prepend(mb, hlen - l2len);\n-\tif (ph == NULL)\n-\t\treturn -ENOSPC;\n-\n-\t/* append tail */\n-\tpdofs = ml->data_len;\n-\tml->data_len += tlen;\n-\tmb->pkt_len += tlen;\n-\tpt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);\n-\n-\t/* update pkt l2/l3 len */\n-\tmb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |\n-\t\tsa->tx_offload.val;\n-\n-\t/* copy tunnel pkt header */\n-\trte_memcpy(ph, sa->hdr, sa->hdr_len);\n-\n-\t/* update original and new ip header fields */\n-\tupdate_tun_l3hdr(sa, ph + sa->hdr_l3_off, mb->pkt_len, sa->hdr_l3_off,\n-\t\t\tsqn_low16(sqc));\n-\n-\t/* update spi, seqn and iv */\n-\tesph = (struct esp_hdr *)(ph + sa->hdr_len);\n-\tiv = (uint64_t *)(esph + 1);\n-\tcopy_iv(iv, ivp, sa->iv_len);\n-\n-\tesph->spi = sa->spi;\n-\tesph->seq = sqn_low32(sqc);\n-\n-\t/* offset for ICV */\n-\tpdofs += pdlen + sa->sqh_len;\n-\n-\t/* pad length */\n-\tpdlen -= sizeof(*espt);\n-\n-\t/* copy padding data */\n-\trte_memcpy(pt, esp_pad_bytes, pdlen);\n-\n-\t/* update esp trailer */\n-\tespt = (struct esp_tail *)(pt + pdlen);\n-\tespt->pad_len = pdlen;\n-\tespt->next_proto = sa->proto;\n-\n-\ticv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);\n-\ticv->pa = rte_pktmbuf_iova_offset(ml, pdofs);\n-\n-\treturn clen;\n-}\n-\n-/*\n- * for pure cryptodev (lookaside none) depending on SA settings,\n- * we might have to write some extra data to the packet.\n- */\n-static inline void\n-outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,\n-\tconst union sym_op_data *icv)\n-{\n-\tuint32_t *psqh;\n-\tstruct aead_gcm_aad *aad;\n-\n-\t/* insert SQN.hi between ESP trailer and ICV */\n-\tif (sa->sqh_len != 0) {\n-\t\tpsqh = (uint32_t *)(icv->va - sa->sqh_len);\n-\t\tpsqh[0] = sqn_hi32(sqc);\n-\t}\n-\n-\t/*\n-\t * fill IV and AAD fields, if any (aad fields are placed after icv),\n-\t * right now we support only one AEAD algorithm: AES-GCM .\n-\t */\n-\tif (sa->aad_len != 0) {\n-\t\taad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);\n-\t\taead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));\n-\t}\n-}\n-\n-/*\n- * setup/update packets and crypto ops for ESP outbound tunnel case.\n- */\n-static uint16_t\n-outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n-\tstruct rte_crypto_op *cop[], uint16_t num)\n-{\n-\tint32_t rc;\n-\tuint32_t i, k, n;\n-\tuint64_t sqn;\n-\trte_be64_t sqc;\n-\tstruct rte_ipsec_sa *sa;\n-\tstruct rte_cryptodev_sym_session *cs;\n-\tunion sym_op_data icv;\n-\tuint64_t iv[IPSEC_MAX_IV_QWORD];\n-\tuint32_t dr[num];\n-\n-\tsa = ss->sa;\n-\tcs = ss->crypto.ses;\n-\n-\tn = num;\n-\tsqn = esn_outb_update_sqn(sa, &n);\n-\tif (n != num)\n-\t\trte_errno = EOVERFLOW;\n-\n-\tk = 0;\n-\tfor (i = 0; i != n; i++) {\n-\n-\t\tsqc = rte_cpu_to_be_64(sqn + i);\n-\t\tgen_iv(iv, sqc);\n-\n-\t\t/* try to update the packet itself */\n-\t\trc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);\n-\n-\t\t/* success, setup crypto op */\n-\t\tif (rc >= 0) {\n-\t\t\toutb_pkt_xprepare(sa, sqc, &icv);\n-\t\t\tlksd_none_cop_prepare(cop[k], cs, mb[i]);\n-\t\t\tesp_outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);\n-\t\t\tk++;\n-\t\t/* failure, put packet into the death-row */\n-\t\t} else {\n-\t\t\tdr[i - k] = 
i;\n-\t\t\trte_errno = -rc;\n-\t\t}\n-\t}\n-\n-\t /* copy not prepared mbufs beyond good ones */\n-\tif (k != n && k != 0)\n-\t\tmbuf_bad_move(mb, dr, n, n - k);\n-\n-\treturn k;\n-}\n-\n-/*\n- * setup/update packet data and metadata for ESP outbound transport case.\n- */\n-static inline int32_t\n-esp_outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,\n-\tconst uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,\n-\tuint32_t l2len, uint32_t l3len, union sym_op_data *icv)\n-{\n-\tuint8_t np;\n-\tuint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;\n-\tstruct rte_mbuf *ml;\n-\tstruct esp_hdr *esph;\n-\tstruct esp_tail *espt;\n-\tchar *ph, *pt;\n-\tuint64_t *iv;\n-\n-\tuhlen = l2len + l3len;\n-\tplen = mb->pkt_len - uhlen;\n-\n-\t/* calculate extra header space required */\n-\thlen = sa->iv_len + sizeof(*esph);\n-\n-\t/* number of bytes to encrypt */\n-\tclen = plen + sizeof(*espt);\n-\tclen = RTE_ALIGN_CEIL(clen, sa->pad_align);\n-\n-\t/* pad length + esp tail */\n-\tpdlen = clen - plen;\n-\ttlen = pdlen + sa->icv_len;\n-\n-\t/* do append and insert */\n-\tml = rte_pktmbuf_lastseg(mb);\n-\tif (tlen + sa->sqh_len + sa->aad_len > rte_pktmbuf_tailroom(ml))\n-\t\treturn -ENOSPC;\n-\n-\t/* prepend space for ESP header */\n-\tph = rte_pktmbuf_prepend(mb, hlen);\n-\tif (ph == NULL)\n-\t\treturn -ENOSPC;\n-\n-\t/* append tail */\n-\tpdofs = ml->data_len;\n-\tml->data_len += tlen;\n-\tmb->pkt_len += tlen;\n-\tpt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);\n-\n-\t/* shift L2/L3 headers */\n-\tinsert_esph(ph, ph + hlen, uhlen);\n-\n-\t/* update ip  header fields */\n-\tnp = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len, l2len, l3len,\n-\t\t\tIPPROTO_ESP);\n-\n-\t/* update spi, seqn and iv */\n-\tesph = (struct esp_hdr *)(ph + uhlen);\n-\tiv = (uint64_t *)(esph + 1);\n-\tcopy_iv(iv, ivp, sa->iv_len);\n-\n-\tesph->spi = sa->spi;\n-\tesph->seq = sqn_low32(sqc);\n-\n-\t/* offset for ICV */\n-\tpdofs += pdlen + sa->sqh_len;\n-\n-\t/* pad length */\n-\tpdlen -= sizeof(*espt);\n-\n-\t/* copy padding data */\n-\trte_memcpy(pt, esp_pad_bytes, pdlen);\n-\n-\t/* update esp trailer */\n-\tespt = (struct esp_tail *)(pt + pdlen);\n-\tespt->pad_len = pdlen;\n-\tespt->next_proto = np;\n-\n-\ticv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);\n-\ticv->pa = rte_pktmbuf_iova_offset(ml, pdofs);\n-\n-\treturn clen;\n-}\n-\n-/*\n- * setup/update packets and crypto ops for ESP outbound transport case.\n- */\n-static uint16_t\n-outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n-\tstruct rte_crypto_op *cop[], uint16_t num)\n-{\n-\tint32_t rc;\n-\tuint32_t i, k, n, l2, l3;\n-\tuint64_t sqn;\n-\trte_be64_t sqc;\n-\tstruct rte_ipsec_sa *sa;\n-\tstruct rte_cryptodev_sym_session *cs;\n-\tunion sym_op_data icv;\n-\tuint64_t iv[IPSEC_MAX_IV_QWORD];\n-\tuint32_t dr[num];\n-\n-\tsa = ss->sa;\n-\tcs = ss->crypto.ses;\n-\n-\tn = num;\n-\tsqn = esn_outb_update_sqn(sa, &n);\n-\tif (n != num)\n-\t\trte_errno = EOVERFLOW;\n-\n-\tk = 0;\n-\tfor (i = 0; i != n; i++) {\n-\n-\t\tl2 = mb[i]->l2_len;\n-\t\tl3 = mb[i]->l3_len;\n-\n-\t\tsqc = rte_cpu_to_be_64(sqn + i);\n-\t\tgen_iv(iv, sqc);\n-\n-\t\t/* try to update the packet itself */\n-\t\trc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],\n-\t\t\t\tl2, l3, &icv);\n-\n-\t\t/* success, setup crypto op */\n-\t\tif (rc >= 0) {\n-\t\t\toutb_pkt_xprepare(sa, sqc, &icv);\n-\t\t\tlksd_none_cop_prepare(cop[k], cs, mb[i]);\n-\t\t\tesp_outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);\n-\t\t\tk++;\n-\t\t/* failure, put packet into the death-row 
*/\n-\t\t} else {\n-\t\t\tdr[i - k] = i;\n-\t\t\trte_errno = -rc;\n-\t\t}\n-\t}\n-\n-\t/* copy not prepared mbufs beyond good ones */\n-\tif (k != n && k != 0)\n-\t\tmbuf_bad_move(mb, dr, n, n - k);\n-\n-\treturn k;\n-}\n-\n-/*\n- * setup crypto op and crypto sym op for ESP inbound tunnel packet.\n- */\n-static inline int32_t\n-esp_inb_tun_cop_prepare(struct rte_crypto_op *cop,\n-\tconst struct rte_ipsec_sa *sa, struct rte_mbuf *mb,\n-\tconst union sym_op_data *icv, uint32_t pofs, uint32_t plen)\n-{\n-\tstruct rte_crypto_sym_op *sop;\n-\tstruct aead_gcm_iv *gcm;\n-\tuint64_t *ivc, *ivp;\n-\tuint32_t clen;\n-\n-\tclen = plen - sa->ctp.cipher.length;\n-\tif ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0)\n-\t\treturn -EINVAL;\n-\n-\t/* fill sym op fields */\n-\tsop = cop->sym;\n-\n-\t/* AEAD (AES_GCM) case */\n-\tif (sa->aad_len != 0) {\n-\t\tsop->aead.data.offset = pofs + sa->ctp.cipher.offset;\n-\t\tsop->aead.data.length = clen;\n-\t\tsop->aead.digest.data = icv->va;\n-\t\tsop->aead.digest.phys_addr = icv->pa;\n-\t\tsop->aead.aad.data = icv->va + sa->icv_len;\n-\t\tsop->aead.aad.phys_addr = icv->pa + sa->icv_len;\n-\n-\t\t/* fill AAD IV (located inside crypto op) */\n-\t\tgcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,\n-\t\t\tsa->iv_ofs);\n-\t\tivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,\n-\t\t\tpofs + sizeof(struct esp_hdr));\n-\t\taead_gcm_iv_fill(gcm, ivp[0], sa->salt);\n-\t/* CRYPT+AUTH case */\n-\t} else {\n-\t\tsop->cipher.data.offset = pofs + sa->ctp.cipher.offset;\n-\t\tsop->cipher.data.length = clen;\n-\t\tsop->auth.data.offset = pofs + sa->ctp.auth.offset;\n-\t\tsop->auth.data.length = plen - sa->ctp.auth.length;\n-\t\tsop->auth.digest.data = icv->va;\n-\t\tsop->auth.digest.phys_addr = icv->pa;\n-\n-\t\t/* copy iv from the input packet to the cop */\n-\t\tivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs);\n-\t\tivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,\n-\t\t\tpofs + sizeof(struct esp_hdr));\n-\t\tcopy_iv(ivc, ivp, sa->iv_len);\n-\t}\n-\treturn 0;\n-}\n-\n-/*\n- * for pure cryptodev (lookaside none) depending on SA settings,\n- * we might have to write some extra data to the packet.\n- */\n-static inline void\n-inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,\n-\tconst union sym_op_data *icv)\n-{\n-\tstruct aead_gcm_aad *aad;\n-\n-\t/* insert SQN.hi between ESP trailer and ICV */\n-\tif (sa->sqh_len != 0)\n-\t\tinsert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);\n-\n-\t/*\n-\t * fill AAD fields, if any (aad fields are placed after icv),\n-\t * right now we support only one AEAD algorithm: AES-GCM.\n-\t */\n-\tif (sa->aad_len != 0) {\n-\t\taad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);\n-\t\taead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));\n-\t}\n-}\n-\n-/*\n- * setup/update packet data and metadata for ESP inbound tunnel case.\n- */\n-static inline int32_t\n-esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa,\n-\tconst struct replay_sqn *rsn, struct rte_mbuf *mb,\n-\tuint32_t hlen, union sym_op_data *icv)\n-{\n-\tint32_t rc;\n-\tuint64_t sqn;\n-\tuint32_t icv_ofs, plen;\n-\tstruct rte_mbuf *ml;\n-\tstruct esp_hdr *esph;\n-\n-\tesph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);\n-\n-\t/*\n-\t * retrieve and reconstruct SQN, then check it, then\n-\t * convert it back into network byte order.\n-\t */\n-\tsqn = rte_be_to_cpu_32(esph->seq);\n-\tif (IS_ESN(sa))\n-\t\tsqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);\n-\n-\trc = esn_inb_check_sqn(rsn, sa, sqn);\n-\tif (rc != 0)\n-\t\treturn rc;\n-\n-\tsqn = 
rte_cpu_to_be_64(sqn);\n-\n-\t/* start packet manipulation */\n-\tplen = mb->pkt_len;\n-\tplen = plen - hlen;\n-\n-\tml = rte_pktmbuf_lastseg(mb);\n-\ticv_ofs = ml->data_len - sa->icv_len + sa->sqh_len;\n-\n-\t/* we have to allocate space for AAD somewhere,\n-\t * right now - just use free trailing space at the last segment.\n-\t * Would probably be more convenient to reserve space for AAD\n-\t * inside rte_crypto_op itself\n-\t * (again for IV space is already reserved inside cop).\n-\t */\n-\tif (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))\n-\t\treturn -ENOSPC;\n-\n-\ticv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);\n-\ticv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);\n-\n-\tinb_pkt_xprepare(sa, sqn, icv);\n-\treturn plen;\n-}\n-\n-/*\n- * setup/update packets and crypto ops for ESP inbound case.\n- */\n-static uint16_t\n-inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n-\tstruct rte_crypto_op *cop[], uint16_t num)\n-{\n-\tint32_t rc;\n-\tuint32_t i, k, hl;\n-\tstruct rte_ipsec_sa *sa;\n-\tstruct rte_cryptodev_sym_session *cs;\n-\tstruct replay_sqn *rsn;\n-\tunion sym_op_data icv;\n-\tuint32_t dr[num];\n-\n-\tsa = ss->sa;\n-\tcs = ss->crypto.ses;\n-\trsn = rsn_acquire(sa);\n-\n-\tk = 0;\n-\tfor (i = 0; i != num; i++) {\n-\n-\t\thl = mb[i]->l2_len + mb[i]->l3_len;\n-\t\trc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv);\n-\t\tif (rc >= 0) {\n-\t\t\tlksd_none_cop_prepare(cop[k], cs, mb[i]);\n-\t\t\trc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv,\n-\t\t\t\thl, rc);\n-\t\t}\n-\n-\t\tk += (rc == 0);\n-\t\tif (rc != 0) {\n-\t\t\tdr[i - k] = i;\n-\t\t\trte_errno = -rc;\n-\t\t}\n-\t}\n-\n-\trsn_release(sa, rsn);\n-\n-\t/* copy not prepared mbufs beyond good ones */\n-\tif (k != num && k != 0)\n-\t\tmbuf_bad_move(mb, dr, num, num - k);\n-\n-\treturn k;\n-}\n-\n /*\n  *  setup crypto ops for LOOKASIDE_PROTO type of devices.\n  */\n@@ -1008,265 +476,6 @@ lksd_proto_prepare(const struct rte_ipsec_session *ss,\n \treturn num;\n }\n \n-/*\n- * process ESP inbound tunnel packet.\n- */\n-static inline int\n-esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,\n-\tuint32_t *sqn)\n-{\n-\tuint32_t hlen, icv_len, tlen;\n-\tstruct esp_hdr *esph;\n-\tstruct esp_tail *espt;\n-\tstruct rte_mbuf *ml;\n-\tchar *pd;\n-\n-\tif (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)\n-\t\treturn -EBADMSG;\n-\n-\ticv_len = sa->icv_len;\n-\n-\tml = rte_pktmbuf_lastseg(mb);\n-\tespt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,\n-\t\tml->data_len - icv_len - sizeof(*espt));\n-\n-\t/*\n-\t * check padding and next proto.\n-\t * return an error if something is wrong.\n-\t */\n-\tpd = (char *)espt - espt->pad_len;\n-\tif (espt->next_proto != sa->proto ||\n-\t\t\tmemcmp(pd, esp_pad_bytes, espt->pad_len))\n-\t\treturn -EINVAL;\n-\n-\t/* cut of ICV, ESP tail and padding bytes */\n-\ttlen = icv_len + sizeof(*espt) + espt->pad_len;\n-\tml->data_len -= tlen;\n-\tmb->pkt_len -= tlen;\n-\n-\t/* cut of L2/L3 headers, ESP header and IV */\n-\thlen = mb->l2_len + mb->l3_len;\n-\tesph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen);\n-\trte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset);\n-\n-\t/* retrieve SQN for later check */\n-\t*sqn = rte_be_to_cpu_32(esph->seq);\n-\n-\t/* reset mbuf metatdata: L2/L3 len, packet type */\n-\tmb->packet_type = RTE_PTYPE_UNKNOWN;\n-\tmb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |\n-\t\tsa->tx_offload.val;\n-\n-\t/* clear the PKT_RX_SEC_OFFLOAD flag if set */\n-\tmb->ol_flags &= ~(mb->ol_flags & 
PKT_RX_SEC_OFFLOAD);\n-\treturn 0;\n-}\n-\n-/*\n- * process ESP inbound transport packet.\n- */\n-static inline int\n-esp_inb_trs_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb,\n-\tuint32_t *sqn)\n-{\n-\tuint32_t hlen, icv_len, l2len, l3len, tlen;\n-\tstruct esp_hdr *esph;\n-\tstruct esp_tail *espt;\n-\tstruct rte_mbuf *ml;\n-\tchar *np, *op, *pd;\n-\n-\tif (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)\n-\t\treturn -EBADMSG;\n-\n-\ticv_len = sa->icv_len;\n-\n-\tml = rte_pktmbuf_lastseg(mb);\n-\tespt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *,\n-\t\tml->data_len - icv_len - sizeof(*espt));\n-\n-\t/* check padding, return an error if something is wrong. */\n-\tpd = (char *)espt - espt->pad_len;\n-\tif (memcmp(pd, esp_pad_bytes, espt->pad_len))\n-\t\treturn -EINVAL;\n-\n-\t/* cut of ICV, ESP tail and padding bytes */\n-\ttlen = icv_len + sizeof(*espt) + espt->pad_len;\n-\tml->data_len -= tlen;\n-\tmb->pkt_len -= tlen;\n-\n-\t/* retrieve SQN for later check */\n-\tl2len = mb->l2_len;\n-\tl3len = mb->l3_len;\n-\thlen = l2len + l3len;\n-\top = rte_pktmbuf_mtod(mb, char *);\n-\tesph = (struct esp_hdr *)(op + hlen);\n-\t*sqn = rte_be_to_cpu_32(esph->seq);\n-\n-\t/* cut off ESP header and IV, update L3 header */\n-\tnp = rte_pktmbuf_adj(mb, sa->ctp.cipher.offset);\n-\tremove_esph(np, op, hlen);\n-\tupdate_trs_l3hdr(sa, np + l2len, mb->pkt_len, l2len, l3len,\n-\t\t\tespt->next_proto);\n-\n-\t/* reset mbuf packet type */\n-\tmb->packet_type &= (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);\n-\n-\t/* clear the PKT_RX_SEC_OFFLOAD flag if set */\n-\tmb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD);\n-\treturn 0;\n-}\n-\n-/*\n- * for group of ESP inbound packets perform SQN check and update.\n- */\n-static inline uint16_t\n-esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[],\n-\tuint32_t dr[], uint16_t num)\n-{\n-\tuint32_t i, k;\n-\tstruct replay_sqn *rsn;\n-\n-\trsn = rsn_update_start(sa);\n-\n-\tk = 0;\n-\tfor (i = 0; i != num; i++) {\n-\t\tif (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0)\n-\t\t\tk++;\n-\t\telse\n-\t\t\tdr[i - k] = i;\n-\t}\n-\n-\trsn_update_finish(sa, rsn);\n-\treturn k;\n-}\n-\n-/*\n- * process group of ESP inbound tunnel packets.\n- */\n-static uint16_t\n-inb_tun_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n-\tuint16_t num)\n-{\n-\tuint32_t i, k, n;\n-\tstruct rte_ipsec_sa *sa;\n-\tuint32_t sqn[num];\n-\tuint32_t dr[num];\n-\n-\tsa = ss->sa;\n-\n-\t/* process packets, extract seq numbers */\n-\n-\tk = 0;\n-\tfor (i = 0; i != num; i++) {\n-\t\t/* good packet */\n-\t\tif (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0)\n-\t\t\tk++;\n-\t\t/* bad packet, will drop from furhter processing */\n-\t\telse\n-\t\t\tdr[i - k] = i;\n-\t}\n-\n-\t/* handle unprocessed mbufs */\n-\tif (k != num && k != 0)\n-\t\tmbuf_bad_move(mb, dr, num, num - k);\n-\n-\t/* update SQN and replay winow */\n-\tn = esp_inb_rsn_update(sa, sqn, dr, k);\n-\n-\t/* handle mbufs with wrong SQN */\n-\tif (n != k && n != 0)\n-\t\tmbuf_bad_move(mb, dr, k, k - n);\n-\n-\tif (n != num)\n-\t\trte_errno = EBADMSG;\n-\n-\treturn n;\n-}\n-\n-/*\n- * process group of ESP inbound transport packets.\n- */\n-static uint16_t\n-inb_trs_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n-\tuint16_t num)\n-{\n-\tuint32_t i, k, n;\n-\tuint32_t sqn[num];\n-\tstruct rte_ipsec_sa *sa;\n-\tuint32_t dr[num];\n-\n-\tsa = ss->sa;\n-\n-\t/* process packets, extract seq numbers */\n-\n-\tk = 0;\n-\tfor (i = 0; i != num; i++) {\n-\t\t/* good packet */\n-\t\tif 
(esp_inb_trs_single_pkt_process(sa, mb[i], sqn + k) == 0)\n-\t\t\tk++;\n-\t\t/* bad packet, will drop from furhter processing */\n-\t\telse\n-\t\t\tdr[i - k] = i;\n-\t}\n-\n-\t/* handle unprocessed mbufs */\n-\tif (k != num && k != 0)\n-\t\tmbuf_bad_move(mb, dr, num, num - k);\n-\n-\t/* update SQN and replay winow */\n-\tn = esp_inb_rsn_update(sa, sqn, dr, k);\n-\n-\t/* handle mbufs with wrong SQN */\n-\tif (n != k && n != 0)\n-\t\tmbuf_bad_move(mb, dr, k, k - n);\n-\n-\tif (n != num)\n-\t\trte_errno = EBADMSG;\n-\n-\treturn n;\n-}\n-\n-/*\n- * process outbound packets for SA with ESN support,\n- * for algorithms that require SQN.hibits to be implictly included\n- * into digest computation.\n- * In that case we have to move ICV bytes back to their proper place.\n- */\n-static uint16_t\n-outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n-\tuint16_t num)\n-{\n-\tuint32_t i, k, icv_len, *icv;\n-\tstruct rte_mbuf *ml;\n-\tstruct rte_ipsec_sa *sa;\n-\tuint32_t dr[num];\n-\n-\tsa = ss->sa;\n-\n-\tk = 0;\n-\ticv_len = sa->icv_len;\n-\n-\tfor (i = 0; i != num; i++) {\n-\t\tif ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {\n-\t\t\tml = rte_pktmbuf_lastseg(mb[i]);\n-\t\t\ticv = rte_pktmbuf_mtod_offset(ml, void *,\n-\t\t\t\tml->data_len - icv_len);\n-\t\t\tremove_sqh(icv, icv_len);\n-\t\t\tk++;\n-\t\t} else\n-\t\t\tdr[i - k] = i;\n-\t}\n-\n-\t/* handle unprocessed mbufs */\n-\tif (k != num) {\n-\t\trte_errno = EBADMSG;\n-\t\tif (k != 0)\n-\t\t\tmbuf_bad_move(mb, dr, num, num - k);\n-\t}\n-\n-\treturn k;\n-}\n-\n /*\n  * simplest pkt process routine:\n  * all actual processing is already done by HW/PMD,\n@@ -1303,142 +512,6 @@ pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n \treturn k;\n }\n \n-/*\n- * prepare packets for inline ipsec processing:\n- * set ol_flags and attach metadata.\n- */\n-static inline void\n-inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,\n-\tstruct rte_mbuf *mb[], uint16_t num)\n-{\n-\tuint32_t i, ol_flags;\n-\n-\tol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;\n-\tfor (i = 0; i != num; i++) {\n-\n-\t\tmb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;\n-\t\tif (ol_flags != 0)\n-\t\t\trte_security_set_pkt_metadata(ss->security.ctx,\n-\t\t\t\tss->security.ses, mb[i], NULL);\n-\t}\n-}\n-\n-/*\n- * process group of ESP outbound tunnel packets destined for\n- * INLINE_CRYPTO type of device.\n- */\n-static uint16_t\n-inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,\n-\tstruct rte_mbuf *mb[], uint16_t num)\n-{\n-\tint32_t rc;\n-\tuint32_t i, k, n;\n-\tuint64_t sqn;\n-\trte_be64_t sqc;\n-\tstruct rte_ipsec_sa *sa;\n-\tunion sym_op_data icv;\n-\tuint64_t iv[IPSEC_MAX_IV_QWORD];\n-\tuint32_t dr[num];\n-\n-\tsa = ss->sa;\n-\n-\tn = num;\n-\tsqn = esn_outb_update_sqn(sa, &n);\n-\tif (n != num)\n-\t\trte_errno = EOVERFLOW;\n-\n-\tk = 0;\n-\tfor (i = 0; i != n; i++) {\n-\n-\t\tsqc = rte_cpu_to_be_64(sqn + i);\n-\t\tgen_iv(iv, sqc);\n-\n-\t\t/* try to update the packet itself */\n-\t\trc = esp_outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv);\n-\n-\t\tk += (rc >= 0);\n-\n-\t\t/* failure, put packet into the death-row */\n-\t\tif (rc < 0) {\n-\t\t\tdr[i - k] = i;\n-\t\t\trte_errno = -rc;\n-\t\t}\n-\t}\n-\n-\t/* copy not processed mbufs beyond good ones */\n-\tif (k != n && k != 0)\n-\t\tmbuf_bad_move(mb, dr, n, n - k);\n-\n-\tinline_outb_mbuf_prepare(ss, mb, k);\n-\treturn k;\n-}\n-\n-/*\n- * process group of ESP outbound transport packets destined for\n- * INLINE_CRYPTO type of device.\n- 
*/\n-static uint16_t\n-inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,\n-\tstruct rte_mbuf *mb[], uint16_t num)\n-{\n-\tint32_t rc;\n-\tuint32_t i, k, n, l2, l3;\n-\tuint64_t sqn;\n-\trte_be64_t sqc;\n-\tstruct rte_ipsec_sa *sa;\n-\tunion sym_op_data icv;\n-\tuint64_t iv[IPSEC_MAX_IV_QWORD];\n-\tuint32_t dr[num];\n-\n-\tsa = ss->sa;\n-\n-\tn = num;\n-\tsqn = esn_outb_update_sqn(sa, &n);\n-\tif (n != num)\n-\t\trte_errno = EOVERFLOW;\n-\n-\tk = 0;\n-\tfor (i = 0; i != n; i++) {\n-\n-\t\tl2 = mb[i]->l2_len;\n-\t\tl3 = mb[i]->l3_len;\n-\n-\t\tsqc = rte_cpu_to_be_64(sqn + i);\n-\t\tgen_iv(iv, sqc);\n-\n-\t\t/* try to update the packet itself */\n-\t\trc = esp_outb_trs_pkt_prepare(sa, sqc, iv, mb[i],\n-\t\t\t\tl2, l3, &icv);\n-\n-\t\tk += (rc >= 0);\n-\n-\t\t/* failure, put packet into the death-row */\n-\t\tif (rc < 0) {\n-\t\t\tdr[i - k] = i;\n-\t\t\trte_errno = -rc;\n-\t\t}\n-\t}\n-\n-\t/* copy not processed mbufs beyond good ones */\n-\tif (k != n && k != 0)\n-\t\tmbuf_bad_move(mb, dr, n, n - k);\n-\n-\tinline_outb_mbuf_prepare(ss, mb, k);\n-\treturn k;\n-}\n-\n-/*\n- * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:\n- * actual processing is done by HW/PMD, just set flags and metadata.\n- */\n-static uint16_t\n-outb_inline_proto_process(const struct rte_ipsec_session *ss,\n-\t\tstruct rte_mbuf *mb[], uint16_t num)\n-{\n-\tinline_outb_mbuf_prepare(ss, mb, num);\n-\treturn num;\n-}\n-\n /*\n  * Select packet processing function for session on LOOKASIDE_NONE\n  * type of device.\n@@ -1456,23 +529,23 @@ lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,\n \tswitch (sa->type & msk) {\n \tcase (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):\n \tcase (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):\n-\t\tpf->prepare = inb_pkt_prepare;\n-\t\tpf->process = inb_tun_pkt_process;\n+\t\tpf->prepare = esp_inb_pkt_prepare;\n+\t\tpf->process = esp_inb_tun_pkt_process;\n \t\tbreak;\n \tcase (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):\n-\t\tpf->prepare = inb_pkt_prepare;\n-\t\tpf->process = inb_trs_pkt_process;\n+\t\tpf->prepare = esp_inb_pkt_prepare;\n+\t\tpf->process = esp_inb_trs_pkt_process;\n \t\tbreak;\n \tcase (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):\n \tcase (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):\n-\t\tpf->prepare = outb_tun_prepare;\n+\t\tpf->prepare = esp_outb_tun_prepare;\n \t\tpf->process = (sa->sqh_len != 0) ?\n-\t\t\toutb_sqh_process : pkt_flag_process;\n+\t\t\tesp_outb_sqh_process : pkt_flag_process;\n \t\tbreak;\n \tcase (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):\n-\t\tpf->prepare = outb_trs_prepare;\n+\t\tpf->prepare = esp_outb_trs_prepare;\n \t\tpf->process = (sa->sqh_len != 0) ?\n-\t\t\toutb_sqh_process : pkt_flag_process;\n+\t\t\tesp_outb_sqh_process : pkt_flag_process;\n \t\tbreak;\n \tdefault:\n \t\trc = -ENOTSUP;\n@@ -1498,10 +571,10 @@ inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,\n \tswitch (sa->type & msk) {\n \tcase (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):\n \tcase (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):\n-\t\tpf->process = inb_tun_pkt_process;\n+\t\tpf->process = esp_inb_tun_pkt_process;\n \t\tbreak;\n \tcase (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):\n-\t\tpf->process = inb_trs_pkt_process;\n+\t\tpf->process = esp_inb_trs_pkt_process;\n \t\tbreak;\n \tcase (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):\n \tcase (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):\n@@ -1542,7 +615,7 @@ ipsec_sa_pkt_func_select(const struct 
rte_ipsec_session *ss,\n \t\t\t\tRTE_IPSEC_SATP_DIR_IB)\n \t\t\tpf->process = pkt_flag_process;\n \t\telse\n-\t\t\tpf->process = outb_inline_proto_process;\n+\t\t\tpf->process = inline_proto_outb_pkt_process;\n \t\tbreak;\n \tcase RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:\n \t\tpf->prepare = lksd_proto_prepare;\ndiff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h\nindex 133a35d83..93ff7bdf1 100644\n--- a/lib/librte_ipsec/sa.h\n+++ b/lib/librte_ipsec/sa.h\n@@ -108,4 +108,44 @@ int\n ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,\n \tconst struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf);\n \n+/* inbound processing */\n+\n+uint16_t\n+esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n+\tstruct rte_crypto_op *cop[], uint16_t num);\n+\n+uint16_t\n+esp_inb_tun_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num);\n+\n+uint16_t\n+esp_inb_trs_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num);\n+\n+/* outbound processing */\n+\n+uint16_t\n+esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n+\tstruct rte_crypto_op *cop[], uint16_t num);\n+\n+uint16_t\n+esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n+\tstruct rte_crypto_op *cop[], uint16_t num);\n+\n+uint16_t\n+esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],\n+\tuint16_t num);\n+\n+uint16_t\n+inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num);\n+\n+uint16_t\n+inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num);\n+\n+uint16_t\n+inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,\n+\tstruct rte_mbuf *mb[], uint16_t num);\n+\n #endif /* _SA_H_ */\n",
    "prefixes": [
        "v2",
        "5/7"
    ]
}