get:
Show a patch.

patch:
Update a patch (partial update).

put:
Update a patch (full update).
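
A minimal sketch of driving these methods from Python with the requests package. The read call needs no authentication; the write call assumes Patchwork token authentication, maintainer rights on the project, and that "state" is a writable field with "accepted" as a valid slug on this instance; none of this is confirmed by the page itself.

import requests

url = "https://patches.dpdk.org/api/patches/96864/"

# GET: show the patch (read-only, anonymous access is enough).
patch = requests.get(url).json()
print(patch["name"], patch["state"])

# PATCH: partially update the patch, e.g. change its state.
# "<your-api-token>" is a placeholder for a real Patchwork API token.
headers = {"Authorization": "Token <your-api-token>"}
requests.patch(url, headers=headers, json={"state": "accepted"})

PUT behaves like PATCH except that it expects the full writable representation rather than a subset of fields.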

GET /api/patches/96864/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96864,
    "url": "https://patches.dpdk.org/api/patches/96864/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210812135425.698189-7-radu.nicolau@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210812135425.698189-7-radu.nicolau@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210812135425.698189-7-radu.nicolau@intel.com",
    "date": "2021-08-12T13:54:21",
    "name": "[v2,06/10] ipsec: add transmit segmentation offload support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "fbf3662fd4cf11f4ecaa2714a7c3ecc16b3048f4",
    "submitter": {
        "id": 743,
        "url": "https://patches.dpdk.org/api/people/743/?format=api",
        "name": "Radu Nicolau",
        "email": "radu.nicolau@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "https://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210812135425.698189-7-radu.nicolau@intel.com/mbox/",
    "series": [
        {
            "id": 18265,
            "url": "https://patches.dpdk.org/api/series/18265/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=18265",
            "date": "2021-08-12T13:54:15",
            "name": "new features for ipsec and security libraries",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/18265/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/96864/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/96864/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B4DEAA0C4E;\n\tThu, 12 Aug 2021 16:10:57 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id ED1454120C;\n\tThu, 12 Aug 2021 16:10:32 +0200 (CEST)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n by mails.dpdk.org (Postfix) with ESMTP id B498541235\n for <dev@dpdk.org>; Thu, 12 Aug 2021 16:10:30 +0200 (CEST)",
            "from fmsmga007.fm.intel.com ([10.253.24.52])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 12 Aug 2021 07:10:24 -0700",
            "from silpixa00400884.ir.intel.com ([10.243.22.82])\n by fmsmga007.fm.intel.com with ESMTP; 12 Aug 2021 07:10:20 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10074\"; a=\"300944426\"",
            "E=Sophos;i=\"5.84,316,1620716400\"; d=\"scan'208\";a=\"300944426\"",
            "E=Sophos;i=\"5.84,316,1620716400\"; d=\"scan'208\";a=\"446554425\""
        ],
        "X-ExtLoop1": "1",
        "From": "Radu Nicolau <radu.nicolau@intel.com>",
        "To": "",
        "Cc": "dev@dpdk.org, mdr@ashroe.eu, konstantin.ananyev@intel.com,\n vladimir.medvedkin@intel.com, bruce.richardson@intel.com,\n hemant.agrawal@nxp.com, gakhil@marvell.com, anoobj@marvell.com,\n declan.doherty@intel.com, abhijit.sinha@intel.com,\n daniel.m.buckley@intel.com, marchana@marvell.com, ktejasree@marvell.com,\n matan@nvidia.com, Radu Nicolau <radu.nicolau@intel.com>,\n Abhijit Sinha <abhijits.sinha@intel.com>",
        "Date": "Thu, 12 Aug 2021 14:54:21 +0100",
        "Message-Id": "<20210812135425.698189-7-radu.nicolau@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20210812135425.698189-1-radu.nicolau@intel.com>",
        "References": "<20210713133542.3550525-1-radu.nicolau@intel.com>\n <20210812135425.698189-1-radu.nicolau@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v2 06/10] ipsec: add transmit segmentation\n offload support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support for transmit segmentation offload to inline crypto processing\nmode. This offload is not supported by other offload modes, as at a\nminimum it requires inline crypto for IPsec to be supported on the\nnetwork interface.\n\nSigned-off-by: Declan Doherty <declan.doherty@intel.com>\nSigned-off-by: Radu Nicolau <radu.nicolau@intel.com>\nSigned-off-by: Abhijit Sinha <abhijits.sinha@intel.com>\nSigned-off-by: Daniel Martin Buckley <daniel.m.buckley@intel.com>\n---\n lib/ipsec/esp_inb.c  |   4 +-\n lib/ipsec/esp_outb.c | 115 +++++++++++++++++++++++++++++++++++--------\n lib/ipsec/iph.h      |  10 +++-\n lib/ipsec/sa.c       |   6 +++\n lib/ipsec/sa.h       |   4 ++\n 5 files changed, 114 insertions(+), 25 deletions(-)",
    "diff": "diff --git a/lib/ipsec/esp_inb.c b/lib/ipsec/esp_inb.c\nindex d66c88f05d..a6ab8fbdd5 100644\n--- a/lib/ipsec/esp_inb.c\n+++ b/lib/ipsec/esp_inb.c\n@@ -668,8 +668,8 @@ trs_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],\n \t\t\t/* modify packet's layout */\n \t\t\tnp = trs_process_step2(mb[i], ml[i], hl[i], cofs,\n \t\t\t\tto[i], tl, sqn + k);\n-\t\t\tupdate_trs_l3hdr(sa, np + l2, mb[i]->pkt_len,\n-\t\t\t\tl2, hl[i] - l2, espt[i].next_proto);\n+\t\t\tupdate_trs_l34hdrs(sa, np + l2, mb[i]->pkt_len,\n+\t\t\t\tl2, hl[i] - l2, espt[i].next_proto, 0);\n \n \t\t\t/* update mbuf's metadata */\n \t\t\ttrs_process_step3(mb[i]);\ndiff --git a/lib/ipsec/esp_outb.c b/lib/ipsec/esp_outb.c\nindex a3f77469c3..9fc7075796 100644\n--- a/lib/ipsec/esp_outb.c\n+++ b/lib/ipsec/esp_outb.c\n@@ -2,6 +2,8 @@\n  * Copyright(c) 2018-2020 Intel Corporation\n  */\n \n+#include <math.h>\n+\n #include <rte_ipsec.h>\n #include <rte_esp.h>\n #include <rte_ip.h>\n@@ -156,11 +158,20 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,\n \n \t/* number of bytes to encrypt */\n \tclen = plen + sizeof(*espt);\n-\tclen = RTE_ALIGN_CEIL(clen, sa->pad_align);\n+\n+\t/* We don't need to pad/ailgn packet when using TSO offload */\n+\tif (likely(!(mb->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))))\n+\t\tclen = RTE_ALIGN_CEIL(clen, sa->pad_align);\n+\n \n \t/* pad length + esp tail */\n \tpdlen = clen - plen;\n-\ttlen = pdlen + sa->icv_len + sqh_len;\n+\n+\t/* We don't append ICV length when using TSO offload */\n+\tif (likely(!(mb->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))))\n+\t\ttlen = pdlen + sa->icv_len + sqh_len;\n+\telse\n+\t\ttlen = pdlen + sqh_len;\n \n \t/* do append and prepend */\n \tml = rte_pktmbuf_lastseg(mb);\n@@ -337,6 +348,7 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,\n \tchar *ph, *pt;\n \tuint64_t *iv;\n \tuint32_t l2len, l3len;\n+\tuint8_t tso = mb->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG) ? 
1 : 0;\n \n \tl2len = mb->l2_len;\n \tl3len = mb->l3_len;\n@@ -349,11 +361,19 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,\n \n \t/* number of bytes to encrypt */\n \tclen = plen + sizeof(*espt);\n-\tclen = RTE_ALIGN_CEIL(clen, sa->pad_align);\n+\n+\t/* We don't need to pad/ailgn packet when using TSO offload */\n+\tif (likely(!tso))\n+\t\tclen = RTE_ALIGN_CEIL(clen, sa->pad_align);\n \n \t/* pad length + esp tail */\n \tpdlen = clen - plen;\n-\ttlen = pdlen + sa->icv_len + sqh_len;\n+\n+\t/* We don't append ICV length when using TSO offload */\n+\tif (likely(!tso))\n+\t\ttlen = pdlen + sa->icv_len + sqh_len;\n+\telse\n+\t\ttlen = pdlen + sqh_len;\n \n \t/* do append and insert */\n \tml = rte_pktmbuf_lastseg(mb);\n@@ -375,8 +395,8 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,\n \tinsert_esph(ph, ph + hlen, uhlen);\n \n \t/* update ip  header fields */\n-\tnp = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,\n-\t\t\tl3len, IPPROTO_ESP);\n+\tnp = update_trs_l34hdrs(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,\n+\t\t\tl3len, IPPROTO_ESP, tso);\n \n \t/* update spi, seqn and iv */\n \tesph = (struct rte_esp_hdr *)(ph + uhlen);\n@@ -651,6 +671,33 @@ inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,\n \t}\n }\n \n+/* check if packet will exceed MSS and segmentation is required */\n+static inline int\n+esn_outb_nb_segments(const struct rte_ipsec_sa *sa, struct rte_mbuf *m) {\n+\tuint16_t segments = 1;\n+\tuint16_t pkt_l3len = m->pkt_len - m->l2_len;\n+\n+\t/* Only support segmentation for UDP/TCP flows */\n+\tif (!(m->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)))\n+\t\treturn segments;\n+\n+\tif (sa->tso.enabled && pkt_l3len > sa->tso.mss) {\n+\t\tsegments = ceil((float)pkt_l3len / sa->tso.mss);\n+\n+\t\tif  (m->packet_type & RTE_PTYPE_L4_TCP) {\n+\t\t\tm->ol_flags |= (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM);\n+\t\t\tm->l4_len = sizeof(struct rte_tcp_hdr);\n+\t\t} else {\n+\t\t\tm->ol_flags |= (PKT_TX_UDP_SEG | PKT_TX_UDP_CKSUM);\n+\t\t\tm->l4_len = sizeof(struct rte_udp_hdr);\n+\t\t}\n+\n+\t\tm->tso_segsz = sa->tso.mss;\n+\t}\n+\n+\treturn segments;\n+}\n+\n /*\n  * process group of ESP outbound tunnel packets destined for\n  * INLINE_CRYPTO type of device.\n@@ -660,24 +707,29 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,\n \tstruct rte_mbuf *mb[], uint16_t num)\n {\n \tint32_t rc;\n-\tuint32_t i, k, n;\n+\tuint32_t i, k, nb_sqn = 0, nb_sqn_alloc;\n \tuint64_t sqn;\n \trte_be64_t sqc;\n \tstruct rte_ipsec_sa *sa;\n \tunion sym_op_data icv;\n \tuint64_t iv[IPSEC_MAX_IV_QWORD];\n \tuint32_t dr[num];\n+\tuint16_t nb_segs[num];\n \n \tsa = ss->sa;\n \n-\tn = num;\n-\tsqn = esn_outb_update_sqn(sa, &n);\n-\tif (n != num)\n+\tfor (i = 0; i != num; i++) {\n+\t\tnb_segs[i] = esn_outb_nb_segments(sa, mb[i]);\n+\t\tnb_sqn += nb_segs[i];\n+\t}\n+\n+\tnb_sqn_alloc = nb_sqn;\n+\tsqn = esn_outb_update_sqn(sa, &nb_sqn_alloc);\n+\tif (nb_sqn_alloc != nb_sqn)\n \t\trte_errno = EOVERFLOW;\n \n \tk = 0;\n-\tfor (i = 0; i != n; i++) {\n-\n+\tfor (i = 0; i != num; i++) {\n \t\tsqc = rte_cpu_to_be_64(sqn + i);\n \t\tgen_iv(iv, sqc);\n \n@@ -691,11 +743,18 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,\n \t\t\tdr[i - k] = i;\n \t\t\trte_errno = -rc;\n \t\t}\n+\n+\t\t/**\n+\t\t * If packet is using tso, increment sqn by the number of\n+\t\t * segments for\tpacket\n+\t\t */\n+\t\tif  (mb[i]->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))\n+\t\t\tsqn += nb_segs[i] - 1;\n \t}\n \n \t/* copy not processed mbufs 
beyond good ones */\n-\tif (k != n && k != 0)\n-\t\tmove_bad_mbufs(mb, dr, n, n - k);\n+\tif (k != num && k != 0)\n+\t\tmove_bad_mbufs(mb, dr, num, num - k);\n \n \tinline_outb_mbuf_prepare(ss, mb, k);\n \treturn k;\n@@ -710,23 +769,30 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,\n \tstruct rte_mbuf *mb[], uint16_t num)\n {\n \tint32_t rc;\n-\tuint32_t i, k, n;\n+\tuint32_t i, k, nb_sqn, nb_sqn_alloc;\n \tuint64_t sqn;\n \trte_be64_t sqc;\n \tstruct rte_ipsec_sa *sa;\n \tunion sym_op_data icv;\n \tuint64_t iv[IPSEC_MAX_IV_QWORD];\n \tuint32_t dr[num];\n+\tuint16_t nb_segs[num];\n \n \tsa = ss->sa;\n \n-\tn = num;\n-\tsqn = esn_outb_update_sqn(sa, &n);\n-\tif (n != num)\n+\t/* Calculate number of sequence numbers required */\n+\tfor (i = 0, nb_sqn = 0; i != num; i++) {\n+\t\tnb_segs[i] = esn_outb_nb_segments(sa, mb[i]);\n+\t\tnb_sqn += nb_segs[i];\n+\t}\n+\n+\tnb_sqn_alloc = nb_sqn;\n+\tsqn = esn_outb_update_sqn(sa, &nb_sqn_alloc);\n+\tif (nb_sqn_alloc != nb_sqn)\n \t\trte_errno = EOVERFLOW;\n \n \tk = 0;\n-\tfor (i = 0; i != n; i++) {\n+\tfor (i = 0; i != num; i++) {\n \n \t\tsqc = rte_cpu_to_be_64(sqn + i);\n \t\tgen_iv(iv, sqc);\n@@ -741,11 +807,18 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,\n \t\t\tdr[i - k] = i;\n \t\t\trte_errno = -rc;\n \t\t}\n+\n+\t\t/**\n+\t\t * If packet is using tso, increment sqn by the number of\n+\t\t * segments for\tpacket\n+\t\t */\n+\t\tif  (mb[i]->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))\n+\t\t\tsqn += nb_segs[i] - 1;\n \t}\n \n \t/* copy not processed mbufs beyond good ones */\n-\tif (k != n && k != 0)\n-\t\tmove_bad_mbufs(mb, dr, n, n - k);\n+\tif (k != num && k != 0)\n+\t\tmove_bad_mbufs(mb, dr, num, num - k);\n \n \tinline_outb_mbuf_prepare(ss, mb, k);\n \treturn k;\ndiff --git a/lib/ipsec/iph.h b/lib/ipsec/iph.h\nindex 861f16905a..2d223199ac 100644\n--- a/lib/ipsec/iph.h\n+++ b/lib/ipsec/iph.h\n@@ -6,6 +6,8 @@\n #define _IPH_H_\n \n #include <rte_ip.h>\n+#include <rte_udp.h>\n+#include <rte_tcp.h>\n \n /**\n  * @file iph.h\n@@ -39,8 +41,8 @@ insert_esph(char *np, char *op, uint32_t hlen)\n \n /* update original ip header fields for transport case */\n static inline int\n-update_trs_l3hdr(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,\n-\t\tuint32_t l2len, uint32_t l3len, uint8_t proto)\n+update_trs_l34hdrs(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,\n+\t\tuint32_t l2len, uint32_t l3len, uint8_t proto, uint8_t tso)\n {\n \tint32_t rc;\n \n@@ -51,6 +53,10 @@ update_trs_l3hdr(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,\n \t\tv4h = p;\n \t\trc = v4h->next_proto_id;\n \t\tv4h->next_proto_id = proto;\n+\t\tif (tso) {\n+\t\t\tv4h->hdr_checksum = 0;\n+\t\t\tv4h->total_length = 0;\n+\t\t}\n \t\tv4h->total_length = rte_cpu_to_be_16(plen - l2len);\n \t/* IPv6 */\n \t} else {\ndiff --git a/lib/ipsec/sa.c b/lib/ipsec/sa.c\nindex 720e0f365b..2ecbbce0a4 100644\n--- a/lib/ipsec/sa.c\n+++ b/lib/ipsec/sa.c\n@@ -565,6 +565,12 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,\n \tsa->type = type;\n \tsa->size = sz;\n \n+\n+\tif (prm->ipsec_xform.options.tso == 1) {\n+\t\tsa->tso.enabled = 1;\n+\t\tsa->tso.mss = prm->ipsec_xform.mss;\n+\t}\n+\n \t/* check for ESN flag */\n \tsa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?\n \t\tUINT32_MAX : UINT64_MAX;\ndiff --git a/lib/ipsec/sa.h b/lib/ipsec/sa.h\nindex 107ebd1519..5e237f3525 100644\n--- a/lib/ipsec/sa.h\n+++ b/lib/ipsec/sa.h\n@@ -113,6 +113,10 @@ struct rte_ipsec_sa {\n \tuint8_t iv_len;\n \tuint8_t 
pad_align;\n \tuint8_t tos_mask;\n+\tstruct {\n+\t\tuint8_t enabled:1;\n+\t\tuint16_t mss;\n+\t} tso;\n \n \t/* template for tunnel header */\n \tuint8_t hdr[IPSEC_MAX_HDR_SIZE];\n",
    "prefixes": [
        "v2",
        "06/10"
    ]
}
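
The record above is mostly pointers: "mbox" returns the raw patch email, "series" groups it with the other nine patches of the set, and "checks" aggregates the CI results summarized in "check": "success". A sketch of fetching the raw patch and applying it, assuming a DPDK git checkout in the current directory and that the mbox still applies cleanly (the patch is marked "superseded", so a newer revision likely exists and may be preferable):

import subprocess

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/96864/").json()

# "mbox" points at the patch as a raw mbox-formatted email.
mbox = requests.get(patch["mbox"]).content

# Hand it straight to git-am; this assumes the working tree is a DPDK
# clone checked out at a base the patch still applies to.
subprocess.run(["git", "am"], input=mbox, check=True)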