get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/54493/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 54493,
    "url": "http://patches.dpdk.org/api/patches/54493/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190606115151.27805-3-konstantin.ananyev@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190606115151.27805-3-konstantin.ananyev@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190606115151.27805-3-konstantin.ananyev@intel.com",
    "date": "2019-06-06T11:51:48",
    "name": "[v2,2/5] examples/ipsec-secgw: support packet fragmentation and reassembly",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f5885f4fe507ba8ad89c7f6e7cd90585f4757574",
    "submitter": {
        "id": 33,
        "url": "http://patches.dpdk.org/api/people/33/?format=api",
        "name": "Ananyev, Konstantin",
        "email": "konstantin.ananyev@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190606115151.27805-3-konstantin.ananyev@intel.com/mbox/",
    "series": [
        {
            "id": 4928,
            "url": "http://patches.dpdk.org/api/series/4928/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4928",
            "date": "2019-06-06T11:51:46",
            "name": "examples/ipsec-secgw: support packet",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/4928/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/54493/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/54493/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7162D1B9C0;\n\tThu,  6 Jun 2019 13:52:23 +0200 (CEST)",
            "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n\tby dpdk.org (Postfix) with ESMTP id 2CF501B9BD\n\tfor <dev@dpdk.org>; Thu,  6 Jun 2019 13:52:21 +0200 (CEST)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n\tby fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t06 Jun 2019 04:52:20 -0700",
            "from sivswdev08.ir.intel.com ([10.237.217.47])\n\tby orsmga008.jf.intel.com with ESMTP; 06 Jun 2019 04:52:19 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "akhil.goyal@nxp.com,\n\tKonstantin Ananyev <konstantin.ananyev@intel.com>",
        "Date": "Thu,  6 Jun 2019 12:51:48 +0100",
        "Message-Id": "<20190606115151.27805-3-konstantin.ananyev@intel.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<20190606115151.27805-1-konstantin.ananyev@intel.com>",
        "References": "<20190527184448.21264-1-konstantin.ananyev@intel.com>\n\t<20190606115151.27805-1-konstantin.ananyev@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 2/5] examples/ipsec-secgw: support packet\n\tfragmentation and reassembly",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add optional ability to fragment packet bigger then mtu,\nand reassemble fragmented packet.\nTo minimize possible performance effect, reassembly is\nimplemented as RX callback.\nTo support these features ipsec-secgw relies on librte_ipsec ability\nto handle multi-segment packets.\nAlso when reassemble/fragmentation support is enabled, attached\ncrypto devices have to support 'In Place SGL' offload capability.\nTo enable/disable this functionality, two new optional command-line\noptions are introduced:\n  --reassemble <val> - number of entries in reassemble table\n  --mtu <val> - MTU value for all attached ports\nAs separate '--mtu' option is introduced, '-j <val>' option is now used\nto specify mbuf data buffer size only.\n\nSigned-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\n---\n examples/ipsec-secgw/ipsec-secgw.c | 345 ++++++++++++++++++++++++++---\n examples/ipsec-secgw/ipsec.h       |   1 +\n examples/ipsec-secgw/meson.build   |   2 +-\n 3 files changed, 317 insertions(+), 31 deletions(-)",
    "diff": "diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c\nindex 4004f2bc2..04f3f3c53 100644\n--- a/examples/ipsec-secgw/ipsec-secgw.c\n+++ b/examples/ipsec-secgw/ipsec-secgw.c\n@@ -41,6 +41,7 @@\n #include <rte_jhash.h>\n #include <rte_cryptodev.h>\n #include <rte_security.h>\n+#include <rte_ip_frag.h>\n \n #include \"ipsec.h\"\n #include \"parser.h\"\n@@ -109,6 +110,11 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;\n \t\t(addr)->addr_bytes[4], (addr)->addr_bytes[5], \\\n \t\t0, 0)\n \n+#define\tFRAG_TBL_BUCKET_ENTRIES\t4\n+#define\tFRAG_TTL_MS\t\t(10 * MS_PER_S)\n+\n+#define MTU_TO_FRAMELEN(x)\t((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)\n+\n /* port/source ethernet addr and destination ethernet addr */\n struct ethaddr_info {\n \tuint64_t src, dst;\n@@ -126,6 +132,8 @@ struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {\n #define CMD_LINE_OPT_CRYPTODEV_MASK\t\"cryptodev_mask\"\n #define CMD_LINE_OPT_RX_OFFLOAD\t\t\"rxoffload\"\n #define CMD_LINE_OPT_TX_OFFLOAD\t\t\"txoffload\"\n+#define CMD_LINE_OPT_REASSEMBLE\t\t\"reassemble\"\n+#define CMD_LINE_OPT_MTU\t\t\"mtu\"\n \n enum {\n \t/* long options mapped to a short option */\n@@ -139,6 +147,8 @@ enum {\n \tCMD_LINE_OPT_CRYPTODEV_MASK_NUM,\n \tCMD_LINE_OPT_RX_OFFLOAD_NUM,\n \tCMD_LINE_OPT_TX_OFFLOAD_NUM,\n+\tCMD_LINE_OPT_REASSEMBLE_NUM,\n+\tCMD_LINE_OPT_MTU_NUM,\n };\n \n static const struct option lgopts[] = {\n@@ -147,6 +157,7 @@ static const struct option lgopts[] = {\n \t{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},\n \t{CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},\n \t{CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},\n+\t{CMD_LINE_OPT_REASSEMBLE, 1, 0, CMD_LINE_OPT_REASSEMBLE_NUM},\n \t{NULL, 0, 0, 0}\n };\n \n@@ -159,7 +170,6 @@ static int32_t numa_on = 1; /**< NUMA is enabled by default. 
*/\n static uint32_t nb_lcores;\n static uint32_t single_sa;\n static uint32_t single_sa_idx;\n-static uint32_t frame_size;\n \n /*\n  * RX/TX HW offload capabilities to enable/use on ethernet ports.\n@@ -168,6 +178,13 @@ static uint32_t frame_size;\n static uint64_t dev_rx_offload = UINT64_MAX;\n static uint64_t dev_tx_offload = UINT64_MAX;\n \n+/*\n+ * global values that determine multi-seg policy\n+ */\n+static uint32_t frag_tbl_sz;\n+static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE;\n+static uint32_t mtu_size = RTE_ETHER_MTU;\n+\n /* application wide librte_ipsec/SA parameters */\n struct app_sa_prm app_sa_prm = {.enable = 0};\n \n@@ -204,6 +221,12 @@ struct lcore_conf {\n \tstruct ipsec_ctx outbound;\n \tstruct rt_ctx *rt4_ctx;\n \tstruct rt_ctx *rt6_ctx;\n+\tstruct {\n+\t\tstruct rte_ip_frag_tbl *tbl;\n+\t\tstruct rte_mempool *pool_dir;\n+\t\tstruct rte_mempool *pool_indir;\n+\t\tstruct rte_ip_frag_death_row dr;\n+\t} frag;\n } __rte_cache_aligned;\n \n static struct lcore_conf lcore_conf[RTE_MAX_LCORE];\n@@ -229,6 +252,18 @@ static struct rte_eth_conf port_conf = {\n \n static struct socket_ctx socket_ctx[NB_SOCKETS];\n \n+/*\n+ * Determine is multi-segment support required:\n+ *  - either frame buffer size is smaller then mtu\n+ *  - or reassmeble support is requested\n+ */\n+static int\n+multi_seg_required(void)\n+{\n+\treturn (MTU_TO_FRAMELEN(mtu_size) + RTE_PKTMBUF_HEADROOM >\n+\t\tframe_buf_size || frag_tbl_sz != 0);\n+}\n+\n static inline void\n adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,\n \tuint32_t l2_len)\n@@ -430,9 +465,52 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)\n \treturn 0;\n }\n \n+/*\n+ * Helper function to fragment and queue for TX one packet.\n+ */\n+static inline uint32_t\n+send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,\n+\tuint16_t port, uint8_t proto)\n+{\n+\tstruct buffer *tbl;\n+\tuint32_t len, n;\n+\tint32_t rc;\n+\n+\ttbl =  qconf->tx_mbufs + 
port;\n+\tlen = tbl->len;\n+\n+\t/* free space for new fragments */\n+\tif (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >=  RTE_DIM(tbl->m_table)) {\n+\t\tsend_burst(qconf, len, port);\n+\t\tlen = 0;\n+\t}\n+\n+\tn = RTE_DIM(tbl->m_table) - len;\n+\n+\tif (proto == IPPROTO_IP)\n+\t\trc = rte_ipv4_fragment_packet(m, tbl->m_table + len,\n+\t\t\tn, mtu_size, qconf->frag.pool_dir,\n+\t\t\tqconf->frag.pool_indir);\n+\telse\n+\t\trc = rte_ipv6_fragment_packet(m, tbl->m_table + len,\n+\t\t\tn, mtu_size, qconf->frag.pool_dir,\n+\t\t\tqconf->frag.pool_indir);\n+\n+\tif (rc >= 0)\n+\t\tlen += rc;\n+\telse\n+\t\tRTE_LOG(ERR, IPSEC,\n+\t\t\t\"%s: failed to fragment packet with size %u, \"\n+\t\t\t\"error code: %d\\n\",\n+\t\t\t__func__, m->pkt_len, rte_errno);\n+\n+\trte_pktmbuf_free(m);\n+\treturn len;\n+}\n+\n /* Enqueue a single packet, and send burst if queue is filled */\n static inline int32_t\n-send_single_packet(struct rte_mbuf *m, uint16_t port)\n+send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)\n {\n \tuint32_t lcore_id;\n \tuint16_t len;\n@@ -442,8 +520,14 @@ send_single_packet(struct rte_mbuf *m, uint16_t port)\n \n \tqconf = &lcore_conf[lcore_id];\n \tlen = qconf->tx_mbufs[port].len;\n-\tqconf->tx_mbufs[port].m_table[len] = m;\n-\tlen++;\n+\n+\tif (m->pkt_len <= mtu_size) {\n+\t\tqconf->tx_mbufs[port].m_table[len] = m;\n+\t\tlen++;\n+\n+\t/* need to fragment the packet */\n+\t} else\n+\t\tlen = send_fragment_packet(qconf, m, port, proto);\n \n \t/* enough pkts to be sent */\n \tif (unlikely(len == MAX_PKT_BURST)) {\n@@ -797,7 +881,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)\n \t\t\trte_pktmbuf_free(pkts[i]);\n \t\t\tcontinue;\n \t\t}\n-\t\tsend_single_packet(pkts[i], pkt_hop & 0xff);\n+\t\tsend_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);\n \t}\n }\n \n@@ -849,7 +933,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)\n \t\t\trte_pktmbuf_free(pkts[i]);\n \t\t\tcontinue;\n 
\t\t}\n-\t\tsend_single_packet(pkts[i], pkt_hop & 0xff);\n+\t\tsend_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);\n \t}\n }\n \n@@ -1015,6 +1099,8 @@ main_loop(__attribute__((unused)) void *dummy)\n \tqconf->outbound.session_pool = socket_ctx[socket_id].session_pool;\n \tqconf->outbound.session_priv_pool =\n \t\t\tsocket_ctx[socket_id].session_priv_pool;\n+\tqconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;\n+\tqconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;\n \n \tif (qconf->nb_rx_queue == 0) {\n \t\tRTE_LOG(DEBUG, IPSEC, \"lcore %u has nothing to do\\n\",\n@@ -1161,12 +1247,14 @@ print_usage(const char *prgname)\n \t\t\" [--cryptodev_mask MASK]\"\n \t\t\" [--\" CMD_LINE_OPT_RX_OFFLOAD \" RX_OFFLOAD_MASK]\"\n \t\t\" [--\" CMD_LINE_OPT_TX_OFFLOAD \" TX_OFFLOAD_MASK]\"\n+\t\t\" [--\" CMD_LINE_OPT_REASSEMBLE \" REASSEMBLE_TABLE_SIZE]\"\n+\t\t\" [--\" CMD_LINE_OPT_MTU \" MTU]\"\n \t\t\"\\n\\n\"\n \t\t\"  -p PORTMASK: Hexadecimal bitmask of ports to configure\\n\"\n \t\t\"  -P : Enable promiscuous mode\\n\"\n \t\t\"  -u PORTMASK: Hexadecimal bitmask of unprotected ports\\n\"\n-\t\t\"  -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\\n\"\n-\t\t\"                packet size\\n\"\n+\t\t\"  -j FRAMESIZE: Data buffer size, minimum (and default)\\n\"\n+\t\t\"     value: RTE_MBUF_DEFAULT_BUF_SIZE\\n\"\n \t\t\"  -l enables code-path that uses librte_ipsec\\n\"\n \t\t\"  -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\\n\"\n \t\t\"     size for each SA\\n\"\n@@ -1184,6 +1272,13 @@ print_usage(const char *prgname)\n \t\t\"  --\" CMD_LINE_OPT_TX_OFFLOAD\n \t\t\": bitmask of the TX HW offload capabilities to enable/use\\n\"\n \t\t\"                         (DEV_TX_OFFLOAD_*)\\n\"\n+\t\t\"  --\" CMD_LINE_OPT_REASSEMBLE \" NUM\"\n+\t\t\": max number of entries in reassemble(fragment) table\\n\"\n+\t\t\"    (zero (default value) disables reassembly)\\n\"\n+\t\t\"  --\" CMD_LINE_OPT_MTU \" MTU\"\n+\t\t\": MTU value on all ports 
(default value: 1500)\\n\"\n+\t\t\"    outgoing packets with bigger size will be fragmented\\n\"\n+\t\t\"    incoming packets with bigger size will be discarded\\n\"\n \t\t\"\\n\",\n \t\tprgname);\n }\n@@ -1354,21 +1449,16 @@ parse_args(int32_t argc, char **argv)\n \t\t\tf_present = 1;\n \t\t\tbreak;\n \t\tcase 'j':\n-\t\t\t{\n-\t\t\t\tint32_t size = parse_decimal(optarg);\n-\t\t\t\tif (size <= 1518) {\n-\t\t\t\t\tprintf(\"Invalid jumbo frame size\\n\");\n-\t\t\t\t\tif (size < 0) {\n-\t\t\t\t\t\tprint_usage(prgname);\n-\t\t\t\t\t\treturn -1;\n-\t\t\t\t\t}\n-\t\t\t\t\tprintf(\"Using default value 9000\\n\");\n-\t\t\t\t\tframe_size = 9000;\n-\t\t\t\t} else {\n-\t\t\t\t\tframe_size = size;\n-\t\t\t\t}\n+\t\t\tret = parse_decimal(optarg);\n+\t\t\tif (ret < RTE_MBUF_DEFAULT_BUF_SIZE ||\n+\t\t\t\t\tret > UINT16_MAX) {\n+\t\t\t\tprintf(\"Invalid frame buffer size value: %s\\n\",\n+\t\t\t\t\toptarg);\n+\t\t\t\tprint_usage(prgname);\n+\t\t\t\treturn -1;\n \t\t\t}\n-\t\t\tprintf(\"Enabled jumbo frames size %u\\n\", frame_size);\n+\t\t\tframe_buf_size = ret;\n+\t\t\tprintf(\"Custom frame buffer size %u\\n\", frame_buf_size);\n \t\t\tbreak;\n \t\tcase 'l':\n \t\t\tapp_sa_prm.enable = 1;\n@@ -1436,6 +1526,26 @@ parse_args(int32_t argc, char **argv)\n \t\t\t\treturn -1;\n \t\t\t}\n \t\t\tbreak;\n+\t\tcase CMD_LINE_OPT_REASSEMBLE_NUM:\n+\t\t\tret = parse_decimal(optarg);\n+\t\t\tif (ret < 0) {\n+\t\t\t\tprintf(\"Invalid argument for \\'%s\\': %s\\n\",\n+\t\t\t\t\tCMD_LINE_OPT_REASSEMBLE, optarg);\n+\t\t\t\tprint_usage(prgname);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tfrag_tbl_sz = ret;\n+\t\t\tbreak;\n+\t\tcase CMD_LINE_OPT_MTU_NUM:\n+\t\t\tret = parse_decimal(optarg);\n+\t\t\tif (ret < 0 || ret > RTE_IPV4_MAX_PKT_LEN) {\n+\t\t\t\tprintf(\"Invalid argument for \\'%s\\': %s\\n\",\n+\t\t\t\t\tCMD_LINE_OPT_MTU, optarg);\n+\t\t\t\tprint_usage(prgname);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tmtu_size = ret;\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tprint_usage(prgname);\n \t\t\treturn 
-1;\n@@ -1447,6 +1557,16 @@ parse_args(int32_t argc, char **argv)\n \t\treturn -1;\n \t}\n \n+\t/* check do we need to enable multi-seg support */\n+\tif (multi_seg_required()) {\n+\t\t/* legacy mode doesn't support multi-seg */\n+\t\tapp_sa_prm.enable = 1;\n+\t\tprintf(\"frame buf size: %u, mtu: %u, \"\n+\t\t\t\"number of reassemble entries: %u\\n\"\n+\t\t\t\"multi-segment support is required\\n\",\n+\t\t\tframe_buf_size, mtu_size, frag_tbl_sz);\n+\t}\n+\n \tprint_app_sa_prm(&app_sa_prm);\n \n \tif (optind >= 0)\n@@ -1664,6 +1784,9 @@ cryptodevs_init(void)\n \tint16_t cdev_id, port_id;\n \tstruct rte_hash_parameters params = { 0 };\n \n+\tconst uint64_t mseg_flag = multi_seg_required() ?\n+\t\t\t\tRTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;\n+\n \tparams.entries = CDEV_MAP_ENTRIES;\n \tparams.key_len = sizeof(struct cdev_key);\n \tparams.hash_func = rte_jhash;\n@@ -1732,6 +1855,12 @@ cryptodevs_init(void)\n \n \t\trte_cryptodev_info_get(cdev_id, &cdev_info);\n \n+\t\tif ((mseg_flag & cdev_info.feature_flags) != mseg_flag)\n+\t\t\trte_exit(EXIT_FAILURE,\n+\t\t\t\t\"Device %hd does not support \\'%s\\' feature\\n\",\n+\t\t\t\tcdev_id,\n+\t\t\t\trte_cryptodev_get_feature_name(mseg_flag));\n+\n \t\tif (nb_lcore_params > cdev_info.max_nb_queue_pairs)\n \t\t\tmax_nb_qps = cdev_info.max_nb_queue_pairs;\n \t\telse\n@@ -1860,6 +1989,7 @@ cryptodevs_init(void)\n static void\n port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)\n {\n+\tuint32_t frame_size;\n \tstruct rte_eth_dev_info dev_info;\n \tstruct rte_eth_txconf *txconf;\n \tuint16_t nb_tx_queue, nb_rx_queue;\n@@ -1898,9 +2028,14 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)\n \tprintf(\"Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\\n\",\n \t\t\tnb_rx_queue, nb_tx_queue);\n \n-\tif (frame_size) {\n-\t\tlocal_port_conf.rxmode.max_rx_pkt_len = frame_size;\n+\tframe_size = MTU_TO_FRAMELEN(mtu_size);\n+\tif (frame_size > local_port_conf.rxmode.max_rx_pkt_len)\n 
\t\tlocal_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;\n+\tlocal_port_conf.rxmode.max_rx_pkt_len = frame_size;\n+\n+\tif (multi_seg_required()) {\n+\t\tlocal_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;\n+\t\tlocal_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;\n \t}\n \n \tlocal_port_conf.rxmode.offloads |= req_rx_offloads;\n@@ -2021,16 +2156,25 @@ static void\n pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)\n {\n \tchar s[64];\n-\tuint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :\n-\t\t\tRTE_MBUF_DEFAULT_BUF_SIZE;\n-\n+\tint32_t ms;\n \n \tsnprintf(s, sizeof(s), \"mbuf_pool_%d\", socket_id);\n \tctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,\n \t\t\tMEMPOOL_CACHE_SIZE, ipsec_metadata_size(),\n-\t\t\tbuff_size,\n-\t\t\tsocket_id);\n-\tif (ctx->mbuf_pool == NULL)\n+\t\t\tframe_buf_size, socket_id);\n+\n+\t/*\n+\t * if multi-segment support is enabled, then create a pool\n+\t * for indirect mbufs.\n+\t */\n+\tms = multi_seg_required();\n+\tif (ms != 0) {\n+\t\tsnprintf(s, sizeof(s), \"mbuf_pool_indir_%d\", socket_id);\n+\t\tctx->mbuf_pool_indir = rte_pktmbuf_pool_create(s, nb_mbuf,\n+\t\t\tMEMPOOL_CACHE_SIZE, 0, 0, socket_id);\n+\t}\n+\n+\tif (ctx->mbuf_pool == NULL || (ms != 0 && ctx->mbuf_pool_indir == NULL))\n \t\trte_exit(EXIT_FAILURE, \"Cannot init mbuf pool on socket %d\\n\",\n \t\t\t\tsocket_id);\n \telse\n@@ -2092,6 +2236,140 @@ inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,\n \treturn -1;\n }\n \n+static uint16_t\n+rx_callback(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n+\tstruct rte_mbuf *pkt[], uint16_t nb_pkts,\n+\t__rte_unused uint16_t max_pkts, void *user_param)\n+{\n+\tuint64_t tm;\n+\tuint32_t i, k;\n+\tstruct lcore_conf *lc;\n+\tstruct rte_mbuf *mb;\n+\tstruct rte_ether_hdr *eth;\n+\n+\tlc = user_param;\n+\tk = 0;\n+\ttm = 0;\n+\n+\tfor (i = 0; i != nb_pkts; i++) {\n+\n+\t\tmb = pkt[i];\n+\t\teth = rte_pktmbuf_mtod(mb, struct 
rte_ether_hdr *);\n+\t\tif (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {\n+\n+\t\t\tstruct rte_ipv4_hdr *iph;\n+\n+\t\t\tiph = (struct rte_ipv4_hdr *)(eth + 1);\n+\t\t\tif (rte_ipv4_frag_pkt_is_fragmented(iph)) {\n+\n+\t\t\t\tmb->l2_len = sizeof(*eth);\n+\t\t\t\tmb->l3_len = sizeof(*iph);\n+\t\t\t\ttm = (tm != 0) ? tm : rte_rdtsc();\n+\t\t\t\tmb = rte_ipv4_frag_reassemble_packet(\n+\t\t\t\t\tlc->frag.tbl, &lc->frag.dr,\n+\t\t\t\t\tmb, tm, iph);\n+\n+\t\t\t\tif (mb != NULL) {\n+\t\t\t\t\t/* fix ip cksum after reassemble. */\n+\t\t\t\t\tiph = rte_pktmbuf_mtod_offset(mb,\n+\t\t\t\t\t\tstruct rte_ipv4_hdr *,\n+\t\t\t\t\t\tmb->l2_len);\n+\t\t\t\t\tiph->hdr_checksum = 0;\n+\t\t\t\t\tiph->hdr_checksum = rte_ipv4_cksum(iph);\n+\t\t\t\t}\n+\t\t\t}\n+\t\t} else if (eth->ether_type ==\n+\t\t\t\trte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {\n+\n+\t\t\tstruct rte_ipv6_hdr *iph;\n+\t\t\tstruct ipv6_extension_fragment *fh;\n+\n+\t\t\tiph = (struct rte_ipv6_hdr *)(eth + 1);\n+\t\t\tfh = rte_ipv6_frag_get_ipv6_fragment_header(iph);\n+\t\t\tif (fh != NULL) {\n+\t\t\t\tmb->l2_len = sizeof(*eth);\n+\t\t\t\tmb->l3_len = (uintptr_t)fh - (uintptr_t)iph +\n+\t\t\t\t\tsizeof(*fh);\n+\t\t\t\ttm = (tm != 0) ? tm : rte_rdtsc();\n+\t\t\t\tmb = rte_ipv6_frag_reassemble_packet(\n+\t\t\t\t\tlc->frag.tbl, &lc->frag.dr,\n+\t\t\t\t\tmb, tm, iph, fh);\n+\t\t\t\tif (mb != NULL)\n+\t\t\t\t\t/* fix l3_len after reassemble. 
*/\n+\t\t\t\t\tmb->l3_len = mb->l3_len - sizeof(*fh);\n+\t\t\t}\n+\t\t}\n+\n+\t\tpkt[k] = mb;\n+\t\tk += (mb != NULL);\n+\t}\n+\n+\t/* some fragments were encountered, drain death row */\n+\tif (tm != 0)\n+\t\trte_ip_frag_free_death_row(&lc->frag.dr, 0);\n+\n+\treturn k;\n+}\n+\n+\n+static int\n+reassemble_lcore_init(struct lcore_conf *lc, uint32_t cid)\n+{\n+\tint32_t sid;\n+\tuint32_t i;\n+\tuint64_t frag_cycles;\n+\tconst struct lcore_rx_queue *rxq;\n+\tconst struct rte_eth_rxtx_callback *cb;\n+\n+\t/* create fragment table */\n+\tsid = rte_lcore_to_socket_id(cid);\n+\tfrag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /\n+\t\tMS_PER_S * FRAG_TTL_MS;\n+\n+\tlc->frag.tbl = rte_ip_frag_table_create(frag_tbl_sz,\n+\t\tFRAG_TBL_BUCKET_ENTRIES, frag_tbl_sz, frag_cycles, sid);\n+\tif (lc->frag.tbl == NULL) {\n+\t\tprintf(\"%s(%u): failed to create fragment table of size: %u, \"\n+\t\t\t\"error code: %d\\n\",\n+\t\t\t__func__, cid, frag_tbl_sz, rte_errno);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* setup reassemble RX callbacks for all queues */\n+\tfor (i = 0; i != lc->nb_rx_queue; i++) {\n+\n+\t\trxq = lc->rx_queue_list + i;\n+\t\tcb = rte_eth_add_rx_callback(rxq->port_id, rxq->queue_id,\n+\t\t\trx_callback, lc);\n+\t\tif (cb == NULL) {\n+\t\t\tprintf(\"%s(%u): failed to install RX callback for \"\n+\t\t\t\t\"portid=%u, queueid=%u, error code: %d\\n\",\n+\t\t\t\t__func__, cid,\n+\t\t\t\trxq->port_id, rxq->queue_id, rte_errno);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+reassemble_init(void)\n+{\n+\tint32_t rc;\n+\tuint32_t i, lc;\n+\n+\trc = 0;\n+\tfor (i = 0; i != nb_lcore_params; i++) {\n+\t\tlc = lcore_params[i].lcore_id;\n+\t\trc = reassemble_lcore_init(lcore_conf + lc, lc);\n+\t\tif (rc != 0)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn rc;\n+}\n+\n int32_t\n main(int32_t argc, char **argv)\n {\n@@ -2186,6 +2464,13 @@ main(int32_t argc, char **argv)\n \t\t\tRTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);\n \t}\n \n+\t/* fragment 
reassemble is enabled */\n+\tif (frag_tbl_sz != 0) {\n+\t\tret = reassemble_init();\n+\t\tif (ret != 0)\n+\t\t\trte_exit(EXIT_FAILURE, \"failed at reassemble init\");\n+\t}\n+\n \tcheck_all_ports_link_status(enabled_port_mask);\n \n \t/* launch per-lcore init on every lcore */\ndiff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h\nindex e9272d74b..7465ec92e 100644\n--- a/examples/ipsec-secgw/ipsec.h\n+++ b/examples/ipsec-secgw/ipsec.h\n@@ -180,6 +180,7 @@ struct socket_ctx {\n \tstruct rt_ctx *rt_ip4;\n \tstruct rt_ctx *rt_ip6;\n \tstruct rte_mempool *mbuf_pool;\n+\tstruct rte_mempool *mbuf_pool_indir;\n \tstruct rte_mempool *session_pool;\n \tstruct rte_mempool *session_priv_pool;\n };\ndiff --git a/examples/ipsec-secgw/meson.build b/examples/ipsec-secgw/meson.build\nindex 81c146ebc..9ece345cf 100644\n--- a/examples/ipsec-secgw/meson.build\n+++ b/examples/ipsec-secgw/meson.build\n@@ -6,7 +6,7 @@\n # To build this example as a standalone application with an already-installed\n # DPDK instance, use 'make'\n \n-deps += ['security', 'lpm', 'acl', 'hash', 'ipsec']\n+deps += ['security', 'lpm', 'acl', 'hash', 'ip_frag', 'ipsec']\n allow_experimental_apis = true\n sources = files(\n \t'esp.c', 'ipsec.c', 'ipsec_process.c', 'ipsec-secgw.c',\n",
    "prefixes": [
        "v2",
        "2/5"
    ]
}