get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
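
For reference, the response shown below can be fetched from any HTTP client. A minimal sketch using Python's requests library (the client code and variable names are illustrative; only the URL and field names come from the response itself):

    import requests

    # Request JSON explicitly; a browser hitting ?format=api gets the
    # browsable HTML view whose output is reproduced below.
    resp = requests.get(
        "https://patches.dpdk.org/api/patches/129471/",
        headers={"Accept": "application/json"},
    )
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])   # "[v6,2/2] test: add reassembly perf test"
    print(patch["state"])  # "accepted"
    print(patch["mbox"])   # raw mbox URL, suitable for git am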

GET /api/patches/129471/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 129471,
    "url": "https://patches.dpdk.org/api/patches/129471/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20230711165222.1755-2-pbhagavatula@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230711165222.1755-2-pbhagavatula@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230711165222.1755-2-pbhagavatula@marvell.com",
    "date": "2023-07-11T16:52:22",
    "name": "[v6,2/2] test: add reassembly perf test",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "0424fb9c89f365b73113f3fa198429b8816b09a6",
    "submitter": {
        "id": 1183,
        "url": "https://patches.dpdk.org/api/people/1183/?format=api",
        "name": "Pavan Nikhilesh Bhagavatula",
        "email": "pbhagavatula@marvell.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20230711165222.1755-2-pbhagavatula@marvell.com/mbox/",
    "series": [
        {
            "id": 28907,
            "url": "https://patches.dpdk.org/api/series/28907/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=28907",
            "date": "2023-07-11T16:52:21",
            "name": "[v6,1/2] ip_frag: optimize key compare and hash generation",
            "version": 6,
            "mbox": "https://patches.dpdk.org/series/28907/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/129471/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/129471/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D4D5142E48;\n\tTue, 11 Jul 2023 18:52:43 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0B558410EE;\n\tTue, 11 Jul 2023 18:52:42 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 02D4B410E9\n for <dev@dpdk.org>; Tue, 11 Jul 2023 18:52:40 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 36BDNlR3009973; Tue, 11 Jul 2023 09:52:40 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3rq7ak0jcr-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Tue, 11 Jul 2023 09:52:40 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Tue, 11 Jul 2023 09:52:38 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Tue, 11 Jul 2023 09:52:38 -0700",
            "from MININT-80QBFE8.corp.innovium.com (unknown [10.28.164.122])\n by maili.marvell.com (Postfix) with ESMTP id 04FDF3F7053;\n Tue, 11 Jul 2023 09:52:33 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=6wPoG3YmdtDmDokBCCCy09Kot4V+BdkZympmt6bFKvk=;\n b=fF9QGlUMdndxcCsRi9mUavWDYRpK1yq2YPy/xRvXlznLCUT5b1L2CwDIrTZdcbA4sC8k\n i/jvomzml3y6CxRSHCmddLPQ9gtT1rQF2B2X05G7c1gtD/tOsGtUmCrARjxvJ/6BeQWr\n N55uaH953PcBOhVdYubaSbK6vMQRkE06y2D80nYMNiOS/4kP13F+d4UiZTVVfrTOEUjF\n kTfWxpRnXR7bA8vTsaPS6iFz7nnNtNMVTBUu54DFn90e4I0IGEFZWERU00Eh4jhThsSa\n ZyQSi18mvgfe6lkvP1Gr5/Qgoj9LztIpp8fu3noVMGe5TQOqAEgFgBAlVORgGafkKZJE tQ==",
        "From": "<pbhagavatula@marvell.com>",
        "To": "<jerinj@marvell.com>",
        "CC": "<dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>, \"Amit Prakash\n Shukla\" <amitprakashs@marvell.com>, Konstantin Ananyev\n <konstantin.v.ananyev@yandex.ru>",
        "Subject": "[PATCH v6 2/2] test: add reassembly perf test",
        "Date": "Tue, 11 Jul 2023 12:52:22 -0400",
        "Message-ID": "<20230711165222.1755-2-pbhagavatula@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230711165222.1755-1-pbhagavatula@marvell.com>",
        "References": "<20230602170147.4828-1-pbhagavatula@marvell.com>\n <20230711165222.1755-1-pbhagavatula@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "WFsbNw04YpaIezLYTgu_DJiMYcvMjZjY",
        "X-Proofpoint-GUID": "WFsbNw04YpaIezLYTgu_DJiMYcvMjZjY",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.254,Aquarius:18.0.957,Hydra:6.0.591,FMLib:17.11.176.26\n definitions=2023-07-11_09,2023-07-11_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Pavan Nikhilesh <pbhagavatula@marvell.com>\n\nAdd reassembly perf autotest for both ipv4 and ipv6 reassembly.\nEach test is performed with variable number of fragments per flow,\neither ordered or unordered fragments and interleaved flows.\n\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\nReviewed-by: Amit Prakash Shukla <amitprakashs@marvell.com>\nTested-by: Amit Prakash Shukla <amitprakashs@marvell.com>\nAcked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>\n---\n app/test/meson.build            |    2 +\n app/test/test_reassembly_perf.c | 1003 +++++++++++++++++++++++++++++++\n 2 files changed, 1005 insertions(+)\n create mode 100644 app/test/test_reassembly_perf.c",
    "diff": "diff --git a/app/test/meson.build b/app/test/meson.build\nindex 3e0a2360a3..b89cf0368f 100644\n--- a/app/test/meson.build\n+++ b/app/test/meson.build\n@@ -106,6 +106,7 @@ test_sources = files(\n         'test_rawdev.c',\n         'test_rcu_qsbr.c',\n         'test_rcu_qsbr_perf.c',\n+        'test_reassembly_perf.c',\n         'test_reciprocal_division.c',\n         'test_reciprocal_division_perf.c',\n         'test_red.c',\n@@ -296,6 +297,7 @@ perf_test_names = [\n         'trace_perf_autotest',\n         'ipsec_perf_autotest',\n         'thash_perf_autotest',\n+        'reassembly_perf_autotest',\n ]\n \n driver_test_names = [\ndiff --git a/app/test/test_reassembly_perf.c b/app/test/test_reassembly_perf.c\nnew file mode 100644\nindex 0000000000..c11b65291f\n--- /dev/null\n+++ b/app/test/test_reassembly_perf.c\n@@ -0,0 +1,1003 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Marvell.\n+ */\n+\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+#include <rte_cycles.h>\n+#include <rte_ether.h>\n+#include <rte_hexdump.h>\n+#include <rte_ip.h>\n+#include <rte_ip_frag.h>\n+#include <rte_mbuf.h>\n+#include <rte_mbuf_pool_ops.h>\n+#include <rte_os_shim.h>\n+#include <rte_random.h>\n+#include <rte_udp.h>\n+\n+#include \"test.h\"\n+\n+#define MAX_FLOWS\t    (1024 * 32)\n+#define MAX_BKTS\t    MAX_FLOWS\n+#define MAX_ENTRIES_PER_BKT 16\n+#define MAX_FRAGMENTS\t    RTE_LIBRTE_IP_FRAG_MAX_FRAG\n+#define MIN_FRAGMENTS\t    2\n+#define MAX_PKTS\t    (MAX_FLOWS * MAX_FRAGMENTS)\n+\n+#define MAX_PKT_LEN 2048\n+#define MAX_TTL_MS  (5 * MS_PER_S)\n+\n+/* use RFC863 Discard Protocol */\n+#define UDP_SRC_PORT 9\n+#define UDP_DST_PORT 9\n+\n+/* use RFC5735 / RFC2544 reserved network test addresses */\n+#define IP_SRC_ADDR(x) ((198U << 24) | (18 << 16) | (0 << 8) | (x))\n+#define IP_DST_ADDR(x) ((198U << 24) | (18 << 16) | (1 << 15) | (x))\n+\n+/* 2001:0200::/48 is IANA reserved range for IPv6 benchmarking (RFC5180) */\n+static uint8_t ip6_addr[16] = {32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};\n+#define IP6_VERSION 6\n+\n+#define IP_DEFTTL 64 /* from RFC 1340. 
*/\n+\n+static struct rte_ip_frag_tbl *frag_tbl;\n+static struct rte_mempool *pkt_pool;\n+static struct rte_mbuf *mbufs[MAX_FLOWS][MAX_FRAGMENTS];\n+static uint8_t frag_per_flow[MAX_FLOWS];\n+static uint32_t flow_cnt;\n+\n+#define FILL_MODE_LINEAR      0\n+#define FILL_MODE_RANDOM      1\n+#define FILL_MODE_INTERLEAVED 2\n+\n+static int\n+reassembly_test_setup(void)\n+{\n+\tuint64_t max_ttl_cyc = (MAX_TTL_MS * rte_get_timer_hz()) / 1E3;\n+\n+\tfrag_tbl = rte_ip_frag_table_create(MAX_BKTS, MAX_ENTRIES_PER_BKT,\n+\t\t\t\t\t    MAX_BKTS * MAX_ENTRIES_PER_BKT, max_ttl_cyc,\n+\t\t\t\t\t    rte_socket_id());\n+\tif (frag_tbl == NULL)\n+\t\treturn TEST_FAILED;\n+\n+\trte_mbuf_set_user_mempool_ops(\"ring_mp_mc\");\n+\tpkt_pool = rte_pktmbuf_pool_create(\n+\t\t\"reassembly_perf_pool\", MAX_FLOWS * MAX_FRAGMENTS, 0, 0,\n+\t\tRTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());\n+\tif (pkt_pool == NULL) {\n+\t\tprintf(\"[%s] Failed to create pkt pool\\n\", __func__);\n+\t\trte_ip_frag_table_destroy(frag_tbl);\n+\t\treturn TEST_FAILED;\n+\t}\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static void\n+reassembly_test_teardown(void)\n+{\n+\tif (frag_tbl != NULL)\n+\t\trte_ip_frag_table_destroy(frag_tbl);\n+\n+\tif (pkt_pool != NULL)\n+\t\trte_mempool_free(pkt_pool);\n+}\n+\n+static void\n+randomize_array_positions(void **array, uint8_t sz)\n+{\n+\tvoid *tmp;\n+\tint i, j;\n+\n+\tif (sz == 2) {\n+\t\ttmp = array[0];\n+\t\tarray[0] = array[1];\n+\t\tarray[1] = tmp;\n+\t} else {\n+\t\tfor (i = sz - 1; i > 0; i--) {\n+\t\t\tj = rte_rand_max(i + 1);\n+\t\t\ttmp = array[i];\n+\t\t\tarray[i] = array[j];\n+\t\t\tarray[j] = tmp;\n+\t\t}\n+\t}\n+}\n+\n+static void\n+reassembly_print_banner(const char *proto_str)\n+{\n+\tprintf(\"+==============================================================\"\n+\t       \"============================================+\\n\");\n+\tprintf(\"| %-32s| %-3s : %-58d|\\n\", proto_str, \"Flow Count\", MAX_FLOWS);\n+\tprintf(\"+================+================+=============+=============+\"\n+\t       \"========================+===================+\\n\");\n+\tprintf(\"%-17s%-17s%-14s%-14s%-25s%-20s\\n\", \"| Fragment Order\",\n+\t       \"| Fragments/Flow\", \"| Outstanding\", \"| Cycles/Flow\",\n+\t       \"| Cycles/Fragment insert\", \"| Cycles/Reassembly |\");\n+\tprintf(\"+================+================+=============+=============+\"\n+\t       \"========================+===================+\\n\");\n+}\n+\n+static void\n+ipv4_frag_fill_data(struct rte_mbuf **mbuf, uint8_t nb_frags, uint32_t flow_id,\n+\t\t    uint8_t fill_mode)\n+{\n+\tstruct rte_ether_hdr *eth_hdr;\n+\tstruct rte_ipv4_hdr *ip_hdr;\n+\tstruct rte_udp_hdr *udp_hdr;\n+\tuint16_t frag_len;\n+\tuint8_t i;\n+\n+\tfrag_len = MAX_PKT_LEN / nb_frags;\n+\tif (frag_len % 8)\n+\t\tfrag_len = RTE_ALIGN_MUL_CEIL(frag_len, 8);\n+\n+\tfor (i = 0; i < nb_frags; i++) {\n+\t\tstruct rte_mbuf *frag = mbuf[i];\n+\t\tuint16_t frag_offset = 0;\n+\t\tuint32_t ip_cksum;\n+\t\tuint16_t pkt_len;\n+\t\tuint16_t *ptr16;\n+\n+\t\tfrag_offset = i * (frag_len / 8);\n+\n+\t\tif (i == nb_frags - 1)\n+\t\t\tfrag_len = MAX_PKT_LEN - (frag_len * (nb_frags - 1));\n+\t\telse\n+\t\t\tfrag_offset |= RTE_IPV4_HDR_MF_FLAG;\n+\n+\t\trte_pktmbuf_reset_headroom(frag);\n+\t\teth_hdr = rte_pktmbuf_mtod(frag, struct rte_ether_hdr *);\n+\t\tip_hdr = rte_pktmbuf_mtod_offset(frag, struct rte_ipv4_hdr *,\n+\t\t\t\t\t\t sizeof(struct rte_ether_hdr));\n+\t\tudp_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\tfrag, struct rte_udp_hdr *,\n+\t\t\tsizeof(struct rte_ether_hdr) 
+\n+\t\t\t\tsizeof(struct rte_ipv4_hdr));\n+\n+\t\trte_ether_unformat_addr(\"02:00:00:00:00:01\",\n+\t\t\t\t\t&eth_hdr->dst_addr);\n+\t\trte_ether_unformat_addr(\"02:00:00:00:00:00\",\n+\t\t\t\t\t&eth_hdr->src_addr);\n+\t\teth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);\n+\n+\t\tpkt_len = frag_len;\n+\t\t/*\n+\t\t * Initialize UDP header.\n+\t\t */\n+\t\tif (i == 0) {\n+\t\t\tudp_hdr->src_port = rte_cpu_to_be_16(UDP_SRC_PORT);\n+\t\t\tudp_hdr->dst_port = rte_cpu_to_be_16(UDP_DST_PORT);\n+\t\t\tudp_hdr->dgram_len = rte_cpu_to_be_16(pkt_len);\n+\t\t\tudp_hdr->dgram_cksum = 0; /* No UDP checksum. */\n+\t\t}\n+\n+\t\t/*\n+\t\t * Initialize IP header.\n+\t\t */\n+\t\tpkt_len = (uint16_t)(pkt_len + sizeof(struct rte_ipv4_hdr));\n+\t\tip_hdr->version_ihl = RTE_IPV4_VHL_DEF;\n+\t\tip_hdr->type_of_service = 0;\n+\t\tip_hdr->fragment_offset = rte_cpu_to_be_16(frag_offset);\n+\t\tip_hdr->time_to_live = IP_DEFTTL;\n+\t\tip_hdr->next_proto_id = IPPROTO_UDP;\n+\t\tip_hdr->packet_id =\n+\t\t\trte_cpu_to_be_16((flow_id + 1) % UINT16_MAX);\n+\t\tip_hdr->total_length = rte_cpu_to_be_16(pkt_len);\n+\t\t/* Using more than 32K flows will modify the 2nd octet of the IP. */\n+\t\tip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR(flow_id));\n+\t\tip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR(flow_id));\n+\n+\t\t/*\n+\t\t * Compute IP header checksum.\n+\t\t */\n+\t\tptr16 = (unaligned_uint16_t *)ip_hdr;\n+\t\tip_cksum = 0;\n+\t\tip_cksum += ptr16[0];\n+\t\tip_cksum += ptr16[1];\n+\t\tip_cksum += ptr16[2];\n+\t\tip_cksum += ptr16[3];\n+\t\tip_cksum += ptr16[4];\n+\t\tip_cksum += ptr16[6];\n+\t\tip_cksum += ptr16[7];\n+\t\tip_cksum += ptr16[8];\n+\t\tip_cksum += ptr16[9];\n+\n+\t\t/*\n+\t\t * Reduce 32 bit checksum to 16 bits and complement it.\n+\t\t */\n+\t\tip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +\n+\t\t\t   (ip_cksum & 0x0000FFFF);\n+\t\tif (ip_cksum > 65535)\n+\t\t\tip_cksum -= 65535;\n+\t\tip_cksum = (~ip_cksum) & 0x0000FFFF;\n+\t\tif (ip_cksum == 0)\n+\t\t\tip_cksum = 0xFFFF;\n+\t\tip_hdr->hdr_checksum = (uint16_t)ip_cksum;\n+\n+\t\tfrag->data_len = sizeof(struct rte_ether_hdr) + pkt_len;\n+\t\tfrag->pkt_len = frag->data_len;\n+\t\tfrag->l2_len = sizeof(struct rte_ether_hdr);\n+\t\tfrag->l3_len = sizeof(struct rte_ipv4_hdr);\n+\t}\n+\n+\tif (fill_mode == FILL_MODE_RANDOM)\n+\t\trandomize_array_positions((void **)mbuf, nb_frags);\n+}\n+\n+static uint8_t\n+get_rand_frags(uint8_t max_frag)\n+{\n+\tuint8_t frags = rte_rand_max(max_frag + 1);\n+\n+\treturn frags <= 1 ? 
MIN_FRAGMENTS : frags;\n+}\n+\n+static int\n+ipv4_rand_frag_pkt_setup(uint8_t fill_mode, uint8_t max_frag)\n+{\n+\tuint8_t nb_frag;\n+\tint i;\n+\n+\tfor (i = 0; i < MAX_FLOWS; i++) {\n+\t\tnb_frag = get_rand_frags(max_frag);\n+\t\tif (rte_mempool_get_bulk(pkt_pool, (void **)mbufs[i], nb_frag) <\n+\t\t    0)\n+\t\t\treturn TEST_FAILED;\n+\t\tipv4_frag_fill_data(mbufs[i], nb_frag, i, fill_mode);\n+\t\tfrag_per_flow[i] = nb_frag;\n+\t}\n+\tflow_cnt = i;\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+ipv4_frag_pkt_setup(uint8_t fill_mode, uint8_t nb_frag)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < MAX_FLOWS; i++) {\n+\t\tif (rte_mempool_get_bulk(pkt_pool, (void **)mbufs[i], nb_frag) <\n+\t\t    0)\n+\t\t\treturn TEST_FAILED;\n+\t\tipv4_frag_fill_data(mbufs[i], nb_frag, i, fill_mode);\n+\t\tfrag_per_flow[i] = nb_frag;\n+\t}\n+\tflow_cnt = i;\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static void\n+ipv6_frag_fill_data(struct rte_mbuf **mbuf, uint8_t nb_frags, uint32_t flow_id,\n+\t\t    uint8_t fill_mode)\n+{\n+\tstruct ipv6_extension_fragment *frag_hdr;\n+\tstruct rte_ether_hdr *eth_hdr;\n+\tstruct rte_ipv6_hdr *ip_hdr;\n+\tstruct rte_udp_hdr *udp_hdr;\n+\tuint16_t frag_len;\n+\tuint8_t i;\n+\n+\tfrag_len = MAX_PKT_LEN / nb_frags;\n+\tif (frag_len % 8)\n+\t\tfrag_len = RTE_ALIGN_MUL_CEIL(frag_len, 8);\n+\n+\tfor (i = 0; i < nb_frags; i++) {\n+\t\tstruct rte_mbuf *frag = mbuf[i];\n+\t\tuint16_t frag_offset = 0;\n+\t\tuint16_t pkt_len;\n+\n+\t\tfrag_offset = i * (frag_len / 8);\n+\t\tfrag_offset <<= 3;\n+\t\tif (i == nb_frags - 1) {\n+\t\t\tfrag_len = MAX_PKT_LEN - (frag_len * (nb_frags - 1));\n+\t\t\tfrag_offset = RTE_IPV6_SET_FRAG_DATA(frag_offset, 0);\n+\t\t} else {\n+\t\t\tfrag_offset = RTE_IPV6_SET_FRAG_DATA(frag_offset, 1);\n+\t\t}\n+\n+\t\trte_pktmbuf_reset_headroom(frag);\n+\t\teth_hdr = rte_pktmbuf_mtod(frag, struct rte_ether_hdr *);\n+\t\tip_hdr = rte_pktmbuf_mtod_offset(frag, struct rte_ipv6_hdr *,\n+\t\t\t\t\t\t sizeof(struct rte_ether_hdr));\n+\t\tudp_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\tfrag, struct rte_udp_hdr *,\n+\t\t\tsizeof(struct rte_ether_hdr) +\n+\t\t\t\tsizeof(struct rte_ipv6_hdr) +\n+\t\t\t\tRTE_IPV6_FRAG_HDR_SIZE);\n+\t\tfrag_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\tfrag, struct ipv6_extension_fragment *,\n+\t\t\tsizeof(struct rte_ether_hdr) +\n+\t\t\t\tsizeof(struct rte_ipv6_hdr));\n+\n+\t\trte_ether_unformat_addr(\"02:00:00:00:00:01\",\n+\t\t\t\t\t&eth_hdr->dst_addr);\n+\t\trte_ether_unformat_addr(\"02:00:00:00:00:00\",\n+\t\t\t\t\t&eth_hdr->src_addr);\n+\t\teth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);\n+\n+\t\tpkt_len = frag_len;\n+\t\t/*\n+\t\t * Initialize UDP header.\n+\t\t */\n+\t\tif (i == 0) {\n+\t\t\tudp_hdr->src_port = rte_cpu_to_be_16(UDP_SRC_PORT);\n+\t\t\tudp_hdr->dst_port = rte_cpu_to_be_16(UDP_DST_PORT);\n+\t\t\tudp_hdr->dgram_len = rte_cpu_to_be_16(pkt_len);\n+\t\t\tudp_hdr->dgram_cksum = 0; /* No UDP checksum. 
*/\n+\t\t}\n+\n+\t\t/*\n+\t\t * Initialize IP header.\n+\t\t */\n+\t\tpkt_len = (uint16_t)(pkt_len + sizeof(struct rte_ipv6_hdr) +\n+\t\t\t\t     RTE_IPV6_FRAG_HDR_SIZE);\n+\t\tip_hdr->vtc_flow = rte_cpu_to_be_32(IP6_VERSION << 28);\n+\t\tip_hdr->payload_len =\n+\t\t\trte_cpu_to_be_16(pkt_len - sizeof(struct rte_ipv6_hdr));\n+\t\tip_hdr->proto = IPPROTO_FRAGMENT;\n+\t\tip_hdr->hop_limits = IP_DEFTTL;\n+\t\tmemcpy(ip_hdr->src_addr, ip6_addr, sizeof(ip_hdr->src_addr));\n+\t\tmemcpy(ip_hdr->dst_addr, ip6_addr, sizeof(ip_hdr->dst_addr));\n+\t\tip_hdr->src_addr[7] = (flow_id >> 16) & 0xf;\n+\t\tip_hdr->src_addr[7] |= 0x10;\n+\t\tip_hdr->src_addr[8] = (flow_id >> 8) & 0xff;\n+\t\tip_hdr->src_addr[9] = flow_id & 0xff;\n+\n+\t\tip_hdr->dst_addr[7] = (flow_id >> 16) & 0xf;\n+\t\tip_hdr->dst_addr[7] |= 0x20;\n+\t\tip_hdr->dst_addr[8] = (flow_id >> 8) & 0xff;\n+\t\tip_hdr->dst_addr[9] = flow_id & 0xff;\n+\n+\t\tfrag_hdr->next_header = IPPROTO_UDP;\n+\t\tfrag_hdr->reserved = 0;\n+\t\tfrag_hdr->frag_data = rte_cpu_to_be_16(frag_offset);\n+\t\tfrag_hdr->id = rte_cpu_to_be_32(flow_id + 1);\n+\n+\t\tfrag->data_len = sizeof(struct rte_ether_hdr) + pkt_len;\n+\t\tfrag->pkt_len = frag->data_len;\n+\t\tfrag->l2_len = sizeof(struct rte_ether_hdr);\n+\t\tfrag->l3_len =\n+\t\t\tsizeof(struct rte_ipv6_hdr) + RTE_IPV6_FRAG_HDR_SIZE;\n+\t}\n+\n+\tif (fill_mode == FILL_MODE_RANDOM)\n+\t\trandomize_array_positions((void **)mbuf, nb_frags);\n+}\n+\n+static int\n+ipv6_rand_frag_pkt_setup(uint8_t fill_mode, uint8_t max_frag)\n+{\n+\tuint8_t nb_frag;\n+\tint i;\n+\n+\tfor (i = 0; i < MAX_FLOWS; i++) {\n+\t\tnb_frag = get_rand_frags(max_frag);\n+\t\tif (rte_mempool_get_bulk(pkt_pool, (void **)mbufs[i], nb_frag) <\n+\t\t    0)\n+\t\t\treturn TEST_FAILED;\n+\t\tipv6_frag_fill_data(mbufs[i], nb_frag, i, fill_mode);\n+\t\tfrag_per_flow[i] = nb_frag;\n+\t}\n+\tflow_cnt = i;\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+ipv6_frag_pkt_setup(uint8_t fill_mode, uint8_t nb_frag)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < MAX_FLOWS; i++) {\n+\t\tif (rte_mempool_get_bulk(pkt_pool, (void **)mbufs[i], nb_frag) <\n+\t\t    0)\n+\t\t\treturn TEST_FAILED;\n+\t\tipv6_frag_fill_data(mbufs[i], nb_frag, i, fill_mode);\n+\t\tfrag_per_flow[i] = nb_frag;\n+\t}\n+\tflow_cnt = i;\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static void\n+frag_pkt_teardown(void)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < flow_cnt; i++)\n+\t\trte_pktmbuf_free(mbufs[i][0]);\n+}\n+\n+static void\n+reassembly_print_stats(int8_t nb_frags, uint8_t fill_order,\n+\t\t       uint32_t outstanding, uint64_t cyc_per_flow,\n+\t\t       uint64_t cyc_per_frag_insert,\n+\t\t       uint64_t cyc_per_reassembly)\n+{\n+\tchar frag_str[8], order_str[12];\n+\n+\tif (nb_frags > 0)\n+\t\tsnprintf(frag_str, sizeof(frag_str), \"%d\", nb_frags);\n+\telse\n+\t\tsnprintf(frag_str, sizeof(frag_str), \"RANDOM\");\n+\n+\tswitch (fill_order) {\n+\tcase FILL_MODE_LINEAR:\n+\t\tsnprintf(order_str, sizeof(order_str), \"LINEAR\");\n+\t\tbreak;\n+\tcase FILL_MODE_RANDOM:\n+\t\tsnprintf(order_str, sizeof(order_str), \"RANDOM\");\n+\t\tbreak;\n+\tcase FILL_MODE_INTERLEAVED:\n+\t\tsnprintf(order_str, sizeof(order_str), \"INTERLEAVED\");\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tprintf(\"| %-14s | %-14s | %-11d | %-11\" PRIu64 \" | %-22\" PRIu64\n+\t       \" | %-17\" PRIu64 \" |\\n\",\n+\t       order_str, frag_str, outstanding, cyc_per_flow,\n+\t       cyc_per_frag_insert, cyc_per_reassembly);\n+\tprintf(\"+================+================+=============+=============+\"\n+\t       
\"========================+===================+\\n\");\n+}\n+\n+static void\n+join_array(struct rte_mbuf **dest_arr, struct rte_mbuf **src_arr,\n+\t   uint8_t offset, uint8_t sz)\n+{\n+\tint i, j;\n+\n+\tfor (i = offset, j = 0; j < sz; i++, j++)\n+\t\tdest_arr[i] = src_arr[j];\n+}\n+\n+static int\n+ipv4_reassembly_perf(int8_t nb_frags, uint8_t fill_order)\n+{\n+\tstruct rte_ip_frag_death_row death_row;\n+\tuint64_t total_reassembled_cyc = 0;\n+\tuint64_t total_empty_cyc = 0;\n+\tuint64_t tstamp, flow_tstamp;\n+\tuint64_t frag_processed = 0;\n+\tuint64_t total_cyc = 0;\n+\tuint32_t i, j;\n+\n+\tfor (i = 0; i < flow_cnt; i++) {\n+\t\tstruct rte_mbuf *buf_out = NULL;\n+\t\tuint8_t reassembled = 0;\n+\n+\t\tflow_tstamp = rte_rdtsc_precise();\n+\t\tfor (j = 0; j < frag_per_flow[i]; j++) {\n+\t\t\tstruct rte_mbuf *buf = mbufs[i][j];\n+\t\t\tstruct rte_ipv4_hdr *ip_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\t\tbuf, struct rte_ipv4_hdr *, buf->l2_len);\n+\n+\t\t\ttstamp = rte_rdtsc_precise();\n+\t\t\tbuf_out = rte_ipv4_frag_reassemble_packet(\n+\t\t\t\tfrag_tbl, &death_row, buf, flow_tstamp, ip_hdr);\n+\n+\t\t\tif (buf_out == NULL) {\n+\t\t\t\ttotal_empty_cyc += rte_rdtsc_precise() - tstamp;\n+\t\t\t\tfrag_processed++;\n+\t\t\t\tcontinue;\n+\t\t\t} else {\n+\t\t\t\t/*Packet out*/\n+\t\t\t\ttotal_reassembled_cyc +=\n+\t\t\t\t\trte_rdtsc_precise() - tstamp;\n+\t\t\t\treassembled = 1;\n+\t\t\t}\n+\t\t}\n+\t\ttotal_cyc += rte_rdtsc_precise() - flow_tstamp;\n+\t\tif (!reassembled || buf_out->nb_segs != frag_per_flow[i])\n+\t\t\treturn TEST_FAILED;\n+\t\tmemset(mbufs[i], 0, sizeof(struct rte_mbuf *) * MAX_FRAGMENTS);\n+\t\tmbufs[i][0] = buf_out;\n+\t}\n+\n+\treassembly_print_stats(nb_frags, fill_order, 0, total_cyc / flow_cnt,\n+\t\t\t       total_empty_cyc / frag_processed,\n+\t\t\t       total_reassembled_cyc / flow_cnt);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+ipv4_outstanding_reassembly_perf(int8_t nb_frags, uint8_t fill_order,\n+\t\t\t\t uint32_t outstanding)\n+{\n+\tstruct rte_ip_frag_death_row death_row;\n+\tuint64_t total_reassembled_cyc = 0;\n+\tuint64_t total_empty_cyc = 0;\n+\tuint64_t tstamp, flow_tstamp;\n+\tuint64_t frag_processed = 0;\n+\tuint64_t total_cyc = 0;\n+\tuint32_t i, j, k;\n+\n+\tk = outstanding;\n+\t/* Insert outstanding fragments */\n+\tfor (i = 0; k && (i < flow_cnt); i++) {\n+\t\tstruct rte_mbuf *buf_out = NULL;\n+\n+\t\tflow_tstamp = rte_rdtsc_precise();\n+\t\tfor (j = frag_per_flow[i] - 1; j > 0; j--) {\n+\t\t\tstruct rte_mbuf *buf = mbufs[i][j];\n+\t\t\tstruct rte_ipv4_hdr *ip_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\t\tbuf, struct rte_ipv4_hdr *, buf->l2_len);\n+\n+\t\t\ttstamp = rte_rdtsc_precise();\n+\t\t\tbuf_out = rte_ipv4_frag_reassemble_packet(\n+\t\t\t\tfrag_tbl, &death_row, buf, flow_tstamp, ip_hdr);\n+\t\t\ttotal_empty_cyc += rte_rdtsc_precise() - tstamp;\n+\t\t\tfrag_processed++;\n+\t\t\tif (buf_out != NULL)\n+\t\t\t\treturn TEST_FAILED;\n+\n+\t\t\tk--;\n+\t\t}\n+\t\tfrag_per_flow[i] = 1;\n+\t}\n+\n+\tfor (i = 0; i < flow_cnt; i++) {\n+\t\tstruct rte_mbuf *buf_out = NULL;\n+\t\tuint8_t reassembled = 0;\n+\n+\t\tflow_tstamp = rte_rdtsc_precise();\n+\t\tfor (j = 0; j < frag_per_flow[i]; j++) {\n+\t\t\tstruct rte_mbuf *buf = mbufs[i][j];\n+\t\t\tstruct rte_ipv4_hdr *ip_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\t\tbuf, struct rte_ipv4_hdr *, buf->l2_len);\n+\n+\t\t\ttstamp = rte_rdtsc_precise();\n+\t\t\tbuf_out = rte_ipv4_frag_reassemble_packet(\n+\t\t\t\tfrag_tbl, &death_row, buf, flow_tstamp, ip_hdr);\n+\n+\t\t\tif (buf_out == NULL) {\n+\t\t\t\ttotal_empty_cyc += 
rte_rdtsc_precise() - tstamp;\n+\t\t\t\tfrag_processed++;\n+\t\t\t\tcontinue;\n+\t\t\t} else {\n+\t\t\t\t/*Packet out*/\n+\t\t\t\ttotal_reassembled_cyc +=\n+\t\t\t\t\trte_rdtsc_precise() - tstamp;\n+\t\t\t\treassembled = 1;\n+\t\t\t}\n+\t\t}\n+\t\ttotal_cyc += rte_rdtsc_precise() - flow_tstamp;\n+\t\tif (!reassembled)\n+\t\t\treturn TEST_FAILED;\n+\t\tmemset(mbufs[i], 0, sizeof(struct rte_mbuf *) * MAX_FRAGMENTS);\n+\t\tmbufs[i][0] = buf_out;\n+\t}\n+\n+\treassembly_print_stats(nb_frags, fill_order, outstanding,\n+\t\t\t       total_cyc / flow_cnt,\n+\t\t\t       total_empty_cyc / frag_processed,\n+\t\t\t       total_reassembled_cyc / flow_cnt);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+ipv4_reassembly_interleaved_flows_perf(uint8_t nb_frags)\n+{\n+\tstruct rte_ip_frag_death_row death_row;\n+\tuint64_t total_reassembled_cyc = 0;\n+\tuint64_t total_empty_cyc = 0;\n+\tuint64_t tstamp, flow_tstamp;\n+\tuint64_t frag_processed = 0;\n+\tuint64_t total_cyc = 0;\n+\tuint32_t i, j;\n+\n+\tfor (i = 0; i < flow_cnt; i += 4) {\n+\t\tstruct rte_mbuf *buf_out[4] = {NULL};\n+\t\tuint8_t reassembled = 0;\n+\t\tuint8_t nb_frags = 0;\n+\t\tuint8_t prev = 0;\n+\n+\t\tfor (j = 0; j < 4; j++)\n+\t\t\tnb_frags += frag_per_flow[i + j];\n+\n+\t\tstruct rte_mbuf *buf_arr[nb_frags];\n+\t\tfor (j = 0; j < 4; j++) {\n+\t\t\tjoin_array(buf_arr, mbufs[i + j], prev,\n+\t\t\t\t   frag_per_flow[i + j]);\n+\t\t\tprev += frag_per_flow[i + j];\n+\t\t}\n+\t\trandomize_array_positions((void **)buf_arr, nb_frags);\n+\t\tflow_tstamp = rte_rdtsc_precise();\n+\t\tfor (j = 0; j < nb_frags; j++) {\n+\t\t\tstruct rte_mbuf *buf = buf_arr[j];\n+\t\t\tstruct rte_ipv4_hdr *ip_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\t\tbuf, struct rte_ipv4_hdr *, buf->l2_len);\n+\n+\t\t\ttstamp = rte_rdtsc_precise();\n+\t\t\tbuf_out[reassembled] = rte_ipv4_frag_reassemble_packet(\n+\t\t\t\tfrag_tbl, &death_row, buf, flow_tstamp, ip_hdr);\n+\n+\t\t\tif (buf_out[reassembled] == NULL) {\n+\t\t\t\ttotal_empty_cyc += rte_rdtsc_precise() - tstamp;\n+\t\t\t\tfrag_processed++;\n+\t\t\t\tcontinue;\n+\t\t\t} else {\n+\t\t\t\t/*Packet out*/\n+\t\t\t\ttotal_reassembled_cyc +=\n+\t\t\t\t\trte_rdtsc_precise() - tstamp;\n+\t\t\t\treassembled++;\n+\t\t\t}\n+\t\t}\n+\t\ttotal_cyc += rte_rdtsc_precise() - flow_tstamp;\n+\t\tif (reassembled != 4)\n+\t\t\treturn TEST_FAILED;\n+\t\tfor (j = 0; j < 4; j++) {\n+\t\t\tmemset(mbufs[i + j], 0,\n+\t\t\t       sizeof(struct rte_mbuf *) * MAX_FRAGMENTS);\n+\t\t\tmbufs[i + j][0] = buf_out[j];\n+\t\t}\n+\t}\n+\n+\treassembly_print_stats(nb_frags, FILL_MODE_INTERLEAVED, 0,\n+\t\t\t       total_cyc / flow_cnt,\n+\t\t\t       total_empty_cyc / frag_processed,\n+\t\t\t       total_reassembled_cyc / flow_cnt);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+ipv6_reassembly_perf(int8_t nb_frags, uint8_t fill_order)\n+{\n+\tstruct rte_ip_frag_death_row death_row;\n+\tuint64_t total_reassembled_cyc = 0;\n+\tuint64_t total_empty_cyc = 0;\n+\tuint64_t tstamp, flow_tstamp;\n+\tuint64_t frag_processed = 0;\n+\tuint64_t total_cyc = 0;\n+\tuint32_t i, j;\n+\n+\tfor (i = 0; i < flow_cnt; i++) {\n+\t\tstruct rte_mbuf *buf_out = NULL;\n+\t\tuint8_t reassembled = 0;\n+\n+\t\tflow_tstamp = rte_rdtsc_precise();\n+\t\tfor (j = 0; j < frag_per_flow[i]; j++) {\n+\t\t\tstruct rte_mbuf *buf = mbufs[i][j];\n+\t\t\tstruct rte_ipv6_hdr *ip_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\t\tbuf, struct rte_ipv6_hdr *, buf->l2_len);\n+\t\t\tstruct ipv6_extension_fragment *frag_hdr =\n+\t\t\t\trte_pktmbuf_mtod_offset(\n+\t\t\t\t\tbuf, struct ipv6_extension_fragment 
*,\n+\t\t\t\t\tbuf->l2_len +\n+\t\t\t\t\t\tsizeof(struct rte_ipv6_hdr));\n+\n+\t\t\ttstamp = rte_rdtsc_precise();\n+\t\t\tbuf_out = rte_ipv6_frag_reassemble_packet(\n+\t\t\t\tfrag_tbl, &death_row, buf, flow_tstamp, ip_hdr,\n+\t\t\t\tfrag_hdr);\n+\n+\t\t\tif (buf_out == NULL) {\n+\t\t\t\ttotal_empty_cyc += rte_rdtsc_precise() - tstamp;\n+\t\t\t\tfrag_processed++;\n+\t\t\t\tcontinue;\n+\t\t\t} else {\n+\t\t\t\t/*Packet out*/\n+\t\t\t\ttotal_reassembled_cyc +=\n+\t\t\t\t\trte_rdtsc_precise() - tstamp;\n+\t\t\t\treassembled = 1;\n+\t\t\t}\n+\t\t}\n+\t\ttotal_cyc += rte_rdtsc_precise() - flow_tstamp;\n+\t\tif (!reassembled || buf_out->nb_segs != frag_per_flow[i])\n+\t\t\treturn TEST_FAILED;\n+\t\tmemset(mbufs[i], 0, sizeof(struct rte_mbuf *) * MAX_FRAGMENTS);\n+\t\tmbufs[i][0] = buf_out;\n+\t}\n+\n+\treassembly_print_stats(nb_frags, fill_order, 0, total_cyc / flow_cnt,\n+\t\t\t       total_empty_cyc / frag_processed,\n+\t\t\t       total_reassembled_cyc / flow_cnt);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+ipv6_outstanding_reassembly_perf(int8_t nb_frags, uint8_t fill_order,\n+\t\t\t\t uint32_t outstanding)\n+{\n+\tstruct rte_ip_frag_death_row death_row;\n+\tuint64_t total_reassembled_cyc = 0;\n+\tuint64_t total_empty_cyc = 0;\n+\tuint64_t tstamp, flow_tstamp;\n+\tuint64_t frag_processed = 0;\n+\tuint64_t total_cyc = 0;\n+\tuint32_t i, j, k;\n+\n+\tk = outstanding;\n+\t/* Insert outstanding fragments */\n+\tfor (i = 0; k && (i < flow_cnt); i++) {\n+\t\tstruct rte_mbuf *buf_out = NULL;\n+\n+\t\tflow_tstamp = rte_rdtsc_precise();\n+\t\tfor (j = frag_per_flow[i] - 1; j > 0; j--) {\n+\t\t\tstruct rte_mbuf *buf = mbufs[i][j];\n+\t\t\tstruct rte_ipv6_hdr *ip_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\t\tbuf, struct rte_ipv6_hdr *, buf->l2_len);\n+\t\t\tstruct ipv6_extension_fragment *frag_hdr =\n+\t\t\t\trte_pktmbuf_mtod_offset(\n+\t\t\t\t\tbuf, struct ipv6_extension_fragment *,\n+\t\t\t\t\tbuf->l2_len +\n+\t\t\t\t\t\tsizeof(struct rte_ipv6_hdr));\n+\n+\t\t\ttstamp = rte_rdtsc_precise();\n+\t\t\tbuf_out = rte_ipv6_frag_reassemble_packet(\n+\t\t\t\tfrag_tbl, &death_row, buf, flow_tstamp, ip_hdr,\n+\t\t\t\tfrag_hdr);\n+\t\t\ttotal_empty_cyc += rte_rdtsc_precise() - tstamp;\n+\t\t\tfrag_processed++;\n+\n+\t\t\tif (buf_out != NULL)\n+\t\t\t\treturn TEST_FAILED;\n+\n+\t\t\tk--;\n+\t\t}\n+\t\tfrag_per_flow[i] = 1;\n+\t}\n+\n+\tfor (i = 0; i < flow_cnt; i++) {\n+\t\tstruct rte_mbuf *buf_out = NULL;\n+\t\tuint8_t reassembled = 0;\n+\n+\t\tflow_tstamp = rte_rdtsc_precise();\n+\t\tfor (j = 0; j < frag_per_flow[i]; j++) {\n+\t\t\tstruct rte_mbuf *buf = mbufs[i][j];\n+\t\t\tstruct rte_ipv6_hdr *ip_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\t\tbuf, struct rte_ipv6_hdr *, buf->l2_len);\n+\t\t\tstruct ipv6_extension_fragment *frag_hdr =\n+\t\t\t\trte_pktmbuf_mtod_offset(\n+\t\t\t\t\tbuf, struct ipv6_extension_fragment *,\n+\t\t\t\t\tbuf->l2_len +\n+\t\t\t\t\t\tsizeof(struct rte_ipv6_hdr));\n+\n+\t\t\ttstamp = rte_rdtsc_precise();\n+\t\t\tbuf_out = rte_ipv6_frag_reassemble_packet(\n+\t\t\t\tfrag_tbl, &death_row, buf, flow_tstamp, ip_hdr,\n+\t\t\t\tfrag_hdr);\n+\n+\t\t\tif (buf_out == NULL) {\n+\t\t\t\ttotal_empty_cyc += rte_rdtsc_precise() - tstamp;\n+\t\t\t\tfrag_processed++;\n+\t\t\t\tcontinue;\n+\t\t\t} else {\n+\t\t\t\t/*Packet out*/\n+\t\t\t\ttotal_reassembled_cyc +=\n+\t\t\t\t\trte_rdtsc_precise() - tstamp;\n+\t\t\t\treassembled = 1;\n+\t\t\t}\n+\t\t}\n+\t\ttotal_cyc += rte_rdtsc_precise() - flow_tstamp;\n+\t\tif (!reassembled)\n+\t\t\treturn TEST_FAILED;\n+\t\tmemset(mbufs[i], 0, sizeof(struct rte_mbuf *) * 
MAX_FRAGMENTS);\n+\t\tmbufs[i][0] = buf_out;\n+\t}\n+\n+\treassembly_print_stats(nb_frags, fill_order, outstanding,\n+\t\t\t       total_cyc / flow_cnt,\n+\t\t\t       total_empty_cyc / frag_processed,\n+\t\t\t       total_reassembled_cyc / flow_cnt);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+ipv6_reassembly_interleaved_flows_perf(int8_t nb_frags)\n+{\n+\tstruct rte_ip_frag_death_row death_row;\n+\tuint64_t total_reassembled_cyc = 0;\n+\tuint64_t total_empty_cyc = 0;\n+\tuint64_t tstamp, flow_tstamp;\n+\tuint64_t frag_processed = 0;\n+\tuint64_t total_cyc = 0;\n+\tuint32_t i, j;\n+\n+\tfor (i = 0; i < flow_cnt; i += 4) {\n+\t\tstruct rte_mbuf *buf_out[4] = {NULL};\n+\t\tuint8_t reassembled = 0;\n+\t\tuint8_t nb_frags = 0;\n+\t\tuint8_t prev = 0;\n+\n+\t\tfor (j = 0; j < 4; j++)\n+\t\t\tnb_frags += frag_per_flow[i + j];\n+\n+\t\tstruct rte_mbuf *buf_arr[nb_frags];\n+\t\tfor (j = 0; j < 4; j++) {\n+\t\t\tjoin_array(buf_arr, mbufs[i + j], prev,\n+\t\t\t\t   frag_per_flow[i + j]);\n+\t\t\tprev += frag_per_flow[i + j];\n+\t\t}\n+\t\trandomize_array_positions((void **)buf_arr, nb_frags);\n+\t\tflow_tstamp = rte_rdtsc_precise();\n+\t\tfor (j = 0; j < nb_frags; j++) {\n+\t\t\tstruct rte_mbuf *buf = buf_arr[j];\n+\t\t\tstruct rte_ipv6_hdr *ip_hdr = rte_pktmbuf_mtod_offset(\n+\t\t\t\tbuf, struct rte_ipv6_hdr *, buf->l2_len);\n+\t\t\tstruct ipv6_extension_fragment *frag_hdr =\n+\t\t\t\trte_pktmbuf_mtod_offset(\n+\t\t\t\t\tbuf, struct ipv6_extension_fragment *,\n+\t\t\t\t\tbuf->l2_len +\n+\t\t\t\t\t\tsizeof(struct rte_ipv6_hdr));\n+\n+\t\t\ttstamp = rte_rdtsc_precise();\n+\t\t\tbuf_out[reassembled] = rte_ipv6_frag_reassemble_packet(\n+\t\t\t\tfrag_tbl, &death_row, buf, flow_tstamp, ip_hdr,\n+\t\t\t\tfrag_hdr);\n+\n+\t\t\tif (buf_out[reassembled] == NULL) {\n+\t\t\t\ttotal_empty_cyc += rte_rdtsc_precise() - tstamp;\n+\t\t\t\tfrag_processed++;\n+\t\t\t\tcontinue;\n+\t\t\t} else {\n+\t\t\t\t/*Packet out*/\n+\t\t\t\ttotal_reassembled_cyc +=\n+\t\t\t\t\trte_rdtsc_precise() - tstamp;\n+\t\t\t\treassembled++;\n+\t\t\t}\n+\t\t}\n+\t\ttotal_cyc += rte_rdtsc_precise() - flow_tstamp;\n+\t\tif (reassembled != 4)\n+\t\t\treturn TEST_FAILED;\n+\t\tfor (j = 0; j < 4; j++) {\n+\t\t\tmemset(mbufs[i + j], 0,\n+\t\t\t       sizeof(struct rte_mbuf *) * MAX_FRAGMENTS);\n+\t\t\tmbufs[i + j][0] = buf_out[j];\n+\t\t}\n+\t}\n+\n+\treassembly_print_stats(nb_frags, FILL_MODE_INTERLEAVED, 0,\n+\t\t\t       total_cyc / flow_cnt,\n+\t\t\t       total_empty_cyc / frag_processed,\n+\t\t\t       total_reassembled_cyc / flow_cnt);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+ipv4_reassembly_test(int8_t nb_frags, uint8_t fill_order, uint32_t outstanding)\n+{\n+\tint rc;\n+\n+\tif (nb_frags > 0)\n+\t\trc = ipv4_frag_pkt_setup(fill_order, nb_frags);\n+\telse\n+\t\trc = ipv4_rand_frag_pkt_setup(fill_order, MAX_FRAGMENTS);\n+\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (outstanding)\n+\t\trc = ipv4_outstanding_reassembly_perf(nb_frags, fill_order,\n+\t\t\t\t\t\t      outstanding);\n+\telse if (fill_order == FILL_MODE_INTERLEAVED)\n+\t\trc = ipv4_reassembly_interleaved_flows_perf(nb_frags);\n+\telse\n+\t\trc = ipv4_reassembly_perf(nb_frags, fill_order);\n+\n+\tfrag_pkt_teardown();\n+\n+\treturn rc;\n+}\n+\n+static int\n+ipv6_reassembly_test(int8_t nb_frags, uint8_t fill_order, uint32_t outstanding)\n+{\n+\tint rc;\n+\n+\tif (nb_frags > 0)\n+\t\trc = ipv6_frag_pkt_setup(fill_order, nb_frags);\n+\telse\n+\t\trc = ipv6_rand_frag_pkt_setup(fill_order, MAX_FRAGMENTS);\n+\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (outstanding)\n+\t\trc = 
ipv6_outstanding_reassembly_perf(nb_frags, fill_order,\n+\t\t\t\t\t\t      outstanding);\n+\telse if (fill_order == FILL_MODE_INTERLEAVED)\n+\t\trc = ipv6_reassembly_interleaved_flows_perf(nb_frags);\n+\telse\n+\t\trc = ipv6_reassembly_perf(nb_frags, fill_order);\n+\n+\tfrag_pkt_teardown();\n+\n+\treturn rc;\n+}\n+\n+static int\n+test_reassembly_perf(void)\n+{\n+\tint8_t nb_fragments[] = {2, 3, MAX_FRAGMENTS, -1 /* Random */};\n+\tuint8_t order_type[] = {FILL_MODE_LINEAR, FILL_MODE_RANDOM};\n+\tuint32_t outstanding[] = {100, 500, 1000, 2000, 3000};\n+\tuint32_t i, j;\n+\tint rc;\n+\n+\trc = reassembly_test_setup();\n+\tif (rc)\n+\t\treturn rc;\n+\n+\treassembly_print_banner(\"IPV4\");\n+\t/* Test variable fragment count and ordering. */\n+\tfor (i = 0; i < RTE_DIM(nb_fragments); i++) {\n+\t\tfor (j = 0; j < RTE_DIM(order_type); j++) {\n+\t\t\trc = ipv4_reassembly_test(nb_fragments[i],\n+\t\t\t\t\t\t  order_type[j], 0);\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\t/* Test outstanding fragments in the table. */\n+\tfor (i = 0; i < RTE_DIM(outstanding); i++) {\n+\t\trc = ipv4_reassembly_test(2, 0, outstanding[i]);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t}\n+\tfor (i = 0; i < RTE_DIM(outstanding); i++) {\n+\t\trc = ipv4_reassembly_test(MAX_FRAGMENTS, 0, outstanding[i]);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t}\n+\n+\t/* Test interleaved flow reassembly perf */\n+\tfor (i = 0; i < RTE_DIM(nb_fragments); i++) {\n+\t\trc = ipv4_reassembly_test(nb_fragments[i],\n+\t\t\t\t\t  FILL_MODE_INTERLEAVED, 0);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t}\n+\tprintf(\"\\n\");\n+\treassembly_print_banner(\"IPV6\");\n+\t/* Test variable fragment count and ordering. */\n+\tfor (i = 0; i < RTE_DIM(nb_fragments); i++) {\n+\t\tfor (j = 0; j < RTE_DIM(order_type); j++) {\n+\t\t\trc = ipv6_reassembly_test(nb_fragments[i],\n+\t\t\t\t\t\t  order_type[j], 0);\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\t/* Test outstanding fragments in the table. */\n+\tfor (i = 0; i < RTE_DIM(outstanding); i++) {\n+\t\trc = ipv6_reassembly_test(2, 0, outstanding[i]);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t}\n+\n+\tfor (i = 0; i < RTE_DIM(outstanding); i++) {\n+\t\trc = ipv6_reassembly_test(MAX_FRAGMENTS, 0, outstanding[i]);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t}\n+\n+\t/* Test interleaved flow reassembly perf */\n+\tfor (i = 0; i < RTE_DIM(nb_fragments); i++) {\n+\t\trc = ipv6_reassembly_test(nb_fragments[i],\n+\t\t\t\t\t  FILL_MODE_INTERLEAVED, 0);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t}\n+\treassembly_test_teardown();\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+REGISTER_TEST_COMMAND(reassembly_perf_autotest, test_reassembly_perf);\n",
    "prefixes": [
        "v6",
        "2/2"
    ]
}
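
Per the Allow header above, the same endpoint also accepts PUT and PATCH for maintainers. A hedged sketch of a partial update with PATCH, assuming Patchwork token authentication and that "state" and "archived" are writable for the authenticated user (the token value is a placeholder, not a real credential):

    import requests

    API_TOKEN = "0123456789abcdef"  # placeholder; use a token generated in Patchwork

    # PATCH changes only the supplied fields; PUT would perform a full update.
    resp = requests.patch(
        "https://patches.dpdk.org/api/patches/129471/",
        headers={
            "Authorization": f"Token {API_TOKEN}",
            "Accept": "application/json",
        },
        json={"state": "accepted", "archived": True},
    )
    resp.raise_for_status()
    print(resp.json()["state"])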