get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied are changed).

put:
Update a patch (full update).
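
A minimal sketch of driving this endpoint from Python with the requests
library. The token value is a placeholder, and updating a patch (PATCH/PUT)
is normally only accepted from an account with maintainer rights on the
project:

    import requests

    API = "https://patches.dpdk.org/api"
    PATCH_ID = 64571
    TOKEN = "0123456789abcdef"  # placeholder; use a real Patchwork API token

    # GET: show a patch. No authentication is needed to read a public project.
    resp = requests.get(f"{API}/patches/{PATCH_ID}/", timeout=30)
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], "->", patch["state"])

    # PATCH: partial update -- send only the fields to change, e.g. the state.
    resp = requests.patch(
        f"{API}/patches/{PATCH_ID}/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},
        timeout=30,
    )
    print(resp.status_code, resp.reason)

The exchange below shows the same GET request as rendered by the browsable API.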

GET /api/patches/64571/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 64571,
    "url": "http://patches.dpdk.org/api/patches/64571/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200113172518.37815-5-honnappa.nagarahalli@arm.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200113172518.37815-5-honnappa.nagarahalli@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200113172518.37815-5-honnappa.nagarahalli@arm.com",
    "date": "2020-01-13T17:25:16",
    "name": "[v8,4/6] test/ring: modify perf test cases to use rte_ring_xxx_elem APIs",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f9ebfb4556d768c4f44add044bf43c8676395a5d",
    "submitter": {
        "id": 1045,
        "url": "http://patches.dpdk.org/api/people/1045/?format=api",
        "name": "Honnappa Nagarahalli",
        "email": "honnappa.nagarahalli@arm.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200113172518.37815-5-honnappa.nagarahalli@arm.com/mbox/",
    "series": [
        {
            "id": 8086,
            "url": "http://patches.dpdk.org/api/series/8086/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=8086",
            "date": "2020-01-13T17:25:12",
            "name": "lib/ring: APIs to support custom element size",
            "version": 8,
            "mbox": "http://patches.dpdk.org/series/8086/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/64571/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/64571/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id F2E9BA04F1;\n\tMon, 13 Jan 2020 18:26:23 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2379F1D37E;\n\tMon, 13 Jan 2020 18:25:50 +0100 (CET)",
            "from foss.arm.com (foss.arm.com [217.140.110.172])\n by dpdk.org (Postfix) with ESMTP id 534D61D171\n for <dev@dpdk.org>; Mon, 13 Jan 2020 18:25:40 +0100 (CET)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])\n by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id C792C1435;\n Mon, 13 Jan 2020 09:25:39 -0800 (PST)",
            "from qc2400f-1.austin.arm.com (qc2400f-1.austin.arm.com\n [10.118.14.48])\n by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPSA id ADE8F3F85E;\n Mon, 13 Jan 2020 09:25:39 -0800 (PST)"
        ],
        "From": "Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>",
        "To": "olivier.matz@6wind.com, sthemmin@microsoft.com, jerinj@marvell.com,\n bruce.richardson@intel.com, david.marchand@redhat.com,\n pbhagavatula@marvell.com, konstantin.ananyev@intel.com,\n honnappa.nagarahalli@arm.com",
        "Cc": "dev@dpdk.org, dharmik.thakkar@arm.com, ruifeng.wang@arm.com,\n gavin.hu@arm.com, nd@arm.com",
        "Date": "Mon, 13 Jan 2020 11:25:16 -0600",
        "Message-Id": "<20200113172518.37815-5-honnappa.nagarahalli@arm.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20200113172518.37815-1-honnappa.nagarahalli@arm.com>",
        "References": "<20190906190510.11146-1-honnappa.nagarahalli@arm.com>\n <20200113172518.37815-1-honnappa.nagarahalli@arm.com>",
        "Subject": "[dpdk-dev] [PATCH v8 4/6] test/ring: modify perf test cases to use\n\trte_ring_xxx_elem APIs",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Adjust the performance test cases to test rte_ring_xxx_elem APIs.\n\nSigned-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\nReviewed-by: Gavin Hu <gavin.hu@arm.com>\n---\n app/test/test_ring_perf.c | 454 +++++++++++++++++++++++---------------\n 1 file changed, 273 insertions(+), 181 deletions(-)",
    "diff": "diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c\nindex 6c2aca483..8d1217951 100644\n--- a/app/test/test_ring_perf.c\n+++ b/app/test/test_ring_perf.c\n@@ -13,6 +13,7 @@\n #include <string.h>\n \n #include \"test.h\"\n+#include \"test_ring.h\"\n \n /*\n  * Ring\n@@ -41,6 +42,35 @@ struct lcore_pair {\n \n static volatile unsigned lcore_count = 0;\n \n+static void\n+test_ring_print_test_string(unsigned int api_type, int esize,\n+\tunsigned int bsz, double value)\n+{\n+\tif (esize == -1)\n+\t\tprintf(\"legacy APIs\");\n+\telse\n+\t\tprintf(\"elem APIs: element size %dB\", esize);\n+\n+\tif (api_type == TEST_RING_IGNORE_API_TYPE)\n+\t\treturn;\n+\n+\tif ((api_type & TEST_RING_THREAD_DEF) == TEST_RING_THREAD_DEF)\n+\t\tprintf(\": default enqueue/dequeue: \");\n+\telse if ((api_type & TEST_RING_THREAD_SPSC) == TEST_RING_THREAD_SPSC)\n+\t\tprintf(\": SP/SC: \");\n+\telse if ((api_type & TEST_RING_THREAD_MPMC) == TEST_RING_THREAD_MPMC)\n+\t\tprintf(\": MP/MC: \");\n+\n+\tif ((api_type & TEST_RING_ELEM_SINGLE) == TEST_RING_ELEM_SINGLE)\n+\t\tprintf(\"single: \");\n+\telse if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)\n+\t\tprintf(\"bulk (size: %u): \", bsz);\n+\telse if ((api_type & TEST_RING_ELEM_BURST) == TEST_RING_ELEM_BURST)\n+\t\tprintf(\"burst (size: %u): \", bsz);\n+\n+\tprintf(\"%.2F\\n\", value);\n+}\n+\n /**** Functions to analyse our core mask to get cores for different tests ***/\n \n static int\n@@ -117,27 +147,21 @@ get_two_sockets(struct lcore_pair *lcp)\n \n /* Get cycle counts for dequeuing from an empty ring. Should be 2 or 3 cycles */\n static void\n-test_empty_dequeue(struct rte_ring *r)\n+test_empty_dequeue(struct rte_ring *r, const int esize,\n+\t\t\tconst unsigned int api_type)\n {\n-\tconst unsigned iter_shift = 26;\n-\tconst unsigned iterations = 1<<iter_shift;\n-\tunsigned i = 0;\n+\tconst unsigned int iter_shift = 26;\n+\tconst unsigned int iterations = 1 << iter_shift;\n+\tunsigned int i = 0;\n \tvoid *burst[MAX_BURST];\n \n-\tconst uint64_t sc_start = rte_rdtsc();\n+\tconst uint64_t start = rte_rdtsc();\n \tfor (i = 0; i < iterations; i++)\n-\t\trte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);\n-\tconst uint64_t sc_end = rte_rdtsc();\n+\t\ttest_ring_dequeue(r, burst, esize, bulk_sizes[0], api_type);\n+\tconst uint64_t end = rte_rdtsc();\n \n-\tconst uint64_t mc_start = rte_rdtsc();\n-\tfor (i = 0; i < iterations; i++)\n-\t\trte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);\n-\tconst uint64_t mc_end = rte_rdtsc();\n-\n-\tprintf(\"SC empty dequeue: %.2F\\n\",\n-\t\t\t(double)(sc_end-sc_start) / iterations);\n-\tprintf(\"MC empty dequeue: %.2F\\n\",\n-\t\t\t(double)(mc_end-mc_start) / iterations);\n+\ttest_ring_print_test_string(api_type, esize, bulk_sizes[0],\n+\t\t\t\t\t((double)(end - start)) / iterations);\n }\n \n /*\n@@ -151,19 +175,21 @@ struct thread_params {\n };\n \n /*\n- * Function that uses rdtsc to measure timing for ring enqueue. 
Needs pair\n- * thread running dequeue_bulk function\n+ * Helper function to call bulk SP/MP enqueue functions.\n+ * flag == 0 -> enqueue\n+ * flag == 1 -> dequeue\n  */\n-static int\n-enqueue_bulk(void *p)\n+static __rte_always_inline int\n+enqueue_dequeue_bulk_helper(const unsigned int flag, const int esize,\n+\tstruct thread_params *p)\n {\n-\tconst unsigned iter_shift = 23;\n-\tconst unsigned iterations = 1<<iter_shift;\n-\tstruct thread_params *params = p;\n-\tstruct rte_ring *r = params->r;\n-\tconst unsigned size = params->size;\n-\tunsigned i;\n-\tvoid *burst[MAX_BURST] = {0};\n+\tint ret;\n+\tconst unsigned int iter_shift = 23;\n+\tconst unsigned int iterations = 1 << iter_shift;\n+\tstruct rte_ring *r = p->r;\n+\tunsigned int bsize = p->size;\n+\tunsigned int i;\n+\tvoid *burst = NULL;\n \n #ifdef RTE_USE_C11_MEM_MODEL\n \tif (__atomic_add_fetch(&lcore_count, 1, __ATOMIC_RELAXED) != 2)\n@@ -173,23 +199,67 @@ enqueue_bulk(void *p)\n \t\twhile(lcore_count != 2)\n \t\t\trte_pause();\n \n+\tburst = test_ring_calloc(MAX_BURST, esize);\n+\tif (burst == NULL)\n+\t\treturn -1;\n+\n \tconst uint64_t sp_start = rte_rdtsc();\n \tfor (i = 0; i < iterations; i++)\n-\t\twhile (rte_ring_sp_enqueue_bulk(r, burst, size, NULL) == 0)\n-\t\t\trte_pause();\n+\t\tdo {\n+\t\t\tif (flag == 0)\n+\t\t\t\tret = test_ring_enqueue(r, burst, esize, bsize,\n+\t\t\t\t\t\tTEST_RING_THREAD_SPSC |\n+\t\t\t\t\t\tTEST_RING_ELEM_BULK);\n+\t\t\telse if (flag == 1)\n+\t\t\t\tret = test_ring_dequeue(r, burst, esize, bsize,\n+\t\t\t\t\t\tTEST_RING_THREAD_SPSC |\n+\t\t\t\t\t\tTEST_RING_ELEM_BULK);\n+\t\t\tif (ret == 0)\n+\t\t\t\trte_pause();\n+\t\t} while (!ret);\n \tconst uint64_t sp_end = rte_rdtsc();\n \n \tconst uint64_t mp_start = rte_rdtsc();\n \tfor (i = 0; i < iterations; i++)\n-\t\twhile (rte_ring_mp_enqueue_bulk(r, burst, size, NULL) == 0)\n-\t\t\trte_pause();\n+\t\tdo {\n+\t\t\tif (flag == 0)\n+\t\t\t\tret = test_ring_enqueue(r, burst, esize, bsize,\n+\t\t\t\t\t\tTEST_RING_THREAD_MPMC |\n+\t\t\t\t\t\tTEST_RING_ELEM_BULK);\n+\t\t\telse if (flag == 1)\n+\t\t\t\tret = test_ring_dequeue(r, burst, esize, bsize,\n+\t\t\t\t\t\tTEST_RING_THREAD_MPMC |\n+\t\t\t\t\t\tTEST_RING_ELEM_BULK);\n+\t\t\tif (ret == 0)\n+\t\t\t\trte_pause();\n+\t\t} while (!ret);\n \tconst uint64_t mp_end = rte_rdtsc();\n \n-\tparams->spsc = ((double)(sp_end - sp_start))/(iterations*size);\n-\tparams->mpmc = ((double)(mp_end - mp_start))/(iterations*size);\n+\tp->spsc = ((double)(sp_end - sp_start))/(iterations * bsize);\n+\tp->mpmc = ((double)(mp_end - mp_start))/(iterations * bsize);\n \treturn 0;\n }\n \n+/*\n+ * Function that uses rdtsc to measure timing for ring enqueue. Needs pair\n+ * thread running dequeue_bulk function\n+ */\n+static int\n+enqueue_bulk(void *p)\n+{\n+\tstruct thread_params *params = p;\n+\n+\treturn enqueue_dequeue_bulk_helper(0, -1, params);\n+}\n+\n+static int\n+enqueue_bulk_16B(void *p)\n+{\n+\tstruct thread_params *params = p;\n+\n+\treturn enqueue_dequeue_bulk_helper(0, 16, params);\n+}\n+\n /*\n  * Function that uses rdtsc to measure timing for ring dequeue. 
Needs pair\n  * thread running enqueue_bulk function\n@@ -197,49 +267,38 @@ enqueue_bulk(void *p)\n static int\n dequeue_bulk(void *p)\n {\n-\tconst unsigned iter_shift = 23;\n-\tconst unsigned iterations = 1<<iter_shift;\n \tstruct thread_params *params = p;\n-\tstruct rte_ring *r = params->r;\n-\tconst unsigned size = params->size;\n-\tunsigned i;\n-\tvoid *burst[MAX_BURST] = {0};\n-\n-#ifdef RTE_USE_C11_MEM_MODEL\n-\tif (__atomic_add_fetch(&lcore_count, 1, __ATOMIC_RELAXED) != 2)\n-#else\n-\tif (__sync_add_and_fetch(&lcore_count, 1) != 2)\n-#endif\n-\t\twhile(lcore_count != 2)\n-\t\t\trte_pause();\n \n-\tconst uint64_t sc_start = rte_rdtsc();\n-\tfor (i = 0; i < iterations; i++)\n-\t\twhile (rte_ring_sc_dequeue_bulk(r, burst, size, NULL) == 0)\n-\t\t\trte_pause();\n-\tconst uint64_t sc_end = rte_rdtsc();\n+\treturn enqueue_dequeue_bulk_helper(1, -1, params);\n+}\n \n-\tconst uint64_t mc_start = rte_rdtsc();\n-\tfor (i = 0; i < iterations; i++)\n-\t\twhile (rte_ring_mc_dequeue_bulk(r, burst, size, NULL) == 0)\n-\t\t\trte_pause();\n-\tconst uint64_t mc_end = rte_rdtsc();\n+static int\n+dequeue_bulk_16B(void *p)\n+{\n+\tstruct thread_params *params = p;\n \n-\tparams->spsc = ((double)(sc_end - sc_start))/(iterations*size);\n-\tparams->mpmc = ((double)(mc_end - mc_start))/(iterations*size);\n-\treturn 0;\n+\treturn enqueue_dequeue_bulk_helper(1, 16, params);\n }\n \n /*\n  * Function that calls the enqueue and dequeue bulk functions on pairs of cores.\n  * used to measure ring perf between hyperthreads, cores and sockets.\n  */\n-static void\n-run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r,\n-\t\tlcore_function_t f1, lcore_function_t f2)\n+static int\n+run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r, const int esize)\n {\n+\tlcore_function_t *f1, *f2;\n \tstruct thread_params param1 = {0}, param2 = {0};\n \tunsigned i;\n+\n+\tif (esize == -1) {\n+\t\tf1 = enqueue_bulk;\n+\t\tf2 = dequeue_bulk;\n+\t} else {\n+\t\tf1 = enqueue_bulk_16B;\n+\t\tf2 = dequeue_bulk_16B;\n+\t}\n+\n \tfor (i = 0; i < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); i++) {\n \t\tlcore_count = 0;\n \t\tparam1.size = param2.size = bulk_sizes[i];\n@@ -251,14 +310,20 @@ run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r,\n \t\t} else {\n \t\t\trte_eal_remote_launch(f1, &param1, cores->c1);\n \t\t\trte_eal_remote_launch(f2, &param2, cores->c2);\n-\t\t\trte_eal_wait_lcore(cores->c1);\n-\t\t\trte_eal_wait_lcore(cores->c2);\n+\t\t\tif (rte_eal_wait_lcore(cores->c1) < 0)\n+\t\t\t\treturn -1;\n+\t\t\tif (rte_eal_wait_lcore(cores->c2) < 0)\n+\t\t\t\treturn -1;\n \t\t}\n-\t\tprintf(\"SP/SC bulk enq/dequeue (size: %u): %.2F\\n\", bulk_sizes[i],\n-\t\t\t\tparam1.spsc + param2.spsc);\n-\t\tprintf(\"MP/MC bulk enq/dequeue (size: %u): %.2F\\n\", bulk_sizes[i],\n-\t\t\t\tparam1.mpmc + param2.mpmc);\n+\t\ttest_ring_print_test_string(\n+\t\t\tTEST_RING_THREAD_SPSC | TEST_RING_ELEM_BULK,\n+\t\t\tesize, bulk_sizes[i], param1.spsc + param2.spsc);\n+\t\ttest_ring_print_test_string(\n+\t\t\tTEST_RING_THREAD_MPMC | TEST_RING_ELEM_BULK,\n+\t\t\tesize, bulk_sizes[i], param1.mpmc + param2.mpmc);\n \t}\n+\n+\treturn 0;\n }\n \n static rte_atomic32_t synchro;\n@@ -267,7 +332,7 @@ static uint64_t queue_count[RTE_MAX_LCORE];\n #define TIME_MS 100\n \n static int\n-load_loop_fn(void *p)\n+load_loop_fn_helper(struct thread_params *p, const int esize)\n {\n \tuint64_t time_diff = 0;\n \tuint64_t begin = 0;\n@@ -275,7 +340,11 @@ load_loop_fn(void *p)\n \tuint64_t lcount = 0;\n \tconst unsigned int lcore = 
rte_lcore_id();\n \tstruct thread_params *params = p;\n-\tvoid *burst[MAX_BURST] = {0};\n+\tvoid *burst = NULL;\n+\n+\tburst = test_ring_calloc(MAX_BURST, esize);\n+\tif (burst == NULL)\n+\t\treturn -1;\n \n \t/* wait synchro for slaves */\n \tif (lcore != rte_get_master_lcore())\n@@ -284,22 +353,49 @@ load_loop_fn(void *p)\n \n \tbegin = rte_get_timer_cycles();\n \twhile (time_diff < hz * TIME_MS / 1000) {\n-\t\trte_ring_mp_enqueue_bulk(params->r, burst, params->size, NULL);\n-\t\trte_ring_mc_dequeue_bulk(params->r, burst, params->size, NULL);\n+\t\ttest_ring_enqueue(params->r, burst, esize, params->size,\n+\t\t\t\tTEST_RING_THREAD_MPMC | TEST_RING_ELEM_BULK);\n+\t\ttest_ring_dequeue(params->r, burst, esize, params->size,\n+\t\t\t\tTEST_RING_THREAD_MPMC | TEST_RING_ELEM_BULK);\n \t\tlcount++;\n \t\ttime_diff = rte_get_timer_cycles() - begin;\n \t}\n \tqueue_count[lcore] = lcount;\n+\n+\trte_free(burst);\n+\n \treturn 0;\n }\n \n static int\n-run_on_all_cores(struct rte_ring *r)\n+load_loop_fn(void *p)\n+{\n+\tstruct thread_params *params = p;\n+\n+\treturn load_loop_fn_helper(params, -1);\n+}\n+\n+static int\n+load_loop_fn_16B(void *p)\n+{\n+\tstruct thread_params *params = p;\n+\n+\treturn load_loop_fn_helper(params, 16);\n+}\n+\n+static int\n+run_on_all_cores(struct rte_ring *r, const int esize)\n {\n \tuint64_t total = 0;\n \tstruct thread_params param;\n+\tlcore_function_t *lcore_f;\n \tunsigned int i, c;\n \n+\tif (esize == -1)\n+\t\tlcore_f = load_loop_fn;\n+\telse\n+\t\tlcore_f = load_loop_fn_16B;\n+\n \tmemset(&param, 0, sizeof(struct thread_params));\n \tfor (i = 0; i < RTE_DIM(bulk_sizes); i++) {\n \t\tprintf(\"\\nBulk enq/dequeue count on size %u\\n\", bulk_sizes[i]);\n@@ -308,13 +404,12 @@ run_on_all_cores(struct rte_ring *r)\n \n \t\t/* clear synchro and start slaves */\n \t\trte_atomic32_set(&synchro, 0);\n-\t\tif (rte_eal_mp_remote_launch(load_loop_fn, &param,\n-\t\t\tSKIP_MASTER) < 0)\n+\t\tif (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MASTER) < 0)\n \t\t\treturn -1;\n \n \t\t/* start synchro and launch test on master */\n \t\trte_atomic32_set(&synchro, 1);\n-\t\tload_loop_fn(&param);\n+\t\tlcore_f(&param);\n \n \t\trte_eal_mp_wait_lcore();\n \n@@ -335,155 +430,152 @@ run_on_all_cores(struct rte_ring *r)\n  * Test function that determines how long an enqueue + dequeue of a single item\n  * takes on a single lcore. 
Result is for comparison with the bulk enq+deq.\n  */\n-static void\n-test_single_enqueue_dequeue(struct rte_ring *r)\n+static int\n+test_single_enqueue_dequeue(struct rte_ring *r, const int esize,\n+\tconst unsigned int api_type)\n {\n-\tconst unsigned iter_shift = 24;\n-\tconst unsigned iterations = 1<<iter_shift;\n-\tunsigned i = 0;\n+\tconst unsigned int iter_shift = 24;\n+\tconst unsigned int iterations = 1 << iter_shift;\n+\tunsigned int i = 0;\n \tvoid *burst = NULL;\n \n-\tconst uint64_t sc_start = rte_rdtsc();\n-\tfor (i = 0; i < iterations; i++) {\n-\t\trte_ring_sp_enqueue(r, burst);\n-\t\trte_ring_sc_dequeue(r, &burst);\n-\t}\n-\tconst uint64_t sc_end = rte_rdtsc();\n+\t/* alloc dummy object pointers */\n+\tburst = test_ring_calloc(1, esize);\n+\tif (burst == NULL)\n+\t\treturn -1;\n \n-\tconst uint64_t mc_start = rte_rdtsc();\n+\tconst uint64_t start = rte_rdtsc();\n \tfor (i = 0; i < iterations; i++) {\n-\t\trte_ring_mp_enqueue(r, burst);\n-\t\trte_ring_mc_dequeue(r, &burst);\n+\t\ttest_ring_enqueue(r, burst, esize, 1, api_type);\n+\t\ttest_ring_dequeue(r, burst, esize, 1, api_type);\n \t}\n-\tconst uint64_t mc_end = rte_rdtsc();\n+\tconst uint64_t end = rte_rdtsc();\n+\n+\ttest_ring_print_test_string(api_type, esize, 1,\n+\t\t\t\t\t((double)(end - start)) / iterations);\n \n-\tprintf(\"SP/SC single enq/dequeue: %.2F\\n\",\n-\t\t\t((double)(sc_end-sc_start)) / iterations);\n-\tprintf(\"MP/MC single enq/dequeue: %.2F\\n\",\n-\t\t\t((double)(mc_end-mc_start)) / iterations);\n+\trte_free(burst);\n+\n+\treturn 0;\n }\n \n /*\n- * Test that does both enqueue and dequeue on a core using the burst() API calls\n- * instead of the bulk() calls used in other tests. Results should be the same\n- * as for the bulk function called on a single lcore.\n+ * Test that does both enqueue and dequeue on a core using the burst/bulk API\n+ * calls Results should be the same as for the bulk function called on a\n+ * single lcore.\n  */\n-static void\n-test_burst_enqueue_dequeue(struct rte_ring *r)\n+static int\n+test_burst_bulk_enqueue_dequeue(struct rte_ring *r, const int esize,\n+\tconst unsigned int api_type)\n {\n-\tconst unsigned iter_shift = 23;\n-\tconst unsigned iterations = 1<<iter_shift;\n-\tunsigned sz, i = 0;\n-\tvoid *burst[MAX_BURST] = {0};\n+\tconst unsigned int iter_shift = 23;\n+\tconst unsigned int iterations = 1 << iter_shift;\n+\tunsigned int sz, i = 0;\n+\tvoid **burst = NULL;\n \n-\tfor (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {\n-\t\tconst uint64_t sc_start = rte_rdtsc();\n-\t\tfor (i = 0; i < iterations; i++) {\n-\t\t\trte_ring_sp_enqueue_burst(r, burst,\n-\t\t\t\t\tbulk_sizes[sz], NULL);\n-\t\t\trte_ring_sc_dequeue_burst(r, burst,\n-\t\t\t\t\tbulk_sizes[sz], NULL);\n-\t\t}\n-\t\tconst uint64_t sc_end = rte_rdtsc();\n+\tburst = test_ring_calloc(MAX_BURST, esize);\n+\tif (burst == NULL)\n+\t\treturn -1;\n \n-\t\tconst uint64_t mc_start = rte_rdtsc();\n+\tfor (sz = 0; sz < RTE_DIM(bulk_sizes); sz++) {\n+\t\tconst uint64_t start = rte_rdtsc();\n \t\tfor (i = 0; i < iterations; i++) {\n-\t\t\trte_ring_mp_enqueue_burst(r, burst,\n-\t\t\t\t\tbulk_sizes[sz], NULL);\n-\t\t\trte_ring_mc_dequeue_burst(r, burst,\n-\t\t\t\t\tbulk_sizes[sz], NULL);\n+\t\t\ttest_ring_enqueue(r, burst, esize, bulk_sizes[sz],\n+\t\t\t\t\t\tapi_type);\n+\t\t\ttest_ring_dequeue(r, burst, esize, bulk_sizes[sz],\n+\t\t\t\t\t\tapi_type);\n \t\t}\n-\t\tconst uint64_t mc_end = rte_rdtsc();\n-\n-\t\tdouble mc_avg = ((double)(mc_end-mc_start) / iterations) 
/\n-\t\t\t\t\tbulk_sizes[sz];\n-\t\tdouble sc_avg = ((double)(sc_end-sc_start) / iterations) /\n-\t\t\t\t\tbulk_sizes[sz];\n+\t\tconst uint64_t end = rte_rdtsc();\n \n-\t\tprintf(\"SP/SC burst enq/dequeue (size: %u): %.2F\\n\",\n-\t\t\t\tbulk_sizes[sz], sc_avg);\n-\t\tprintf(\"MP/MC burst enq/dequeue (size: %u): %.2F\\n\",\n-\t\t\t\tbulk_sizes[sz], mc_avg);\n+\t\ttest_ring_print_test_string(api_type, esize, bulk_sizes[sz],\n+\t\t\t\t\t((double)(end - start)) / iterations);\n \t}\n-}\n \n-/* Times enqueue and dequeue on a single lcore */\n-static void\n-test_bulk_enqueue_dequeue(struct rte_ring *r)\n-{\n-\tconst unsigned iter_shift = 23;\n-\tconst unsigned iterations = 1<<iter_shift;\n-\tunsigned sz, i = 0;\n-\tvoid *burst[MAX_BURST] = {0};\n+\trte_free(burst);\n \n-\tfor (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {\n-\t\tconst uint64_t sc_start = rte_rdtsc();\n-\t\tfor (i = 0; i < iterations; i++) {\n-\t\t\trte_ring_sp_enqueue_bulk(r, burst,\n-\t\t\t\t\tbulk_sizes[sz], NULL);\n-\t\t\trte_ring_sc_dequeue_bulk(r, burst,\n-\t\t\t\t\tbulk_sizes[sz], NULL);\n-\t\t}\n-\t\tconst uint64_t sc_end = rte_rdtsc();\n-\n-\t\tconst uint64_t mc_start = rte_rdtsc();\n-\t\tfor (i = 0; i < iterations; i++) {\n-\t\t\trte_ring_mp_enqueue_bulk(r, burst,\n-\t\t\t\t\tbulk_sizes[sz], NULL);\n-\t\t\trte_ring_mc_dequeue_bulk(r, burst,\n-\t\t\t\t\tbulk_sizes[sz], NULL);\n-\t\t}\n-\t\tconst uint64_t mc_end = rte_rdtsc();\n-\n-\t\tdouble sc_avg = ((double)(sc_end-sc_start) /\n-\t\t\t\t(iterations * bulk_sizes[sz]));\n-\t\tdouble mc_avg = ((double)(mc_end-mc_start) /\n-\t\t\t\t(iterations * bulk_sizes[sz]));\n-\n-\t\tprintf(\"SP/SC bulk enq/dequeue (size: %u): %.2F\\n\", bulk_sizes[sz],\n-\t\t\t\tsc_avg);\n-\t\tprintf(\"MP/MC bulk enq/dequeue (size: %u): %.2F\\n\", bulk_sizes[sz],\n-\t\t\t\tmc_avg);\n-\t}\n+\treturn 0;\n }\n \n-static int\n-test_ring_perf(void)\n+/* Run all tests for a given element size */\n+static __rte_always_inline int\n+test_ring_perf_esize(const int esize)\n {\n \tstruct lcore_pair cores;\n \tstruct rte_ring *r = NULL;\n \n-\tr = rte_ring_create(RING_NAME, RING_SIZE, rte_socket_id(), 0);\n+\t/*\n+\t * Performance test for legacy/_elem APIs\n+\t * SP-SC/MP-MC, single\n+\t */\n+\tr = test_ring_create(RING_NAME, esize, RING_SIZE, rte_socket_id(), 0);\n \tif (r == NULL)\n \t\treturn -1;\n \n-\tprintf(\"### Testing single element and burst enq/deq ###\\n\");\n-\ttest_single_enqueue_dequeue(r);\n-\ttest_burst_enqueue_dequeue(r);\n+\tprintf(\"\\n### Testing single element enq/deq ###\\n\");\n+\tif (test_single_enqueue_dequeue(r, esize,\n+\t\t\tTEST_RING_THREAD_SPSC | TEST_RING_ELEM_SINGLE) < 0)\n+\t\treturn -1;\n+\tif (test_single_enqueue_dequeue(r, esize,\n+\t\t\tTEST_RING_THREAD_MPMC | TEST_RING_ELEM_SINGLE) < 0)\n+\t\treturn -1;\n+\n+\tprintf(\"\\n### Testing burst enq/deq ###\\n\");\n+\tif (test_burst_bulk_enqueue_dequeue(r, esize,\n+\t\t\tTEST_RING_THREAD_SPSC | TEST_RING_ELEM_BURST) < 0)\n+\t\treturn -1;\n+\tif (test_burst_bulk_enqueue_dequeue(r, esize,\n+\t\t\tTEST_RING_THREAD_MPMC | TEST_RING_ELEM_BURST) < 0)\n+\t\treturn -1;\n \n-\tprintf(\"\\n### Testing empty dequeue ###\\n\");\n-\ttest_empty_dequeue(r);\n+\tprintf(\"\\n### Testing bulk enq/deq ###\\n\");\n+\tif (test_burst_bulk_enqueue_dequeue(r, esize,\n+\t\t\tTEST_RING_THREAD_SPSC | TEST_RING_ELEM_BULK) < 0)\n+\t\treturn -1;\n+\tif (test_burst_bulk_enqueue_dequeue(r, esize,\n+\t\t\tTEST_RING_THREAD_MPMC | TEST_RING_ELEM_BULK) < 0)\n+\t\treturn -1;\n \n-\tprintf(\"\\n### Testing using a single lcore 
###\\n\");\n-\ttest_bulk_enqueue_dequeue(r);\n+\tprintf(\"\\n### Testing empty bulk deq ###\\n\");\n+\ttest_empty_dequeue(r, esize,\n+\t\t\tTEST_RING_THREAD_SPSC | TEST_RING_ELEM_BULK);\n+\ttest_empty_dequeue(r, esize,\n+\t\t\tTEST_RING_THREAD_MPMC | TEST_RING_ELEM_BULK);\n \n \tif (get_two_hyperthreads(&cores) == 0) {\n \t\tprintf(\"\\n### Testing using two hyperthreads ###\\n\");\n-\t\trun_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);\n+\t\tif (run_on_core_pair(&cores, r, esize) < 0)\n+\t\t\treturn -1;\n \t}\n \tif (get_two_cores(&cores) == 0) {\n \t\tprintf(\"\\n### Testing using two physical cores ###\\n\");\n-\t\trun_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);\n+\t\tif (run_on_core_pair(&cores, r, esize) < 0)\n+\t\t\treturn -1;\n \t}\n \tif (get_two_sockets(&cores) == 0) {\n \t\tprintf(\"\\n### Testing using two NUMA nodes ###\\n\");\n-\t\trun_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);\n+\t\tif (run_on_core_pair(&cores, r, esize) < 0)\n+\t\t\treturn -1;\n \t}\n \n \tprintf(\"\\n### Testing using all slave nodes ###\\n\");\n-\trun_on_all_cores(r);\n+\tif (run_on_all_cores(r, esize) < 0)\n+\t\treturn -1;\n \n \trte_ring_free(r);\n+\n+\treturn 0;\n+}\n+\n+static int\n+test_ring_perf(void)\n+{\n+\t/* Run all the tests for different element sizes */\n+\tif (test_ring_perf_esize(-1) == -1)\n+\t\treturn -1;\n+\n+\tif (test_ring_perf_esize(16) == -1)\n+\t\treturn -1;\n+\n \treturn 0;\n }\n \n",
    "prefixes": [
        "v8",
        "4/6"
    ]
}
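
The response is hyperlinked: "comments", "checks", "mbox" and "series[].mbox"
are themselves fetchable URLs. A minimal sketch of following them to see why
"check" is "fail" and to download the whole v8 series (the output filename is
an arbitrary choice; the "context" and "state" fields follow the Patchwork
checks schema):

    import requests

    patch = requests.get(
        "https://patches.dpdk.org/api/patches/64571/", timeout=30
    ).json()

    # List the CI results behind the aggregate "check": "fail" field.
    for check in requests.get(patch["checks"], timeout=30).json():
        print(check["context"], check["state"])

    # Fetch the mbox of the whole series, ready for applying with git-am.
    mbox = requests.get(patch["series"][0]["mbox"], timeout=30)
    mbox.raise_for_status()
    with open("series-8086.mbox", "wb") as f:
        f.write(mbox.content)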