get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (a full update; all writable fields should be supplied).
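
As a minimal sketch of the read-only side of this endpoint, the following fetches the same patch programmatically. It assumes Python with the third-party requests library (any HTTP client would do) and reuses the patch id and base URL from the example response below; GET requests need no authentication.

import requests

BASE = "http://patches.dpdk.org/api"

# Fetch the patch shown below as JSON; raise on any HTTP error.
resp = requests.get(BASE + "/patches/53182/", params={"format": "json"})
resp.raise_for_status()
patch = resp.json()

print(patch["name"])    # "[v9,2/4] test/rcu_qsbr: add API and functional tests"
print(patch["state"])   # "accepted"
print(patch["mbox"])    # raw mbox URL, suitable for applying with git-am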

GET /api/patches/53182/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 53182,
    "url": "http://patches.dpdk.org/api/patches/53182/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190501035419.33524-3-honnappa.nagarahalli@arm.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190501035419.33524-3-honnappa.nagarahalli@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190501035419.33524-3-honnappa.nagarahalli@arm.com",
    "date": "2019-05-01T03:54:17",
    "name": "[v9,2/4] test/rcu_qsbr: add API and functional tests",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "a63392780560e1db1dd4ef281ab6e6c2962c48ec",
    "submitter": {
        "id": 1045,
        "url": "http://patches.dpdk.org/api/people/1045/?format=api",
        "name": "Honnappa Nagarahalli",
        "email": "honnappa.nagarahalli@arm.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190501035419.33524-3-honnappa.nagarahalli@arm.com/mbox/",
    "series": [
        {
            "id": 4524,
            "url": "http://patches.dpdk.org/api/series/4524/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4524",
            "date": "2019-05-01T03:54:15",
            "name": "lib/rcu: add RCU library supporting QSBR mechanism",
            "version": 9,
            "mbox": "http://patches.dpdk.org/series/4524/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/53182/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/53182/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 991D25B38;\n\tWed,  1 May 2019 05:54:36 +0200 (CEST)",
            "from foss.arm.com (foss.arm.com [217.140.101.70])\n\tby dpdk.org (Postfix) with ESMTP id E9C545B12\n\tfor <dev@dpdk.org>; Wed,  1 May 2019 05:54:32 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.72.51.249])\n\tby usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 36EA515AD;\n\tTue, 30 Apr 2019 20:54:32 -0700 (PDT)",
            "from qc2400f-1.austin.arm.com (qc2400f-1.austin.arm.com\n\t[10.118.12.65])\n\tby usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPSA id\n\t9EFA13F719; Tue, 30 Apr 2019 20:54:31 -0700 (PDT)"
        ],
        "From": "Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>",
        "To": "konstantin.ananyev@intel.com, stephen@networkplumber.org,\n\tpaulmck@linux.ibm.com, marko.kovacevic@intel.com, dev@dpdk.org",
        "Cc": "honnappa.nagarahalli@arm.com, gavin.hu@arm.com, dharmik.thakkar@arm.com, \n\tmalvika.gupta@arm.com",
        "Date": "Tue, 30 Apr 2019 22:54:17 -0500",
        "Message-Id": "<20190501035419.33524-3-honnappa.nagarahalli@arm.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190501035419.33524-1-honnappa.nagarahalli@arm.com>",
        "References": "<20181122033055.3431-1-honnappa.nagarahalli@arm.com>\n\t<20190501035419.33524-1-honnappa.nagarahalli@arm.com>",
        "Subject": "[dpdk-dev] [PATCH v9 2/4] test/rcu_qsbr: add API and functional\n\ttests",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Dharmik Thakkar <dharmik.thakkar@arm.com>\n\nAdd API positive/negative test cases, functional tests and\nperformance tests.\n\nSigned-off-by: Malvika Gupta <malvika.gupta@arm.com>\nSigned-off-by: Dharmik Thakkar <dharmik.thakkar@arm.com>\nSigned-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\nReviewed-by: Gavin Hu <gavin.hu@arm.com>\nAcked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\n---\n app/test/Makefile             |    2 +\n app/test/autotest_data.py     |   12 +\n app/test/meson.build          |    5 +\n app/test/test_rcu_qsbr.c      | 1014 +++++++++++++++++++++++++++++++++\n app/test/test_rcu_qsbr_perf.c |  704 +++++++++++++++++++++++\n 5 files changed, 1737 insertions(+)\n create mode 100644 app/test/test_rcu_qsbr.c\n create mode 100644 app/test/test_rcu_qsbr_perf.c",
    "diff": "diff --git a/app/test/Makefile b/app/test/Makefile\nindex 54f706792..68d6b4fbc 100644\n--- a/app/test/Makefile\n+++ b/app/test/Makefile\n@@ -218,6 +218,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_KVARGS) += test_kvargs.c\n \n SRCS-$(CONFIG_RTE_LIBRTE_BPF) += test_bpf.c\n \n+SRCS-$(CONFIG_RTE_LIBRTE_RCU) += test_rcu_qsbr.c test_rcu_qsbr_perf.c\n+\n SRCS-$(CONFIG_RTE_LIBRTE_IPSEC) += test_ipsec.c\n ifeq ($(CONFIG_RTE_LIBRTE_IPSEC),y)\n LDLIBS += -lrte_ipsec\ndiff --git a/app/test/autotest_data.py b/app/test/autotest_data.py\nindex 72c56e528..fba66045f 100644\n--- a/app/test/autotest_data.py\n+++ b/app/test/autotest_data.py\n@@ -700,6 +700,18 @@\n         \"Func\":    default_autotest,\n         \"Report\":  None,\n     },\n+    {\n+        \"Name\":    \"RCU QSBR autotest\",\n+        \"Command\": \"rcu_qsbr_autotest\",\n+        \"Func\":    default_autotest,\n+        \"Report\":  None,\n+    },\n+    {\n+        \"Name\":    \"RCU QSBR performance autotest\",\n+        \"Command\": \"rcu_qsbr_perf_autotest\",\n+        \"Func\":    default_autotest,\n+        \"Report\":  None,\n+    },\n     #\n     # Please always make sure that ring_perf is the last test!\n     #\ndiff --git a/app/test/meson.build b/app/test/meson.build\nindex 80cdea5d1..4e8077cd2 100644\n--- a/app/test/meson.build\n+++ b/app/test/meson.build\n@@ -85,6 +85,8 @@ test_sources = files('commands.c',\n \t'test_power_acpi_cpufreq.c',\n \t'test_power_kvm_vm.c',\n \t'test_prefetch.c',\n+\t'test_rcu_qsbr.c',\n+\t'test_rcu_qsbr_perf.c',\n \t'test_reciprocal_division.c',\n \t'test_reciprocal_division_perf.c',\n \t'test_red.c',\n@@ -134,6 +136,7 @@ test_deps = ['acl',\n \t'metrics',\n \t'pipeline',\n \t'port',\n+\t'rcu',\n \t'reorder',\n \t'ring',\n \t'stack',\n@@ -172,6 +175,7 @@ fast_parallel_test_names = [\n         'multiprocess_autotest',\n         'per_lcore_autotest',\n         'prefetch_autotest',\n+        'rcu_qsbr_autotest',\n         'red_autotest',\n         'ring_autotest',\n         'ring_pmd_autotest',\n@@ -240,6 +244,7 @@ perf_test_names = [\n         'member_perf_autotest',\n         'efd_perf_autotest',\n         'lpm6_perf_autotest',\n+        'rcu_qsbr_perf_autotest',\n         'red_perf',\n         'distributor_perf_autotest',\n         'ring_pmd_perf_autotest',\ndiff --git a/app/test/test_rcu_qsbr.c b/app/test/test_rcu_qsbr.c\nnew file mode 100644\nindex 000000000..ed6934a47\n--- /dev/null\n+++ b/app/test/test_rcu_qsbr.c\n@@ -0,0 +1,1014 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2018 Arm Limited\n+ */\n+\n+#include <stdio.h>\n+#include <stdbool.h>\n+#include <rte_pause.h>\n+#include <rte_rcu_qsbr.h>\n+#include <rte_hash.h>\n+#include <rte_hash_crc.h>\n+#include <rte_malloc.h>\n+#include <rte_cycles.h>\n+#include <unistd.h>\n+\n+#include \"test.h\"\n+\n+/* Check condition and return an error if true. */\n+#define TEST_RCU_QSBR_RETURN_IF_ERROR(cond, str, ...) 
do { \\\n+\tif (cond) { \\\n+\t\tprintf(\"ERROR file %s, line %d: \" str \"\\n\", __FILE__, \\\n+\t\t\t__LINE__, ##__VA_ARGS__); \\\n+\t\treturn -1; \\\n+\t} \\\n+} while (0)\n+\n+/* Make sure that this has the same value as __RTE_QSBR_CNT_INIT */\n+#define TEST_RCU_QSBR_CNT_INIT 1\n+\n+#define TEST_RCU_MAX_LCORE 128\n+uint16_t enabled_core_ids[TEST_RCU_MAX_LCORE];\n+uint8_t num_cores;\n+\n+static uint32_t *keys;\n+#define TOTAL_ENTRY (1024 * 8)\n+#define COUNTER_VALUE 4096\n+static uint32_t *hash_data[TEST_RCU_MAX_LCORE][TOTAL_ENTRY];\n+static uint8_t writer_done;\n+\n+static struct rte_rcu_qsbr *t[TEST_RCU_MAX_LCORE];\n+struct rte_hash *h[TEST_RCU_MAX_LCORE];\n+char hash_name[TEST_RCU_MAX_LCORE][8];\n+\n+static inline int\n+get_enabled_cores_mask(void)\n+{\n+\tuint16_t core_id;\n+\tuint32_t max_cores = rte_lcore_count();\n+\n+\tif (max_cores > TEST_RCU_MAX_LCORE) {\n+\t\tprintf(\"Number of cores exceed %d\\n\", TEST_RCU_MAX_LCORE);\n+\t\treturn -1;\n+\t}\n+\n+\tcore_id = 0;\n+\tnum_cores = 0;\n+\tRTE_LCORE_FOREACH_SLAVE(core_id) {\n+\t\tenabled_core_ids[num_cores] = core_id;\n+\t\tnum_cores++;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+alloc_rcu(void)\n+{\n+\tint i;\n+\tuint32_t sz;\n+\n+\tsz = rte_rcu_qsbr_get_memsize(TEST_RCU_MAX_LCORE);\n+\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++)\n+\t\tt[i] = (struct rte_rcu_qsbr *)rte_zmalloc(NULL, sz,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\n+\treturn 0;\n+}\n+\n+static int\n+free_rcu(void)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++)\n+\t\trte_free(t[i]);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_thread_register: Add a reader thread, to the list of threads\n+ * reporting their quiescent state on a QS variable.\n+ */\n+static int\n+test_rcu_qsbr_get_memsize(void)\n+{\n+\tuint32_t sz;\n+\n+\tprintf(\"\\nTest rte_rcu_qsbr_thread_register()\\n\");\n+\n+\tsz = rte_rcu_qsbr_get_memsize(0);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((sz != 1), \"Get Memsize for 0 threads\");\n+\n+\tsz = rte_rcu_qsbr_get_memsize(TEST_RCU_MAX_LCORE);\n+\t/* For 128 threads,\n+\t * for machines with cache line size of 64B - 8384\n+\t * for machines with cache line size of 128 - 16768\n+\t */\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((sz != 8384 && sz != 16768),\n+\t\t\"Get Memsize\");\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_init: Initialize a QSBR variable.\n+ */\n+static int\n+test_rcu_qsbr_init(void)\n+{\n+\tint r;\n+\n+\tprintf(\"\\nTest rte_rcu_qsbr_init()\\n\");\n+\n+\tr = rte_rcu_qsbr_init(NULL, TEST_RCU_MAX_LCORE);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((r != 1), \"NULL variable\");\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_thread_register: Add a reader thread, to the list of threads\n+ * reporting their quiescent state on a QS variable.\n+ */\n+static int\n+test_rcu_qsbr_thread_register(void)\n+{\n+\tint ret;\n+\n+\tprintf(\"\\nTest rte_rcu_qsbr_thread_register()\\n\");\n+\n+\tret = rte_rcu_qsbr_thread_register(NULL, enabled_core_ids[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"NULL variable check\");\n+\n+\tret = rte_rcu_qsbr_thread_register(NULL, 100000);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),\n+\t\t\"NULL variable, invalid thread id\");\n+\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\t/* Register valid thread id */\n+\tret = rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1), \"Valid thread id\");\n+\n+\t/* Re-registering should not return error */\n+\tret = rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 
1),\n+\t\t\"Already registered thread id\");\n+\n+\t/* Register valid thread id - max allowed thread id */\n+\tret = rte_rcu_qsbr_thread_register(t[0], TEST_RCU_MAX_LCORE - 1);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1), \"Max thread id\");\n+\n+\tret = rte_rcu_qsbr_thread_register(t[0], 100000);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),\n+\t\t\"NULL variable, invalid thread id\");\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_thread_unregister: Remove a reader thread, from the list of\n+ * threads reporting their quiescent state on a QS variable.\n+ */\n+static int\n+test_rcu_qsbr_thread_unregister(void)\n+{\n+\tint i, j, ret;\n+\tuint64_t token;\n+\tuint8_t num_threads[3] = {1, TEST_RCU_MAX_LCORE, 1};\n+\n+\tprintf(\"\\nTest rte_rcu_qsbr_thread_unregister()\\n\");\n+\n+\tret = rte_rcu_qsbr_thread_unregister(NULL, enabled_core_ids[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"NULL variable check\");\n+\n+\tret = rte_rcu_qsbr_thread_unregister(NULL, 100000);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),\n+\t\t\"NULL variable, invalid thread id\");\n+\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\trte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);\n+\n+\tret = rte_rcu_qsbr_thread_unregister(t[0], 100000);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),\n+\t\t\"NULL variable, invalid thread id\");\n+\n+\t/* Find first disabled core */\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++) {\n+\t\tif (enabled_core_ids[i] == 0)\n+\t\t\tbreak;\n+\t}\n+\t/* Test with disabled lcore */\n+\tret = rte_rcu_qsbr_thread_unregister(t[0], i);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1),\n+\t\t\"disabled thread id\");\n+\t/* Unregister already unregistered core */\n+\tret = rte_rcu_qsbr_thread_unregister(t[0], i);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1),\n+\t\t\"Already unregistered core\");\n+\n+\t/* Test with enabled lcore */\n+\tret = rte_rcu_qsbr_thread_unregister(t[0], enabled_core_ids[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1),\n+\t\t\"enabled thread id\");\n+\t/* Unregister already unregistered core */\n+\tret = rte_rcu_qsbr_thread_unregister(t[0], enabled_core_ids[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1),\n+\t\t\"Already unregistered core\");\n+\n+\t/*\n+\t * Test with different thread_ids:\n+\t * 1 - thread_id = 0\n+\t * 2 - All possible thread_ids, from 0 to TEST_RCU_MAX_LCORE\n+\t * 3 - thread_id = TEST_RCU_MAX_LCORE - 1\n+\t */\n+\tfor (j = 0; j < 3; j++) {\n+\t\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\t\tfor (i = 0; i < num_threads[j]; i++)\n+\t\t\trte_rcu_qsbr_thread_register(t[0],\n+\t\t\t\t(j == 2) ? (TEST_RCU_MAX_LCORE - 1) : i);\n+\n+\t\ttoken = rte_rcu_qsbr_start(t[0]);\n+\t\tTEST_RCU_QSBR_RETURN_IF_ERROR(\n+\t\t\t(token != (TEST_RCU_QSBR_CNT_INIT + 1)), \"QSBR Start\");\n+\t\t/* Update quiescent state counter */\n+\t\tfor (i = 0; i < num_threads[j]; i++) {\n+\t\t\t/* Skip one update */\n+\t\t\tif (i == (TEST_RCU_MAX_LCORE - 10))\n+\t\t\t\tcontinue;\n+\t\t\trte_rcu_qsbr_quiescent(t[0],\n+\t\t\t\t(j == 2) ? 
(TEST_RCU_MAX_LCORE - 1) : i);\n+\t\t}\n+\n+\t\tif (j == 1) {\n+\t\t\t/* Validate the updates */\n+\t\t\tret = rte_rcu_qsbr_check(t[0], token, false);\n+\t\t\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),\n+\t\t\t\t\t\t\"Non-blocking QSBR check\");\n+\t\t\t/* Update the previously skipped thread */\n+\t\t\trte_rcu_qsbr_quiescent(t[0], TEST_RCU_MAX_LCORE - 10);\n+\t\t}\n+\n+\t\t/* Validate the updates */\n+\t\tret = rte_rcu_qsbr_check(t[0], token, false);\n+\t\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),\n+\t\t\t\t\t\t\"Non-blocking QSBR check\");\n+\n+\t\tfor (i = 0; i < num_threads[j]; i++)\n+\t\t\trte_rcu_qsbr_thread_unregister(t[0],\n+\t\t\t\t(j == 2) ? (TEST_RCU_MAX_LCORE - 1) : i);\n+\n+\t\t/* Check with no thread registered */\n+\t\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\t\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),\n+\t\t\t\t\t\t\"Blocking QSBR check\");\n+\t}\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_start: Ask the worker threads to report the quiescent state\n+ * status.\n+ */\n+static int\n+test_rcu_qsbr_start(void)\n+{\n+\tuint64_t token;\n+\tint i;\n+\n+\tprintf(\"\\nTest rte_rcu_qsbr_start()\\n\");\n+\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\tfor (i = 0; i < 3; i++)\n+\t\trte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);\n+\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR(\n+\t\t(token != (TEST_RCU_QSBR_CNT_INIT + 1)), \"QSBR Start\");\n+\treturn 0;\n+}\n+\n+static int\n+test_rcu_qsbr_check_reader(void *arg)\n+{\n+\tstruct rte_rcu_qsbr *temp;\n+\tuint8_t read_type = (uint8_t)((uintptr_t)arg);\n+\n+\ttemp = t[read_type];\n+\n+\t/* Update quiescent state counter */\n+\trte_rcu_qsbr_quiescent(temp, enabled_core_ids[0]);\n+\trte_rcu_qsbr_quiescent(temp, enabled_core_ids[1]);\n+\trte_rcu_qsbr_thread_unregister(temp, enabled_core_ids[2]);\n+\trte_rcu_qsbr_quiescent(temp, enabled_core_ids[3]);\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_check: Checks if all the worker threads have entered the queis-\n+ * cent state 'n' number of times. 
'n' is provided in rte_rcu_qsbr_start API.\n+ */\n+static int\n+test_rcu_qsbr_check(void)\n+{\n+\tint i, ret;\n+\tuint64_t token;\n+\n+\tprintf(\"\\nTest rte_rcu_qsbr_check()\\n\");\n+\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR(\n+\t\t(token != (TEST_RCU_QSBR_CNT_INIT + 1)), \"QSBR Start\");\n+\n+\n+\tret = rte_rcu_qsbr_check(t[0], 0, false);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"Token = 0\");\n+\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"Blocking QSBR check\");\n+\n+\tfor (i = 0; i < 3; i++)\n+\t\trte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);\n+\n+\tret = rte_rcu_qsbr_check(t[0], token, false);\n+\t/* Threads are offline, hence this should pass */\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"Non-blocking QSBR check\");\n+\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR(\n+\t\t(token != (TEST_RCU_QSBR_CNT_INIT + 2)), \"QSBR Start\");\n+\n+\tret = rte_rcu_qsbr_check(t[0], token, false);\n+\t/* Threads are offline, hence this should pass */\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"Non-blocking QSBR check\");\n+\n+\tfor (i = 0; i < 3; i++)\n+\t\trte_rcu_qsbr_thread_unregister(t[0], enabled_core_ids[i]);\n+\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"Blocking QSBR check\");\n+\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\tfor (i = 0; i < 4; i++)\n+\t\trte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);\n+\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR(\n+\t\t(token != (TEST_RCU_QSBR_CNT_INIT + 1)), \"QSBR Start\");\n+\n+\trte_eal_remote_launch(test_rcu_qsbr_check_reader, NULL,\n+\t\t\t\t\t\t\tenabled_core_ids[0]);\n+\n+\trte_eal_mp_wait_lcore();\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret != 1), \"Blocking QSBR check\");\n+\n+\treturn 0;\n+}\n+\n+static int\n+test_rcu_qsbr_synchronize_reader(void *arg)\n+{\n+\tuint32_t lcore_id = rte_lcore_id();\n+\t(void)arg;\n+\n+\t/* Register and become online */\n+\trte_rcu_qsbr_thread_register(t[0], lcore_id);\n+\trte_rcu_qsbr_thread_online(t[0], lcore_id);\n+\n+\twhile (!writer_done)\n+\t\trte_rcu_qsbr_quiescent(t[0], lcore_id);\n+\n+\trte_rcu_qsbr_thread_offline(t[0], lcore_id);\n+\trte_rcu_qsbr_thread_unregister(t[0], lcore_id);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_synchronize: Wait till all the reader threads have entered\n+ * the queiscent state.\n+ */\n+static int\n+test_rcu_qsbr_synchronize(void)\n+{\n+\tint i;\n+\n+\tprintf(\"\\nTest rte_rcu_qsbr_synchronize()\\n\");\n+\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\t/* Test if the API returns when there are no threads reporting\n+\t * QS on the variable.\n+\t */\n+\trte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);\n+\n+\t/* Test if the API returns when there are threads registered\n+\t * but not online.\n+\t */\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++)\n+\t\trte_rcu_qsbr_thread_register(t[0], i);\n+\trte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);\n+\n+\t/* Test if the API returns when the caller is also\n+\t * reporting the QS status.\n+\t */\n+\trte_rcu_qsbr_thread_online(t[0], 0);\n+\trte_rcu_qsbr_synchronize(t[0], 0);\n+\trte_rcu_qsbr_thread_offline(t[0], 0);\n+\n+\t/* Check the other boundary */\n+\trte_rcu_qsbr_thread_online(t[0], TEST_RCU_MAX_LCORE - 1);\n+\trte_rcu_qsbr_synchronize(t[0], TEST_RCU_MAX_LCORE - 
1);\n+\trte_rcu_qsbr_thread_offline(t[0], TEST_RCU_MAX_LCORE - 1);\n+\n+\t/* Test if the API returns after unregisterng all the threads */\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++)\n+\t\trte_rcu_qsbr_thread_unregister(t[0], i);\n+\trte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);\n+\n+\t/* Test if the API returns with the live threads */\n+\twriter_done = 0;\n+\tfor (i = 0; i < num_cores; i++)\n+\t\trte_eal_remote_launch(test_rcu_qsbr_synchronize_reader,\n+\t\t\tNULL, enabled_core_ids[i]);\n+\trte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);\n+\trte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);\n+\trte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);\n+\trte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);\n+\trte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);\n+\n+\twriter_done = 1;\n+\trte_eal_mp_wait_lcore();\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_thread_online: Add a registered reader thread, to\n+ * the list of threads reporting their quiescent state on a QS variable.\n+ */\n+static int\n+test_rcu_qsbr_thread_online(void)\n+{\n+\tint i, ret;\n+\tuint64_t token;\n+\n+\tprintf(\"Test rte_rcu_qsbr_thread_online()\\n\");\n+\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\t/* Register 2 threads to validate that only the\n+\t * online thread is waited upon.\n+\t */\n+\trte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);\n+\trte_rcu_qsbr_thread_register(t[0], enabled_core_ids[1]);\n+\n+\t/* Use qsbr_start to verify that the thread_online API\n+\t * succeeded.\n+\t */\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\n+\t/* Make the thread online */\n+\trte_rcu_qsbr_thread_online(t[0], enabled_core_ids[0]);\n+\n+\t/* Check if the thread is online */\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"thread online\");\n+\n+\t/* Check if the online thread, can report QS */\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\trte_rcu_qsbr_quiescent(t[0], enabled_core_ids[0]);\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"thread update\");\n+\n+\t/* Make all the threads online */\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++) {\n+\t\trte_rcu_qsbr_thread_register(t[0], i);\n+\t\trte_rcu_qsbr_thread_online(t[0], i);\n+\t}\n+\t/* Check if all the threads are online */\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"thread online\");\n+\t/* Check if all the online threads can report QS */\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++)\n+\t\trte_rcu_qsbr_quiescent(t[0], i);\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"thread update\");\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_thread_offline: Remove a registered reader thread, from\n+ * the list of threads reporting their quiescent state on a QS variable.\n+ */\n+static int\n+test_rcu_qsbr_thread_offline(void)\n+{\n+\tint i, ret;\n+\tuint64_t token;\n+\n+\tprintf(\"\\nTest rte_rcu_qsbr_thread_offline()\\n\");\n+\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\trte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);\n+\n+\t/* Make the thread offline */\n+\trte_rcu_qsbr_thread_offline(t[0], enabled_core_ids[0]);\n+\n+\t/* Use qsbr_start to verify that the thread_offline API\n+\t * succeeded.\n+\t */\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\t/* Check if the thread is offline */\n+\tret = 
rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"thread offline\");\n+\n+\t/* Bring an offline thread online and check if it can\n+\t * report QS.\n+\t */\n+\trte_rcu_qsbr_thread_online(t[0], enabled_core_ids[0]);\n+\t/* Check if the online thread, can report QS */\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\trte_rcu_qsbr_quiescent(t[0], enabled_core_ids[0]);\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"offline to online\");\n+\n+\t/*\n+\t * Check a sequence of online/status/offline/status/online/status\n+\t */\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\t/* Make the threads online */\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++) {\n+\t\trte_rcu_qsbr_thread_register(t[0], i);\n+\t\trte_rcu_qsbr_thread_online(t[0], i);\n+\t}\n+\n+\t/* Check if all the threads are online */\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"thread online\");\n+\n+\t/* Check if all the online threads can report QS */\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++)\n+\t\trte_rcu_qsbr_quiescent(t[0], i);\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"report QS\");\n+\n+\t/* Make all the threads offline */\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++)\n+\t\trte_rcu_qsbr_thread_offline(t[0], i);\n+\t/* Make sure these threads are not being waited on */\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"offline QS\");\n+\n+\t/* Make the threads online */\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++)\n+\t\trte_rcu_qsbr_thread_online(t[0], i);\n+\t/* Check if all the online threads can report QS */\n+\ttoken = rte_rcu_qsbr_start(t[0]);\n+\tfor (i = 0; i < TEST_RCU_MAX_LCORE; i++)\n+\t\trte_rcu_qsbr_quiescent(t[0], i);\n+\tret = rte_rcu_qsbr_check(t[0], token, true);\n+\tTEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), \"online again\");\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * rte_rcu_qsbr_dump: Dump status of a single QS variable to a file\n+ */\n+static int\n+test_rcu_qsbr_dump(void)\n+{\n+\tint i;\n+\n+\tprintf(\"\\nTest rte_rcu_qsbr_dump()\\n\");\n+\n+\t/* Negative tests */\n+\trte_rcu_qsbr_dump(NULL, t[0]);\n+\trte_rcu_qsbr_dump(stdout, NULL);\n+\trte_rcu_qsbr_dump(NULL, NULL);\n+\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\trte_rcu_qsbr_init(t[1], TEST_RCU_MAX_LCORE);\n+\n+\t/* QS variable with 0 core mask */\n+\trte_rcu_qsbr_dump(stdout, t[0]);\n+\n+\trte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);\n+\n+\tfor (i = 1; i < 3; i++)\n+\t\trte_rcu_qsbr_thread_register(t[1], enabled_core_ids[i]);\n+\n+\trte_rcu_qsbr_dump(stdout, t[0]);\n+\trte_rcu_qsbr_dump(stdout, t[1]);\n+\tprintf(\"\\n\");\n+\treturn 0;\n+}\n+\n+static int\n+test_rcu_qsbr_reader(void *arg)\n+{\n+\tstruct rte_rcu_qsbr *temp;\n+\tstruct rte_hash *hash = NULL;\n+\tint i;\n+\tuint32_t lcore_id = rte_lcore_id();\n+\tuint8_t read_type = (uint8_t)((uintptr_t)arg);\n+\tuint32_t *pdata;\n+\n+\ttemp = t[read_type];\n+\thash = h[read_type];\n+\n+\tdo {\n+\t\trte_rcu_qsbr_thread_register(temp, lcore_id);\n+\t\trte_rcu_qsbr_thread_online(temp, lcore_id);\n+\t\tfor (i = 0; i < TOTAL_ENTRY; i++) {\n+\t\t\trte_rcu_qsbr_lock(temp, lcore_id);\n+\t\t\tif (rte_hash_lookup_data(hash, keys+i,\n+\t\t\t\t\t(void **)&pdata) != -ENOENT) {\n+\t\t\t\t*pdata = 0;\n+\t\t\t\twhile (*pdata < 
COUNTER_VALUE)\n+\t\t\t\t\t++*pdata;\n+\t\t\t}\n+\t\t\trte_rcu_qsbr_unlock(temp, lcore_id);\n+\t\t}\n+\t\t/* Update quiescent state counter */\n+\t\trte_rcu_qsbr_quiescent(temp, lcore_id);\n+\t\trte_rcu_qsbr_thread_offline(temp, lcore_id);\n+\t\trte_rcu_qsbr_thread_unregister(temp, lcore_id);\n+\t} while (!writer_done);\n+\n+\treturn 0;\n+}\n+\n+static int\n+test_rcu_qsbr_writer(void *arg)\n+{\n+\tuint64_t token;\n+\tint32_t pos;\n+\tstruct rte_rcu_qsbr *temp;\n+\tstruct rte_hash *hash = NULL;\n+\tuint8_t writer_type = (uint8_t)((uintptr_t)arg);\n+\n+\ttemp = t[(writer_type/2) % TEST_RCU_MAX_LCORE];\n+\thash = h[(writer_type/2) % TEST_RCU_MAX_LCORE];\n+\n+\t/* Delete element from the shared data structure */\n+\tpos = rte_hash_del_key(hash, keys + (writer_type % TOTAL_ENTRY));\n+\tif (pos < 0) {\n+\t\tprintf(\"Delete key failed #%d\\n\",\n+\t\t       keys[writer_type % TOTAL_ENTRY]);\n+\t\treturn -1;\n+\t}\n+\t/* Start the quiescent state query process */\n+\ttoken = rte_rcu_qsbr_start(temp);\n+\t/* Check the quiescent state status */\n+\trte_rcu_qsbr_check(temp, token, true);\n+\tif (*hash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]\n+\t    [writer_type % TOTAL_ENTRY] != COUNTER_VALUE &&\n+\t    *hash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]\n+\t    [writer_type % TOTAL_ENTRY] != 0) {\n+\t\tprintf(\"Reader did not complete #%d = %d\\t\", writer_type,\n+\t\t\t*hash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]\n+\t\t\t\t[writer_type % TOTAL_ENTRY]);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_hash_free_key_with_position(hash, pos) < 0) {\n+\t\tprintf(\"Failed to free the key #%d\\n\",\n+\t\t       keys[writer_type % TOTAL_ENTRY]);\n+\t\treturn -1;\n+\t}\n+\trte_free(hash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]\n+\t\t\t\t[writer_type % TOTAL_ENTRY]);\n+\thash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]\n+\t\t\t[writer_type % TOTAL_ENTRY] = NULL;\n+\n+\treturn 0;\n+}\n+\n+static struct rte_hash *\n+init_hash(int hash_id)\n+{\n+\tint i;\n+\tstruct rte_hash *h = NULL;\n+\n+\tsprintf(hash_name[hash_id], \"hash%d\", hash_id);\n+\tstruct rte_hash_parameters hash_params = {\n+\t\t.entries = TOTAL_ENTRY,\n+\t\t.key_len = sizeof(uint32_t),\n+\t\t.hash_func_init_val = 0,\n+\t\t.socket_id = rte_socket_id(),\n+\t\t.hash_func = rte_hash_crc,\n+\t\t.extra_flag =\n+\t\t\tRTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF,\n+\t\t.name = hash_name[hash_id],\n+\t};\n+\n+\th = rte_hash_create(&hash_params);\n+\tif (h == NULL) {\n+\t\tprintf(\"Hash create Failed\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tfor (i = 0; i < TOTAL_ENTRY; i++) {\n+\t\thash_data[hash_id][i] = rte_zmalloc(NULL, sizeof(uint32_t), 0);\n+\t\tif (hash_data[hash_id][i] == NULL) {\n+\t\t\tprintf(\"No memory\\n\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\tkeys = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_ENTRY, 0);\n+\tif (keys == NULL) {\n+\t\tprintf(\"No memory\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tfor (i = 0; i < TOTAL_ENTRY; i++)\n+\t\tkeys[i] = i;\n+\n+\tfor (i = 0; i < TOTAL_ENTRY; i++) {\n+\t\tif (rte_hash_add_key_data(h, keys + i,\n+\t\t\t\t(void *)((uintptr_t)hash_data[hash_id][i]))\n+\t\t\t\t< 0) {\n+\t\t\tprintf(\"Hash key add Failed #%d\\n\", i);\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\treturn h;\n+}\n+\n+/*\n+ * Functional test:\n+ * Single writer, Single QS variable, simultaneous QSBR Queries\n+ */\n+static int\n+test_rcu_qsbr_sw_sv_3qs(void)\n+{\n+\tuint64_t token[3];\n+\tint i;\n+\tint32_t pos[3];\n+\n+\twriter_done = 0;\n+\n+\tprintf(\"Test: 1 writer, 1 QSBR variable, simultaneous QSBR queries\\n\");\n+\n+\trte_rcu_qsbr_init(t[0], 
TEST_RCU_MAX_LCORE);\n+\n+\t/* Shared data structure created */\n+\th[0] = init_hash(0);\n+\tif (h[0] == NULL) {\n+\t\tprintf(\"Hash init failed\\n\");\n+\t\tgoto error;\n+\t}\n+\n+\t/* Reader threads are launched */\n+\tfor (i = 0; i < 4; i++)\n+\t\trte_eal_remote_launch(test_rcu_qsbr_reader, NULL,\n+\t\t\t\t\tenabled_core_ids[i]);\n+\n+\t/* Delete element from the shared data structure */\n+\tpos[0] = rte_hash_del_key(h[0], keys + 0);\n+\tif (pos[0] < 0) {\n+\t\tprintf(\"Delete key failed #%d\\n\", keys[0]);\n+\t\tgoto error;\n+\t}\n+\t/* Start the quiescent state query process */\n+\ttoken[0] = rte_rcu_qsbr_start(t[0]);\n+\n+\t/* Delete element from the shared data structure */\n+\tpos[1] = rte_hash_del_key(h[0], keys + 3);\n+\tif (pos[1] < 0) {\n+\t\tprintf(\"Delete key failed #%d\\n\", keys[3]);\n+\t\tgoto error;\n+\t}\n+\t/* Start the quiescent state query process */\n+\ttoken[1] = rte_rcu_qsbr_start(t[0]);\n+\n+\t/* Delete element from the shared data structure */\n+\tpos[2] = rte_hash_del_key(h[0], keys + 6);\n+\tif (pos[2] < 0) {\n+\t\tprintf(\"Delete key failed #%d\\n\", keys[6]);\n+\t\tgoto error;\n+\t}\n+\t/* Start the quiescent state query process */\n+\ttoken[2] = rte_rcu_qsbr_start(t[0]);\n+\n+\t/* Check the quiescent state status */\n+\trte_rcu_qsbr_check(t[0], token[0], true);\n+\tif (*hash_data[0][0] != COUNTER_VALUE && *hash_data[0][0] != 0) {\n+\t\tprintf(\"Reader did not complete #0 = %d\\n\", *hash_data[0][0]);\n+\t\tgoto error;\n+\t}\n+\n+\tif (rte_hash_free_key_with_position(h[0], pos[0]) < 0) {\n+\t\tprintf(\"Failed to free the key #%d\\n\", keys[0]);\n+\t\tgoto error;\n+\t}\n+\trte_free(hash_data[0][0]);\n+\thash_data[0][0] = NULL;\n+\n+\t/* Check the quiescent state status */\n+\trte_rcu_qsbr_check(t[0], token[1], true);\n+\tif (*hash_data[0][3] != COUNTER_VALUE && *hash_data[0][3] != 0) {\n+\t\tprintf(\"Reader did not complete #3 = %d\\n\", *hash_data[0][3]);\n+\t\tgoto error;\n+\t}\n+\n+\tif (rte_hash_free_key_with_position(h[0], pos[1]) < 0) {\n+\t\tprintf(\"Failed to free the key #%d\\n\", keys[3]);\n+\t\tgoto error;\n+\t}\n+\trte_free(hash_data[0][3]);\n+\thash_data[0][3] = NULL;\n+\n+\t/* Check the quiescent state status */\n+\trte_rcu_qsbr_check(t[0], token[2], true);\n+\tif (*hash_data[0][6] != COUNTER_VALUE && *hash_data[0][6] != 0) {\n+\t\tprintf(\"Reader did not complete #6 = %d\\n\", *hash_data[0][6]);\n+\t\tgoto error;\n+\t}\n+\n+\tif (rte_hash_free_key_with_position(h[0], pos[2]) < 0) {\n+\t\tprintf(\"Failed to free the key #%d\\n\", keys[6]);\n+\t\tgoto error;\n+\t}\n+\trte_free(hash_data[0][6]);\n+\thash_data[0][6] = NULL;\n+\n+\twriter_done = 1;\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\t/* Check return value from threads */\n+\tfor (i = 0; i < 4; i++)\n+\t\tif (lcore_config[enabled_core_ids[i]].ret < 0)\n+\t\t\tgoto error;\n+\trte_hash_free(h[0]);\n+\trte_free(keys);\n+\n+\treturn 0;\n+\n+error:\n+\twriter_done = 1;\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\n+\trte_hash_free(h[0]);\n+\trte_free(keys);\n+\tfor (i = 0; i < TOTAL_ENTRY; i++)\n+\t\trte_free(hash_data[0][i]);\n+\n+\treturn -1;\n+}\n+\n+/*\n+ * Multi writer, Multiple QS variable, simultaneous QSBR queries\n+ */\n+static int\n+test_rcu_qsbr_mw_mv_mqs(void)\n+{\n+\tint i, j;\n+\tuint8_t test_cores;\n+\n+\twriter_done = 0;\n+\ttest_cores = num_cores / 4;\n+\ttest_cores = test_cores * 4;\n+\n+\tprintf(\"Test: %d writers, %d QSBR variable, simultaneous QSBR queries\\n\"\n+\t       , test_cores / 2, test_cores / 
4);\n+\n+\tfor (i = 0; i < num_cores / 4; i++) {\n+\t\trte_rcu_qsbr_init(t[i], TEST_RCU_MAX_LCORE);\n+\t\th[i] = init_hash(i);\n+\t\tif (h[i] == NULL) {\n+\t\t\tprintf(\"Hash init failed\\n\");\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n+\n+\t/* Reader threads are launched */\n+\tfor (i = 0; i < test_cores / 2; i++)\n+\t\trte_eal_remote_launch(test_rcu_qsbr_reader,\n+\t\t\t\t      (void *)(uintptr_t)(i / 2),\n+\t\t\t\t\tenabled_core_ids[i]);\n+\n+\t/* Writer threads are launched */\n+\tfor (; i < test_cores; i++)\n+\t\trte_eal_remote_launch(test_rcu_qsbr_writer,\n+\t\t\t\t      (void *)(uintptr_t)(i - (test_cores / 2)),\n+\t\t\t\t\tenabled_core_ids[i]);\n+\t/* Wait for writers to complete */\n+\tfor (i = test_cores / 2; i < test_cores;  i++)\n+\t\trte_eal_wait_lcore(enabled_core_ids[i]);\n+\n+\twriter_done = 1;\n+\t/* Wait for readers to complete */\n+\trte_eal_mp_wait_lcore();\n+\n+\t/* Check return value from threads */\n+\tfor (i = 0; i < test_cores; i++)\n+\t\tif (lcore_config[enabled_core_ids[i]].ret < 0)\n+\t\t\tgoto error;\n+\n+\tfor (i = 0; i < num_cores / 4; i++)\n+\t\trte_hash_free(h[i]);\n+\n+\trte_free(keys);\n+\n+\treturn 0;\n+\n+error:\n+\twriter_done = 1;\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\n+\tfor (i = 0; i < num_cores / 4; i++)\n+\t\trte_hash_free(h[i]);\n+\trte_free(keys);\n+\tfor (j = 0; j < TEST_RCU_MAX_LCORE; j++)\n+\t\tfor (i = 0; i < TOTAL_ENTRY; i++)\n+\t\t\trte_free(hash_data[j][i]);\n+\n+\treturn -1;\n+}\n+\n+static int\n+test_rcu_qsbr_main(void)\n+{\n+\tif (get_enabled_cores_mask() != 0)\n+\t\treturn -1;\n+\n+\tif (num_cores < 4) {\n+\t\tprintf(\"Test failed! Need 4 or more cores\\n\");\n+\t\tgoto test_fail;\n+\t}\n+\n+\t/* Error-checking test cases */\n+\tif (test_rcu_qsbr_get_memsize() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_init() < 0)\n+\t\tgoto test_fail;\n+\n+\talloc_rcu();\n+\n+\tif (test_rcu_qsbr_thread_register() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_thread_unregister() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_start() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_check() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_synchronize() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_dump() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_thread_online() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_thread_offline() < 0)\n+\t\tgoto test_fail;\n+\n+\tprintf(\"\\nFunctional tests\\n\");\n+\n+\tif (test_rcu_qsbr_sw_sv_3qs() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_mw_mv_mqs() < 0)\n+\t\tgoto test_fail;\n+\n+\tfree_rcu();\n+\n+\tprintf(\"\\n\");\n+\treturn 0;\n+\n+test_fail:\n+\tfree_rcu();\n+\n+\treturn -1;\n+}\n+\n+REGISTER_TEST_COMMAND(rcu_qsbr_autotest, test_rcu_qsbr_main);\ndiff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c\nnew file mode 100644\nindex 000000000..16a43f8db\n--- /dev/null\n+++ b/app/test/test_rcu_qsbr_perf.c\n@@ -0,0 +1,704 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2018 Arm Limited\n+ */\n+\n+#include <stdio.h>\n+#include <stdbool.h>\n+#include <inttypes.h>\n+#include <rte_pause.h>\n+#include <rte_rcu_qsbr.h>\n+#include <rte_hash.h>\n+#include <rte_hash_crc.h>\n+#include <rte_malloc.h>\n+#include <rte_cycles.h>\n+#include <unistd.h>\n+\n+#include \"test.h\"\n+\n+/* Check condition and return an error if true. 
*/\n+#define TEST_RCU_MAX_LCORE 128\n+static uint16_t enabled_core_ids[TEST_RCU_MAX_LCORE];\n+static uint8_t num_cores;\n+\n+static uint32_t *keys;\n+#define TOTAL_ENTRY (1024 * 8)\n+#define COUNTER_VALUE 4096\n+static uint32_t *hash_data[TEST_RCU_MAX_LCORE][TOTAL_ENTRY];\n+static volatile uint8_t writer_done;\n+static volatile uint8_t all_registered;\n+static volatile uint32_t thr_id;\n+\n+static struct rte_rcu_qsbr *t[TEST_RCU_MAX_LCORE];\n+static struct rte_hash *h[TEST_RCU_MAX_LCORE];\n+static char hash_name[TEST_RCU_MAX_LCORE][8];\n+static rte_atomic64_t updates, checks;\n+static rte_atomic64_t update_cycles, check_cycles;\n+\n+/* Scale down results to 1000 operations to support lower\n+ * granularity clocks.\n+ */\n+#define RCU_SCALE_DOWN 1000\n+\n+/* Simple way to allocate thread ids in 0 to TEST_RCU_MAX_LCORE space */\n+static inline uint32_t\n+alloc_thread_id(void)\n+{\n+\tuint32_t tmp_thr_id;\n+\n+\ttmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);\n+\tif (tmp_thr_id >= TEST_RCU_MAX_LCORE)\n+\t\tprintf(\"Invalid thread id %u\\n\", tmp_thr_id);\n+\n+\treturn tmp_thr_id;\n+}\n+\n+static inline int\n+get_enabled_cores_mask(void)\n+{\n+\tuint16_t core_id;\n+\tuint32_t max_cores = rte_lcore_count();\n+\n+\tif (max_cores > TEST_RCU_MAX_LCORE) {\n+\t\tprintf(\"Number of cores exceed %d\\n\", TEST_RCU_MAX_LCORE);\n+\t\treturn -1;\n+\t}\n+\n+\tcore_id = 0;\n+\tnum_cores = 0;\n+\tRTE_LCORE_FOREACH_SLAVE(core_id) {\n+\t\tenabled_core_ids[num_cores] = core_id;\n+\t\tnum_cores++;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+test_rcu_qsbr_reader_perf(void *arg)\n+{\n+\tbool writer_present = (bool)arg;\n+\tuint32_t thread_id = alloc_thread_id();\n+\tuint64_t loop_cnt = 0;\n+\tuint64_t begin, cycles;\n+\n+\t/* Register for report QS */\n+\trte_rcu_qsbr_thread_register(t[0], thread_id);\n+\t/* Make the thread online */\n+\trte_rcu_qsbr_thread_online(t[0], thread_id);\n+\n+\tbegin = rte_rdtsc_precise();\n+\n+\tif (writer_present) {\n+\t\twhile (!writer_done) {\n+\t\t\t/* Update quiescent state counter */\n+\t\t\trte_rcu_qsbr_quiescent(t[0], thread_id);\n+\t\t\tloop_cnt++;\n+\t\t}\n+\t} else {\n+\t\twhile (loop_cnt < 100000000) {\n+\t\t\t/* Update quiescent state counter */\n+\t\t\trte_rcu_qsbr_quiescent(t[0], thread_id);\n+\t\t\tloop_cnt++;\n+\t\t}\n+\t}\n+\n+\tcycles = rte_rdtsc_precise() - begin;\n+\trte_atomic64_add(&update_cycles, cycles);\n+\trte_atomic64_add(&updates, loop_cnt);\n+\n+\t/* Make the thread offline */\n+\trte_rcu_qsbr_thread_offline(t[0], thread_id);\n+\t/* Unregister before exiting to avoid writer from waiting */\n+\trte_rcu_qsbr_thread_unregister(t[0], thread_id);\n+\n+\treturn 0;\n+}\n+\n+static int\n+test_rcu_qsbr_writer_perf(void *arg)\n+{\n+\tbool wait = (bool)arg;\n+\tuint64_t token = 0;\n+\tuint64_t loop_cnt = 0;\n+\tuint64_t begin, cycles;\n+\n+\tbegin = rte_rdtsc_precise();\n+\n+\tdo {\n+\t\t/* Start the quiescent state query process */\n+\t\tif (wait)\n+\t\t\ttoken = rte_rcu_qsbr_start(t[0]);\n+\n+\t\t/* Check quiescent state status */\n+\t\trte_rcu_qsbr_check(t[0], token, wait);\n+\t\tloop_cnt++;\n+\t} while (loop_cnt < 20000000);\n+\n+\tcycles = rte_rdtsc_precise() - begin;\n+\trte_atomic64_add(&check_cycles, cycles);\n+\trte_atomic64_add(&checks, loop_cnt);\n+\treturn 0;\n+}\n+\n+/*\n+ * Perf test: Reader/writer\n+ * Single writer, Multiple Readers, Single QS var, Non-Blocking rcu_qsbr_check\n+ */\n+static int\n+test_rcu_qsbr_perf(void)\n+{\n+\tint i, sz;\n+\tint tmp_num_cores;\n+\n+\twriter_done = 
0;\n+\n+\trte_atomic64_clear(&updates);\n+\trte_atomic64_clear(&update_cycles);\n+\trte_atomic64_clear(&checks);\n+\trte_atomic64_clear(&check_cycles);\n+\n+\tprintf(\"\\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\\n\",\n+\t\tnum_cores - 1);\n+\n+\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\n+\tif (all_registered == 1)\n+\t\ttmp_num_cores = num_cores - 1;\n+\telse\n+\t\ttmp_num_cores = TEST_RCU_MAX_LCORE;\n+\n+\tsz = rte_rcu_qsbr_get_memsize(tmp_num_cores);\n+\tt[0] = (struct rte_rcu_qsbr *)rte_zmalloc(\"rcu0\", sz,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t/* QS variable is initialized */\n+\trte_rcu_qsbr_init(t[0], tmp_num_cores);\n+\n+\t/* Reader threads are launched */\n+\tfor (i = 0; i < num_cores - 1; i++)\n+\t\trte_eal_remote_launch(test_rcu_qsbr_reader_perf, (void *)1,\n+\t\t\t\t\tenabled_core_ids[i]);\n+\n+\t/* Writer thread is launched */\n+\trte_eal_remote_launch(test_rcu_qsbr_writer_perf,\n+\t\t\t      (void *)1, enabled_core_ids[i]);\n+\n+\t/* Wait for the writer thread */\n+\trte_eal_wait_lcore(enabled_core_ids[i]);\n+\twriter_done = 1;\n+\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\n+\tprintf(\"Total RCU updates = %\"PRIi64\"\\n\", rte_atomic64_read(&updates));\n+\tprintf(\"Cycles per %d updates: %\"PRIi64\"\\n\", RCU_SCALE_DOWN,\n+\t\trte_atomic64_read(&update_cycles) /\n+\t\t(rte_atomic64_read(&updates) / RCU_SCALE_DOWN));\n+\tprintf(\"Total RCU checks = %\"PRIi64\"\\n\", rte_atomic64_read(&checks));\n+\tprintf(\"Cycles per %d checks: %\"PRIi64\"\\n\", RCU_SCALE_DOWN,\n+\t\trte_atomic64_read(&check_cycles) /\n+\t\t(rte_atomic64_read(&checks) / RCU_SCALE_DOWN));\n+\n+\trte_free(t[0]);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Perf test: Readers\n+ * Single writer, Multiple readers, Single QS variable\n+ */\n+static int\n+test_rcu_qsbr_rperf(void)\n+{\n+\tint i, sz;\n+\tint tmp_num_cores;\n+\n+\trte_atomic64_clear(&updates);\n+\trte_atomic64_clear(&update_cycles);\n+\n+\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\n+\tprintf(\"\\nPerf Test: %d Readers\\n\", num_cores);\n+\n+\tif (all_registered == 1)\n+\t\ttmp_num_cores = num_cores;\n+\telse\n+\t\ttmp_num_cores = TEST_RCU_MAX_LCORE;\n+\n+\tsz = rte_rcu_qsbr_get_memsize(tmp_num_cores);\n+\tt[0] = (struct rte_rcu_qsbr *)rte_zmalloc(\"rcu0\", sz,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t/* QS variable is initialized */\n+\trte_rcu_qsbr_init(t[0], tmp_num_cores);\n+\n+\t/* Reader threads are launched */\n+\tfor (i = 0; i < num_cores; i++)\n+\t\trte_eal_remote_launch(test_rcu_qsbr_reader_perf, NULL,\n+\t\t\t\t\tenabled_core_ids[i]);\n+\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\n+\tprintf(\"Total RCU updates = %\"PRIi64\"\\n\", rte_atomic64_read(&updates));\n+\tprintf(\"Cycles per %d updates: %\"PRIi64\"\\n\", RCU_SCALE_DOWN,\n+\t\trte_atomic64_read(&update_cycles) /\n+\t\t(rte_atomic64_read(&updates) / RCU_SCALE_DOWN));\n+\n+\trte_free(t[0]);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Perf test:\n+ * Multiple writer, Single QS variable, Non-blocking rcu_qsbr_check\n+ */\n+static int\n+test_rcu_qsbr_wperf(void)\n+{\n+\tint i, sz;\n+\n+\trte_atomic64_clear(&checks);\n+\trte_atomic64_clear(&check_cycles);\n+\n+\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\n+\tprintf(\"\\nPerf test: %d Writers ('wait' in qsbr_check == false)\\n\",\n+\t\tnum_cores);\n+\n+\t/* Number of readers does not matter for QS variable in this test\n+\t * case as no reader will be registered.\n+\t */\n+\tsz = rte_rcu_qsbr_get_memsize(TEST_RCU_MAX_LCORE);\n+\tt[0] = (struct 
rte_rcu_qsbr *)rte_zmalloc(\"rcu0\", sz,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t/* QS variable is initialized */\n+\trte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);\n+\n+\t/* Writer threads are launched */\n+\tfor (i = 0; i < num_cores; i++)\n+\t\trte_eal_remote_launch(test_rcu_qsbr_writer_perf,\n+\t\t\t\t(void *)0, enabled_core_ids[i]);\n+\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\n+\tprintf(\"Total RCU checks = %\"PRIi64\"\\n\", rte_atomic64_read(&checks));\n+\tprintf(\"Cycles per %d checks: %\"PRIi64\"\\n\", RCU_SCALE_DOWN,\n+\t\trte_atomic64_read(&check_cycles) /\n+\t\t(rte_atomic64_read(&checks) / RCU_SCALE_DOWN));\n+\n+\trte_free(t[0]);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * RCU test cases using rte_hash data structure.\n+ */\n+static int\n+test_rcu_qsbr_hash_reader(void *arg)\n+{\n+\tstruct rte_rcu_qsbr *temp;\n+\tstruct rte_hash *hash = NULL;\n+\tint i;\n+\tuint64_t loop_cnt = 0;\n+\tuint64_t begin, cycles;\n+\tuint32_t thread_id = alloc_thread_id();\n+\tuint8_t read_type = (uint8_t)((uintptr_t)arg);\n+\tuint32_t *pdata;\n+\n+\ttemp = t[read_type];\n+\thash = h[read_type];\n+\n+\trte_rcu_qsbr_thread_register(temp, thread_id);\n+\n+\tbegin = rte_rdtsc_precise();\n+\n+\tdo {\n+\t\trte_rcu_qsbr_thread_online(temp, thread_id);\n+\t\tfor (i = 0; i < TOTAL_ENTRY; i++) {\n+\t\t\trte_rcu_qsbr_lock(temp, thread_id);\n+\t\t\tif (rte_hash_lookup_data(hash, keys+i,\n+\t\t\t\t\t(void **)&pdata) != -ENOENT) {\n+\t\t\t\t*pdata = 0;\n+\t\t\t\twhile (*pdata < COUNTER_VALUE)\n+\t\t\t\t\t++*pdata;\n+\t\t\t}\n+\t\t\trte_rcu_qsbr_unlock(temp, thread_id);\n+\t\t}\n+\t\t/* Update quiescent state counter */\n+\t\trte_rcu_qsbr_quiescent(temp, thread_id);\n+\t\trte_rcu_qsbr_thread_offline(temp, thread_id);\n+\t\tloop_cnt++;\n+\t} while (!writer_done);\n+\n+\tcycles = rte_rdtsc_precise() - begin;\n+\trte_atomic64_add(&update_cycles, cycles);\n+\trte_atomic64_add(&updates, loop_cnt);\n+\n+\trte_rcu_qsbr_thread_unregister(temp, thread_id);\n+\n+\treturn 0;\n+}\n+\n+static struct rte_hash *\n+init_hash(int hash_id)\n+{\n+\tint i;\n+\tstruct rte_hash *h = NULL;\n+\n+\tsprintf(hash_name[hash_id], \"hash%d\", hash_id);\n+\tstruct rte_hash_parameters hash_params = {\n+\t\t.entries = TOTAL_ENTRY,\n+\t\t.key_len = sizeof(uint32_t),\n+\t\t.hash_func_init_val = 0,\n+\t\t.socket_id = rte_socket_id(),\n+\t\t.hash_func = rte_hash_crc,\n+\t\t.extra_flag =\n+\t\t\tRTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF,\n+\t\t.name = hash_name[hash_id],\n+\t};\n+\n+\th = rte_hash_create(&hash_params);\n+\tif (h == NULL) {\n+\t\tprintf(\"Hash create Failed\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tfor (i = 0; i < TOTAL_ENTRY; i++) {\n+\t\thash_data[hash_id][i] = rte_zmalloc(NULL, sizeof(uint32_t), 0);\n+\t\tif (hash_data[hash_id][i] == NULL) {\n+\t\t\tprintf(\"No memory\\n\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\tkeys = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_ENTRY, 0);\n+\tif (keys == NULL) {\n+\t\tprintf(\"No memory\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tfor (i = 0; i < TOTAL_ENTRY; i++)\n+\t\tkeys[i] = i;\n+\n+\tfor (i = 0; i < TOTAL_ENTRY; i++) {\n+\t\tif (rte_hash_add_key_data(h, keys + i,\n+\t\t\t\t(void *)((uintptr_t)hash_data[hash_id][i]))\n+\t\t\t\t< 0) {\n+\t\t\tprintf(\"Hash key add Failed #%d\\n\", i);\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\treturn h;\n+}\n+\n+/*\n+ * Functional test:\n+ * Single writer, Single QS variable Single QSBR query, Blocking rcu_qsbr_check\n+ */\n+static int\n+test_rcu_qsbr_sw_sv_1qs(void)\n+{\n+\tuint64_t token, begin, cycles;\n+\tint i, tmp_num_cores, sz;\n+\tint32_t 
pos;\n+\n+\twriter_done = 0;\n+\n+\trte_atomic64_clear(&updates);\n+\trte_atomic64_clear(&update_cycles);\n+\trte_atomic64_clear(&checks);\n+\trte_atomic64_clear(&check_cycles);\n+\n+\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\n+\tprintf(\"\\nPerf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Blocking QSBR Check\\n\", num_cores);\n+\n+\tif (all_registered == 1)\n+\t\ttmp_num_cores = num_cores;\n+\telse\n+\t\ttmp_num_cores = TEST_RCU_MAX_LCORE;\n+\n+\tsz = rte_rcu_qsbr_get_memsize(tmp_num_cores);\n+\tt[0] = (struct rte_rcu_qsbr *)rte_zmalloc(\"rcu0\", sz,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t/* QS variable is initialized */\n+\trte_rcu_qsbr_init(t[0], tmp_num_cores);\n+\n+\t/* Shared data structure created */\n+\th[0] = init_hash(0);\n+\tif (h[0] == NULL) {\n+\t\tprintf(\"Hash init failed\\n\");\n+\t\tgoto error;\n+\t}\n+\n+\t/* Reader threads are launched */\n+\tfor (i = 0; i < num_cores; i++)\n+\t\trte_eal_remote_launch(test_rcu_qsbr_hash_reader, NULL,\n+\t\t\t\t\tenabled_core_ids[i]);\n+\n+\tbegin = rte_rdtsc_precise();\n+\n+\tfor (i = 0; i < TOTAL_ENTRY; i++) {\n+\t\t/* Delete elements from the shared data structure */\n+\t\tpos = rte_hash_del_key(h[0], keys + i);\n+\t\tif (pos < 0) {\n+\t\t\tprintf(\"Delete key failed #%d\\n\", keys[i]);\n+\t\t\tgoto error;\n+\t\t}\n+\t\t/* Start the quiescent state query process */\n+\t\ttoken = rte_rcu_qsbr_start(t[0]);\n+\n+\t\t/* Check the quiescent state status */\n+\t\trte_rcu_qsbr_check(t[0], token, true);\n+\t\tif (*hash_data[0][i] != COUNTER_VALUE &&\n+\t\t\t*hash_data[0][i] != 0) {\n+\t\t\tprintf(\"Reader did not complete #%d =  %d\\n\", i,\n+\t\t\t\t\t\t\t*hash_data[0][i]);\n+\t\t\tgoto error;\n+\t\t}\n+\n+\t\tif (rte_hash_free_key_with_position(h[0], pos) < 0) {\n+\t\t\tprintf(\"Failed to free the key #%d\\n\", keys[i]);\n+\t\t\tgoto error;\n+\t\t}\n+\t\trte_free(hash_data[0][i]);\n+\t\thash_data[0][i] = NULL;\n+\t}\n+\n+\tcycles = rte_rdtsc_precise() - begin;\n+\trte_atomic64_add(&check_cycles, cycles);\n+\trte_atomic64_add(&checks, i);\n+\n+\twriter_done = 1;\n+\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\t/* Check return value from threads */\n+\tfor (i = 0; i < num_cores; i++)\n+\t\tif (lcore_config[enabled_core_ids[i]].ret < 0)\n+\t\t\tgoto error;\n+\trte_hash_free(h[0]);\n+\trte_free(keys);\n+\n+\tprintf(\"Following numbers include calls to rte_hash functions\\n\");\n+\tprintf(\"Cycles per 1 update(online/update/offline): %\"PRIi64\"\\n\",\n+\t\trte_atomic64_read(&update_cycles) /\n+\t\trte_atomic64_read(&updates));\n+\n+\tprintf(\"Cycles per 1 check(start, check): %\"PRIi64\"\\n\\n\",\n+\t\trte_atomic64_read(&check_cycles) /\n+\t\trte_atomic64_read(&checks));\n+\n+\trte_free(t[0]);\n+\n+\treturn 0;\n+\n+error:\n+\twriter_done = 1;\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\n+\trte_hash_free(h[0]);\n+\trte_free(keys);\n+\tfor (i = 0; i < TOTAL_ENTRY; i++)\n+\t\trte_free(hash_data[0][i]);\n+\n+\trte_free(t[0]);\n+\n+\treturn -1;\n+}\n+\n+/*\n+ * Functional test:\n+ * Single writer, Single QS variable, Single QSBR query,\n+ * Non-blocking rcu_qsbr_check\n+ */\n+static int\n+test_rcu_qsbr_sw_sv_1qs_non_blocking(void)\n+{\n+\tuint64_t token, begin, cycles;\n+\tint i, ret, tmp_num_cores, sz;\n+\tint32_t pos;\n+\n+\twriter_done = 0;\n+\n+\tprintf(\"Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\\n\", num_cores);\n+\n+\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n+\n+\tif (all_registered == 
1)\n+\t\ttmp_num_cores = num_cores;\n+\telse\n+\t\ttmp_num_cores = TEST_RCU_MAX_LCORE;\n+\n+\tsz = rte_rcu_qsbr_get_memsize(tmp_num_cores);\n+\tt[0] = (struct rte_rcu_qsbr *)rte_zmalloc(\"rcu0\", sz,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t/* QS variable is initialized */\n+\trte_rcu_qsbr_init(t[0], tmp_num_cores);\n+\n+\t/* Shared data structure created */\n+\th[0] = init_hash(0);\n+\tif (h[0] == NULL) {\n+\t\tprintf(\"Hash init failed\\n\");\n+\t\tgoto error;\n+\t}\n+\n+\t/* Reader threads are launched */\n+\tfor (i = 0; i < num_cores; i++)\n+\t\trte_eal_remote_launch(test_rcu_qsbr_hash_reader, NULL,\n+\t\t\t\t\tenabled_core_ids[i]);\n+\n+\tbegin = rte_rdtsc_precise();\n+\n+\tfor (i = 0; i < TOTAL_ENTRY; i++) {\n+\t\t/* Delete elements from the shared data structure */\n+\t\tpos = rte_hash_del_key(h[0], keys + i);\n+\t\tif (pos < 0) {\n+\t\t\tprintf(\"Delete key failed #%d\\n\", keys[i]);\n+\t\t\tgoto error;\n+\t\t}\n+\t\t/* Start the quiescent state query process */\n+\t\ttoken = rte_rcu_qsbr_start(t[0]);\n+\n+\t\t/* Check the quiescent state status */\n+\t\tdo {\n+\t\t\tret = rte_rcu_qsbr_check(t[0], token, false);\n+\t\t} while (ret == 0);\n+\t\tif (*hash_data[0][i] != COUNTER_VALUE &&\n+\t\t\t*hash_data[0][i] != 0) {\n+\t\t\tprintf(\"Reader did not complete  #%d = %d\\n\", i,\n+\t\t\t\t\t\t\t*hash_data[0][i]);\n+\t\t\tgoto error;\n+\t\t}\n+\n+\t\tif (rte_hash_free_key_with_position(h[0], pos) < 0) {\n+\t\t\tprintf(\"Failed to free the key #%d\\n\", keys[i]);\n+\t\t\tgoto error;\n+\t\t}\n+\t\trte_free(hash_data[0][i]);\n+\t\thash_data[0][i] = NULL;\n+\t}\n+\n+\tcycles = rte_rdtsc_precise() - begin;\n+\trte_atomic64_add(&check_cycles, cycles);\n+\trte_atomic64_add(&checks, i);\n+\n+\twriter_done = 1;\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\t/* Check return value from threads */\n+\tfor (i = 0; i < num_cores; i++)\n+\t\tif (lcore_config[enabled_core_ids[i]].ret < 0)\n+\t\t\tgoto error;\n+\trte_hash_free(h[0]);\n+\trte_free(keys);\n+\n+\tprintf(\"Following numbers include calls to rte_hash functions\\n\");\n+\tprintf(\"Cycles per 1 update(online/update/offline): %\"PRIi64\"\\n\",\n+\t\trte_atomic64_read(&update_cycles) /\n+\t\trte_atomic64_read(&updates));\n+\n+\tprintf(\"Cycles per 1 check(start, check): %\"PRIi64\"\\n\\n\",\n+\t\trte_atomic64_read(&check_cycles) /\n+\t\trte_atomic64_read(&checks));\n+\n+\trte_free(t[0]);\n+\n+\treturn 0;\n+\n+error:\n+\twriter_done = 1;\n+\t/* Wait until all readers have exited */\n+\trte_eal_mp_wait_lcore();\n+\n+\trte_hash_free(h[0]);\n+\trte_free(keys);\n+\tfor (i = 0; i < TOTAL_ENTRY; i++)\n+\t\trte_free(hash_data[0][i]);\n+\n+\trte_free(t[0]);\n+\n+\treturn -1;\n+}\n+\n+static int\n+test_rcu_qsbr_main(void)\n+{\n+\trte_atomic64_init(&updates);\n+\trte_atomic64_init(&update_cycles);\n+\trte_atomic64_init(&checks);\n+\trte_atomic64_init(&check_cycles);\n+\n+\tif (get_enabled_cores_mask() != 0)\n+\t\treturn -1;\n+\n+\tprintf(\"Number of cores provided = %d\\n\", num_cores);\n+\tif (num_cores < 2) {\n+\t\tprintf(\"Test failed! Need 2 or more cores\\n\");\n+\t\tgoto test_fail;\n+\t}\n+\tif (num_cores > TEST_RCU_MAX_LCORE) {\n+\t\tprintf(\"Test failed! 
%d cores supported\\n\", TEST_RCU_MAX_LCORE);\n+\t\tgoto test_fail;\n+\t}\n+\n+\tprintf(\"Perf test with all reader threads registered\\n\");\n+\tprintf(\"--------------------------------------------\\n\");\n+\tall_registered = 1;\n+\n+\tif (test_rcu_qsbr_perf() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_rperf() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_wperf() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_sw_sv_1qs() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_sw_sv_1qs_non_blocking() < 0)\n+\t\tgoto test_fail;\n+\n+\t/* Make sure the actual number of cores provided is less than\n+\t * TEST_RCU_MAX_LCORE. This will allow for some threads not\n+\t * to be registered on the QS variable.\n+\t */\n+\tif (num_cores >= TEST_RCU_MAX_LCORE) {\n+\t\tprintf(\"Test failed! number of cores provided should be less than %d\\n\",\n+\t\t\tTEST_RCU_MAX_LCORE);\n+\t\tgoto test_fail;\n+\t}\n+\n+\tprintf(\"Perf test with some of reader threads registered\\n\");\n+\tprintf(\"------------------------------------------------\\n\");\n+\tall_registered = 0;\n+\n+\tif (test_rcu_qsbr_perf() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_rperf() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_wperf() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_sw_sv_1qs() < 0)\n+\t\tgoto test_fail;\n+\n+\tif (test_rcu_qsbr_sw_sv_1qs_non_blocking() < 0)\n+\t\tgoto test_fail;\n+\n+\tprintf(\"\\n\");\n+\n+\treturn 0;\n+\n+test_fail:\n+\treturn -1;\n+}\n+\n+REGISTER_TEST_COMMAND(rcu_qsbr_perf_autotest, test_rcu_qsbr_main);\n",
    "prefixes": [
        "v9",
        "2/4"
    ]
}
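
For the PUT/PATCH operations described at the top of this page, the sketch below shows a partial update. It is an illustration only: it assumes a Patchwork API token belonging to a maintainer of the project (the token value here is hypothetical), and it touches fields that appear in the response above, such as "state" and "archived".

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # hypothetical token; generate a real one in the Patchwork web UI

# PATCH performs a partial update: only the fields named in the JSON body change.
resp = requests.patch(
    BASE + "/patches/53182/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()
updated = resp.json()
print(updated["state"], updated["archived"])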