List patch comments

GET /api/patches/73451/comments/?format=api
HTTP 200 OK
Allow: GET, HEAD, OPTIONS
Content-Type: application/json
Link: <https://patches.dpdk.org/api/patches/73451/comments/?format=api&page=1>; rel="first", <https://patches.dpdk.org/api/patches/73451/comments/?format=api&page=1>; rel="last"
Vary: Accept
[ { "id": 115513, "web_url": "https://patches.dpdk.org/comment/115513/", "msgid": "<0f200402-18e3-93f8-dc8f-a0f254c65032@intel.com>", "list_archive_url": "https://inbox.dpdk.org/dev/0f200402-18e3-93f8-dc8f-a0f254c65032@intel.com", "date": "2020-07-08T12:37:23", "subject": "Re: [dpdk-dev] [PATCH v7 3/3] test/lpm: add RCU integration\n\tperformance tests", "submitter": { "id": 1216, "url": "https://patches.dpdk.org/api/people/1216/?format=api", "name": "Vladimir Medvedkin", "email": "vladimir.medvedkin@intel.com" }, "content": "On 07/07/2020 16:15, Ruifeng Wang wrote:\n> From: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\n>\n> Add performance tests for RCU integration. The performance\n> difference with and without RCU integration is very small\n> (~1% to ~2%) on both Arm and x86 platforms.\n>\n> Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\n> Reviewed-by: Gavin Hu <gavin.hu@arm.com>\n> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>\n> ---\n> app/test/test_lpm_perf.c | 492 ++++++++++++++++++++++++++++++++++++++-\n> 1 file changed, 489 insertions(+), 3 deletions(-)\n>\n> diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c\n> index 489719c40..dfe186426 100644\n> --- a/app/test/test_lpm_perf.c\n> +++ b/app/test/test_lpm_perf.c\n> @@ -1,5 +1,6 @@\n> /* SPDX-License-Identifier: BSD-3-Clause\n> * Copyright(c) 2010-2014 Intel Corporation\n> + * Copyright(c) 2020 Arm Limited\n> */\n> \n> #include <stdio.h>\n> @@ -10,12 +11,27 @@\n> #include <rte_cycles.h>\n> #include <rte_random.h>\n> #include <rte_branch_prediction.h>\n> +#include <rte_malloc.h>\n> #include <rte_ip.h>\n> #include <rte_lpm.h>\n> \n> #include \"test.h\"\n> #include \"test_xmmt_ops.h\"\n> \n> +struct rte_lpm *lpm;\n> +static struct rte_rcu_qsbr *rv;\n> +static volatile uint8_t writer_done;\n> +static volatile uint32_t thr_id;\n> +static uint64_t gwrite_cycles;\n> +static uint64_t gwrites;\n> +/* LPM APIs are not thread safe, use mutex to provide thread safety */\n> +static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER;\n> +\n> +/* Report quiescent state interval every 1024 lookups. 
Larger critical\n> + * sections in reader will result in writer polling multiple times.\n> + */\n> +#define QSBR_REPORTING_INTERVAL 1024\n> +\n> #define TEST_LPM_ASSERT(cond) do { \\\n> \tif (!(cond)) { \\\n> \t\tprintf(\"Error at line %d: \\n\", __LINE__); \\\n> @@ -24,6 +40,7 @@\n> } while(0)\n> \n> #define ITERATIONS (1 << 10)\n> +#define RCU_ITERATIONS 10\n> #define BATCH_SIZE (1 << 12)\n> #define BULK_SIZE 32\n> \n> @@ -35,9 +52,13 @@ struct route_rule {\n> };\n> \n> static struct route_rule large_route_table[MAX_RULE_NUM];\n> +/* Route table for routes with depth > 24 */\n> +struct route_rule large_ldepth_route_table[MAX_RULE_NUM];\n> \n> static uint32_t num_route_entries;\n> +static uint32_t num_ldepth_route_entries;\n> #define NUM_ROUTE_ENTRIES num_route_entries\n> +#define NUM_LDEPTH_ROUTE_ENTRIES num_ldepth_route_entries\n> \n> enum {\n> \tIP_CLASS_A,\n> @@ -191,7 +212,7 @@ static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)\n> \tuint32_t ip_head_mask;\n> \tuint32_t rule_num;\n> \tuint32_t k;\n> -\tstruct route_rule *ptr_rule;\n> +\tstruct route_rule *ptr_rule, *ptr_ldepth_rule;\n> \n> \tif (ip_class == IP_CLASS_A) { /* IP Address class A */\n> \t\tfixed_bit_num = IP_HEAD_BIT_NUM_A;\n> @@ -236,10 +257,20 @@ static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)\n> \t */\n> \tstart = lrand48() & mask;\n> \tptr_rule = &large_route_table[num_route_entries];\n> +\tptr_ldepth_rule = &large_ldepth_route_table[num_ldepth_route_entries];\n> \tfor (k = 0; k < rule_num; k++) {\n> \t\tptr_rule->ip = (start << (RTE_LPM_MAX_DEPTH - depth))\n> \t\t\t| ip_head_mask;\n> \t\tptr_rule->depth = depth;\n> +\t\t/* If the depth of the route is more than 24, store it\n> +\t\t * in another table as well.\n> +\t\t */\n> +\t\tif (depth > 24) {\n> +\t\t\tptr_ldepth_rule->ip = ptr_rule->ip;\n> +\t\t\tptr_ldepth_rule->depth = ptr_rule->depth;\n> +\t\t\tptr_ldepth_rule++;\n> +\t\t\tnum_ldepth_route_entries++;\n> +\t\t}\n> \t\tptr_rule++;\n> \t\tstart = (start + step) & mask;\n> \t}\n> @@ -273,6 +304,7 @@ static void generate_large_route_rule_table(void)\n> \tuint8_t depth;\n> \n> \tnum_route_entries = 0;\n> +\tnum_ldepth_route_entries = 0;\n> \tmemset(large_route_table, 0, sizeof(large_route_table));\n> \n> \tfor (ip_class = IP_CLASS_A; ip_class <= IP_CLASS_C; ip_class++) {\n> @@ -316,10 +348,460 @@ print_route_distribution(const struct route_rule *table, uint32_t n)\n> \tprintf(\"\\n\");\n> }\n> \n> +/* Check condition and return an error if true. 
*/\n> +static uint16_t enabled_core_ids[RTE_MAX_LCORE];\n> +static unsigned int num_cores;\n> +\n> +/* Simple way to allocate thread ids in 0 to RTE_MAX_LCORE space */\n> +static inline uint32_t\n> +alloc_thread_id(void)\n> +{\n> +\tuint32_t tmp_thr_id;\n> +\n> +\ttmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);\n> +\tif (tmp_thr_id >= RTE_MAX_LCORE)\n> +\t\tprintf(\"Invalid thread id %u\\n\", tmp_thr_id);\n> +\n> +\treturn tmp_thr_id;\n> +}\n> +\n> +/*\n> + * Reader thread using rte_lpm data structure without RCU.\n> + */\n> +static int\n> +test_lpm_reader(void *arg)\n> +{\n> +\tint i;\n> +\tuint32_t ip_batch[QSBR_REPORTING_INTERVAL];\n> +\tuint32_t next_hop_return = 0;\n> +\n> +\tRTE_SET_USED(arg);\n> +\tdo {\n> +\t\tfor (i = 0; i < QSBR_REPORTING_INTERVAL; i++)\n> +\t\t\tip_batch[i] = rte_rand();\n> +\n> +\t\tfor (i = 0; i < QSBR_REPORTING_INTERVAL; i++)\n> +\t\t\trte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);\n> +\n> +\t} while (!writer_done);\n> +\n> +\treturn 0;\n> +}\n> +\n> +/*\n> + * Reader thread using rte_lpm data structure with RCU.\n> + */\n> +static int\n> +test_lpm_rcu_qsbr_reader(void *arg)\n> +{\n> +\tint i;\n> +\tuint32_t thread_id = alloc_thread_id();\n> +\tuint32_t ip_batch[QSBR_REPORTING_INTERVAL];\n> +\tuint32_t next_hop_return = 0;\n> +\n> +\tRTE_SET_USED(arg);\n> +\t/* Register this thread to report quiescent state */\n> +\trte_rcu_qsbr_thread_register(rv, thread_id);\n> +\trte_rcu_qsbr_thread_online(rv, thread_id);\n> +\n> +\tdo {\n> +\t\tfor (i = 0; i < QSBR_REPORTING_INTERVAL; i++)\n> +\t\t\tip_batch[i] = rte_rand();\n> +\n> +\t\tfor (i = 0; i < QSBR_REPORTING_INTERVAL; i++)\n> +\t\t\trte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);\n> +\n> +\t\t/* Update quiescent state */\n> +\t\trte_rcu_qsbr_quiescent(rv, thread_id);\n> +\t} while (!writer_done);\n> +\n> +\trte_rcu_qsbr_thread_offline(rv, thread_id);\n> +\trte_rcu_qsbr_thread_unregister(rv, thread_id);\n> +\n> +\treturn 0;\n> +}\n> +\n> +/*\n> + * Writer thread using rte_lpm data structure with RCU.\n> + */\n> +static int\n> +test_lpm_rcu_qsbr_writer(void *arg)\n> +{\n> +\tunsigned int i, j, si, ei;\n> +\tuint64_t begin, total_cycles;\n> +\tuint8_t core_id = (uint8_t)((uintptr_t)arg);\n> +\tuint32_t next_hop_add = 0xAA;\n> +\n> +\tRTE_SET_USED(arg);\n> +\t/* 2 writer threads are used */\n> +\tif (core_id % 2 == 0) {\n> +\t\tsi = 0;\n> +\t\tei = NUM_LDEPTH_ROUTE_ENTRIES / 2;\n> +\t} else {\n> +\t\tsi = NUM_LDEPTH_ROUTE_ENTRIES / 2;\n> +\t\tei = NUM_LDEPTH_ROUTE_ENTRIES;\n> +\t}\n> +\n> +\t/* Measure add/delete. 
*/\n> +\tbegin = rte_rdtsc_precise();\n> +\tfor (i = 0; i < RCU_ITERATIONS; i++) {\n> +\t\t/* Add all the entries */\n> +\t\tfor (j = si; j < ei; j++) {\n> +\t\t\tpthread_mutex_lock(&lpm_mutex);\n> +\t\t\tif (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,\n> +\t\t\t\t\tlarge_ldepth_route_table[j].depth,\n> +\t\t\t\t\tnext_hop_add) != 0) {\n> +\t\t\t\tprintf(\"Failed to add iteration %d, route# %d\\n\",\n> +\t\t\t\t\ti, j);\n> +\t\t\t}\n> +\t\t\tpthread_mutex_unlock(&lpm_mutex);\n> +\t\t}\n> +\n> +\t\t/* Delete all the entries */\n> +\t\tfor (j = si; j < ei; j++) {\n> +\t\t\tpthread_mutex_lock(&lpm_mutex);\n> +\t\t\tif (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,\n> +\t\t\t\tlarge_ldepth_route_table[j].depth) != 0) {\n> +\t\t\t\tprintf(\"Failed to delete iteration %d, route# %d\\n\",\n> +\t\t\t\t\ti, j);\n> +\t\t\t}\n> +\t\t\tpthread_mutex_unlock(&lpm_mutex);\n> +\t\t}\n> +\t}\n> +\n> +\ttotal_cycles = rte_rdtsc_precise() - begin;\n> +\n> +\t__atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);\n> +\t__atomic_fetch_add(&gwrites,\n> +\t\t\t2 * NUM_LDEPTH_ROUTE_ENTRIES * RCU_ITERATIONS,\n> +\t\t\t__ATOMIC_RELAXED);\n> +\n> +\treturn 0;\n> +}\n> +\n> +/*\n> + * Functional test:\n> + * 2 writers, rest are readers\n> + */\n> +static int\n> +test_lpm_rcu_perf_multi_writer(void)\n> +{\n> +\tstruct rte_lpm_config config;\n> +\tsize_t sz;\n> +\tunsigned int i;\n> +\tuint16_t core_id;\n> +\tstruct rte_lpm_rcu_config rcu_cfg = {0};\n> +\n> +\tif (rte_lcore_count() < 3) {\n> +\t\tprintf(\"Not enough cores for lpm_rcu_perf_autotest, expecting at least 3\\n\");\n> +\t\treturn TEST_SKIPPED;\n> +\t}\n> +\n> +\tnum_cores = 0;\n> +\tRTE_LCORE_FOREACH_SLAVE(core_id) {\n> +\t\tenabled_core_ids[num_cores] = core_id;\n> +\t\tnum_cores++;\n> +\t}\n> +\n> +\tprintf(\"\\nPerf test: 2 writers, %d readers, RCU integration enabled\\n\",\n> +\t\tnum_cores - 2);\n> +\n> +\t/* Create LPM table */\n> +\tconfig.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;\n> +\tconfig.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;\n> +\tconfig.flags = 0;\n> +\tlpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n> +\tTEST_LPM_ASSERT(lpm != NULL);\n> +\n> +\t/* Init RCU variable */\n> +\tsz = rte_rcu_qsbr_get_memsize(num_cores);\n> +\trv = (struct rte_rcu_qsbr *)rte_zmalloc(\"rcu0\", sz,\n> +\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n> +\trte_rcu_qsbr_init(rv, num_cores);\n> +\n> +\trcu_cfg.v = rv;\n> +\t/* Assign the RCU variable to LPM */\n> +\tif (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg, NULL) != 0) {\n> +\t\tprintf(\"RCU variable assignment failed\\n\");\n> +\t\tgoto error;\n> +\t}\n> +\n> +\twriter_done = 0;\n> +\t__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);\n> +\t__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);\n> +\n> +\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n> +\n> +\t/* Launch reader threads */\n> +\tfor (i = 2; i < num_cores; i++)\n> +\t\trte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,\n> +\t\t\t\t\tenabled_core_ids[i]);\n> +\n> +\t/* Launch writer threads */\n> +\tfor (i = 0; i < 2; i++)\n> +\t\trte_eal_remote_launch(test_lpm_rcu_qsbr_writer,\n> +\t\t\t\t\t(void *)(uintptr_t)i,\n> +\t\t\t\t\tenabled_core_ids[i]);\n> +\n> +\t/* Wait for writer threads */\n> +\tfor (i = 0; i < 2; i++)\n> +\t\tif (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n> +\t\t\tgoto error;\n> +\n> +\tprintf(\"Total LPM Adds: %d\\n\",\n> +\t\t2 * ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n> +\tprintf(\"Total LPM Deletes: %d\\n\",\n> +\t\t2 * ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n> +\tprintf(\"Average LPM Add/Del: %\"PRIu64\" 
cycles\\n\",\n> +\t\t__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /\n> +\t\t\t__atomic_load_n(&gwrites, __ATOMIC_RELAXED)\n> +\t\t);\n> +\n> +\t/* Wait and check return value from reader threads */\n> +\twriter_done = 1;\n> +\tfor (i = 2; i < num_cores; i++)\n> +\t\tif (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n> +\t\t\tgoto error;\n> +\n> +\trte_lpm_free(lpm);\n> +\trte_free(rv);\n> +\tlpm = NULL;\n> +\trv = NULL;\n> +\n> +\t/* Test without RCU integration */\n> +\tprintf(\"\\nPerf test: 2 writers, %d readers, RCU integration disabled\\n\",\n> +\t\tnum_cores - 2);\n> +\n> +\t/* Create LPM table */\n> +\tconfig.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;\n> +\tconfig.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;\n> +\tconfig.flags = 0;\n> +\tlpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n> +\tTEST_LPM_ASSERT(lpm != NULL);\n> +\n> +\twriter_done = 0;\n> +\t__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);\n> +\t__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);\n> +\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n> +\n> +\t/* Launch reader threads */\n> +\tfor (i = 2; i < num_cores; i++)\n> +\t\trte_eal_remote_launch(test_lpm_reader, NULL,\n> +\t\t\t\t\tenabled_core_ids[i]);\n> +\n> +\t/* Launch writer threads */\n> +\tfor (i = 0; i < 2; i++)\n> +\t\trte_eal_remote_launch(test_lpm_rcu_qsbr_writer,\n> +\t\t\t\t\t(void *)(uintptr_t)i,\n> +\t\t\t\t\tenabled_core_ids[i]);\n> +\n> +\t/* Wait for writer threads */\n> +\tfor (i = 0; i < 2; i++)\n> +\t\tif (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n> +\t\t\tgoto error;\n> +\n> +\tprintf(\"Total LPM Adds: %d\\n\",\n> +\t\t2 * ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n> +\tprintf(\"Total LPM Deletes: %d\\n\",\n> +\t\t2 * ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n> +\tprintf(\"Average LPM Add/Del: %\"PRIu64\" cycles\\n\",\n> +\t\t__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /\n> +\t\t\t__atomic_load_n(&gwrites, __ATOMIC_RELAXED)\n> +\t\t);\n> +\n> +\twriter_done = 1;\n> +\t/* Wait and check return value from reader threads */\n> +\tfor (i = 2; i < num_cores; i++)\n> +\t\tif (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n> +\t\t\tgoto error;\n> +\n> +\trte_lpm_free(lpm);\n> +\n> +\treturn 0;\n> +\n> +error:\n> +\twriter_done = 1;\n> +\t/* Wait until all readers have exited */\n> +\trte_eal_mp_wait_lcore();\n> +\n> +\trte_lpm_free(lpm);\n> +\trte_free(rv);\n> +\n> +\treturn -1;\n> +}\n> +\n> +/*\n> + * Functional test:\n> + * Single writer, rest are readers\n> + */\n> +static int\n> +test_lpm_rcu_perf(void)\n> +{\n> +\tstruct rte_lpm_config config;\n> +\tuint64_t begin, total_cycles;\n> +\tsize_t sz;\n> +\tunsigned int i, j;\n> +\tuint16_t core_id;\n> +\tuint32_t next_hop_add = 0xAA;\n> +\tstruct rte_lpm_rcu_config rcu_cfg = {0};\n> +\n> +\tif (rte_lcore_count() < 2) {\n> +\t\tprintf(\"Not enough cores for lpm_rcu_perf_autotest, expecting at least 2\\n\");\n> +\t\treturn TEST_SKIPPED;\n> +\t}\n> +\n> +\tnum_cores = 0;\n> +\tRTE_LCORE_FOREACH_SLAVE(core_id) {\n> +\t\tenabled_core_ids[num_cores] = core_id;\n> +\t\tnum_cores++;\n> +\t}\n> +\n> +\tprintf(\"\\nPerf test: 1 writer, %d readers, RCU integration enabled\\n\",\n> +\t\tnum_cores);\n> +\n> +\t/* Create LPM table */\n> +\tconfig.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;\n> +\tconfig.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;\n> +\tconfig.flags = 0;\n> +\tlpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n> +\tTEST_LPM_ASSERT(lpm != NULL);\n> +\n> +\t/* Init RCU variable */\n> +\tsz = rte_rcu_qsbr_get_memsize(num_cores);\n> +\trv = (struct rte_rcu_qsbr *)rte_zmalloc(\"rcu0\", 
sz,\n> +\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n> +\trte_rcu_qsbr_init(rv, num_cores);\n> +\n> +\trcu_cfg.v = rv;\n> +\t/* Assign the RCU variable to LPM */\n> +\tif (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg, NULL) != 0) {\n> +\t\tprintf(\"RCU variable assignment failed\\n\");\n> +\t\tgoto error;\n> +\t}\n> +\n> +\twriter_done = 0;\n> +\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n> +\n> +\t/* Launch reader threads */\n> +\tfor (i = 0; i < num_cores; i++)\n> +\t\trte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,\n> +\t\t\t\t\tenabled_core_ids[i]);\n> +\n> +\t/* Measure add/delete. */\n> +\tbegin = rte_rdtsc_precise();\n> +\tfor (i = 0; i < RCU_ITERATIONS; i++) {\n> +\t\t/* Add all the entries */\n> +\t\tfor (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)\n> +\t\t\tif (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,\n> +\t\t\t\t\tlarge_ldepth_route_table[j].depth,\n> +\t\t\t\t\tnext_hop_add) != 0) {\n> +\t\t\t\tprintf(\"Failed to add iteration %d, route# %d\\n\",\n> +\t\t\t\t\ti, j);\n> +\t\t\t\tgoto error;\n> +\t\t\t}\n> +\n> +\t\t/* Delete all the entries */\n> +\t\tfor (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)\n> +\t\t\tif (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,\n> +\t\t\t\tlarge_ldepth_route_table[j].depth) != 0) {\n> +\t\t\t\tprintf(\"Failed to delete iteration %d, route# %d\\n\",\n> +\t\t\t\t\ti, j);\n> +\t\t\t\tgoto error;\n> +\t\t\t}\n> +\t}\n> +\ttotal_cycles = rte_rdtsc_precise() - begin;\n> +\n> +\tprintf(\"Total LPM Adds: %d\\n\", ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n> +\tprintf(\"Total LPM Deletes: %d\\n\",\n> +\t\tITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n> +\tprintf(\"Average LPM Add/Del: %g cycles\\n\",\n> +\t\t(double)total_cycles / (NUM_LDEPTH_ROUTE_ENTRIES * ITERATIONS));\n> +\n> +\twriter_done = 1;\n> +\t/* Wait and check return value from reader threads */\n> +\tfor (i = 0; i < num_cores; i++)\n> +\t\tif (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n> +\t\t\tgoto error;\n> +\n> +\trte_lpm_free(lpm);\n> +\trte_free(rv);\n> +\tlpm = NULL;\n> +\trv = NULL;\n> +\n> +\t/* Test without RCU integration */\n> +\tprintf(\"\\nPerf test: 1 writer, %d readers, RCU integration disabled\\n\",\n> +\t\tnum_cores);\n> +\n> +\t/* Create LPM table */\n> +\tconfig.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;\n> +\tconfig.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;\n> +\tconfig.flags = 0;\n> +\tlpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n> +\tTEST_LPM_ASSERT(lpm != NULL);\n> +\n> +\twriter_done = 0;\n> +\t__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n> +\n> +\t/* Launch reader threads */\n> +\tfor (i = 0; i < num_cores; i++)\n> +\t\trte_eal_remote_launch(test_lpm_reader, NULL,\n> +\t\t\t\t\tenabled_core_ids[i]);\n> +\n> +\t/* Measure add/delete. 
*/\n> +\tbegin = rte_rdtsc_precise();\n> +\tfor (i = 0; i < RCU_ITERATIONS; i++) {\n> +\t\t/* Add all the entries */\n> +\t\tfor (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)\n> +\t\t\tif (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,\n> +\t\t\t\t\tlarge_ldepth_route_table[j].depth,\n> +\t\t\t\t\tnext_hop_add) != 0) {\n> +\t\t\t\tprintf(\"Failed to add iteration %d, route# %d\\n\",\n> +\t\t\t\t\ti, j);\n> +\t\t\t\tgoto error;\n> +\t\t\t}\n> +\n> +\t\t/* Delete all the entries */\n> +\t\tfor (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)\n> +\t\t\tif (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,\n> +\t\t\t\tlarge_ldepth_route_table[j].depth) != 0) {\n> +\t\t\t\tprintf(\"Failed to delete iteration %d, route# %d\\n\",\n> +\t\t\t\t\ti, j);\n> +\t\t\t\tgoto error;\n> +\t\t\t}\n> +\t}\n> +\ttotal_cycles = rte_rdtsc_precise() - begin;\n> +\n> +\tprintf(\"Total LPM Adds: %d\\n\", ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n> +\tprintf(\"Total LPM Deletes: %d\\n\",\n> +\t\tITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n> +\tprintf(\"Average LPM Add/Del: %g cycles\\n\",\n> +\t\t(double)total_cycles / (NUM_LDEPTH_ROUTE_ENTRIES * ITERATIONS));\n> +\n> +\twriter_done = 1;\n> +\t/* Wait and check return value from reader threads */\n> +\tfor (i = 0; i < num_cores; i++)\n> +\t\tif (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n> +\t\t\tprintf(\"Warning: lcore %u not finished.\\n\",\n> +\t\t\t\tenabled_core_ids[i]);\n> +\n> +\trte_lpm_free(lpm);\n> +\n> +\treturn 0;\n> +\n> +error:\n> +\twriter_done = 1;\n> +\t/* Wait until all readers have exited */\n> +\trte_eal_mp_wait_lcore();\n> +\n> +\trte_lpm_free(lpm);\n> +\trte_free(rv);\n> +\n> +\treturn -1;\n> +}\n> +\n> static int\n> test_lpm_perf(void)\n> {\n> -\tstruct rte_lpm *lpm = NULL;\n> \tstruct rte_lpm_config config;\n> \n> \tconfig.max_rules = 2000000;\n> @@ -343,7 +825,7 @@ test_lpm_perf(void)\n> \tlpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n> \tTEST_LPM_ASSERT(lpm != NULL);\n> \n> -\t/* Measue add. */\n\n\nunintentional typo?\n\n\n> +\t/* Measure add. 
*/\n> \tbegin = rte_rdtsc();\n> \n> \tfor (i = 0; i < NUM_ROUTE_ENTRIES; i++) {\n> @@ -478,6 +960,10 @@ test_lpm_perf(void)\n> \trte_lpm_delete_all(lpm);\n> \trte_lpm_free(lpm);\n> \n> +\ttest_lpm_rcu_perf();\n> +\n> +\ttest_lpm_rcu_perf_multi_writer();\n> +\n> \treturn 0;\n> }\n> \n\nAcked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>", "headers": { "Return-Path": "<dev-bounces@dpdk.org>", "X-Original-To": "patchwork@inbox.dpdk.org", "Delivered-To": "patchwork@inbox.dpdk.org", "Received": [ "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id EFA54A0526;\n\tWed, 8 Jul 2020 14:37:29 +0200 (CEST)", "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 8507D1DE1E;\n\tWed, 8 Jul 2020 14:37:29 +0200 (CEST)", "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n by dpdk.org (Postfix) with ESMTP id 83E751DE1C\n for <dev@dpdk.org>; Wed, 8 Jul 2020 14:37:27 +0200 (CEST)", "from fmsmga002.fm.intel.com ([10.253.24.26])\n by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 08 Jul 2020 05:37:26 -0700", "from vmedvedk-mobl.ger.corp.intel.com (HELO [10.213.247.70])\n ([10.213.247.70])\n by fmsmga002.fm.intel.com with ESMTP; 08 Jul 2020 05:37:24 -0700" ], "IronPort-SDR": [ "\n 1leWLa2QkH17byiyjGg6BbQT0xr93MlVCWw4FNMg+9pETNegpDDW1vrxMGdEiiB345E22E12j/\n xZjQd4mRM+iw==", "\n NpQnvpz4vyZPG5EOEkDyPf3Cvf1ccALEujAYAMCqi+UTQQYuKnznfLcZs+vSjyPeZ860r8I8oi\n 2mJ0lcwwF9hg==" ], "X-IronPort-AV": [ "E=McAfee;i=\"6000,8403,9675\"; a=\"127864587\"", "E=Sophos;i=\"5.75,327,1589266800\";\n d=\"scan'208,217\";a=\"127864587\"", "E=Sophos;i=\"5.75,327,1589266800\";\n d=\"scan'208,217\";a=\"315856549\"" ], "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "To": "Ruifeng Wang <ruifeng.wang@arm.com>,\n Bruce Richardson <bruce.richardson@intel.com>", "Cc": "dev@dpdk.org, mdr@ashroe.eu, konstantin.ananyev@intel.com,\n honnappa.nagarahalli@arm.com, nd@arm.com", "References": "<20190906094534.36060-1-ruifeng.wang@arm.com>\n <20200707151554.64431-1-ruifeng.wang@arm.com>\n <20200707151554.64431-4-ruifeng.wang@arm.com>", "From": "\"Medvedkin, Vladimir\" <vladimir.medvedkin@intel.com>", "Message-ID": "<0f200402-18e3-93f8-dc8f-a0f254c65032@intel.com>", "Date": "Wed, 8 Jul 2020 13:37:23 +0100", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:68.0) Gecko/20100101\n Thunderbird/68.10.0", "MIME-Version": "1.0", "In-Reply-To": "<20200707151554.64431-4-ruifeng.wang@arm.com>", "Content-Language": "en-US", "Content-Type": "text/plain; charset=utf-8; format=flowed", "Content-Transfer-Encoding": "7bit", "X-Content-Filtered-By": "Mailman/MimeDel 2.1.15", "Subject": "Re: [dpdk-dev] [PATCH v7 3/3] test/lpm: add RCU integration\n\tperformance tests", "X-BeenThere": "dev@dpdk.org", "X-Mailman-Version": "2.1.15", "Precedence": "list", "List-Id": "DPDK patches and discussions <dev.dpdk.org>", "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>", "List-Archive": "<http://mails.dpdk.org/archives/dev/>", "List-Post": "<mailto:dev@dpdk.org>", "List-Help": "<mailto:dev-request@dpdk.org?subject=help>", "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>", "Errors-To": "dev-bounces@dpdk.org", "Sender": "\"dev\" <dev-bounces@dpdk.org>" }, "addressed": null }, { "id": 115531, "web_url": "https://patches.dpdk.org/comment/115531/", "msgid": 
"<HE1PR0801MB202542DBB0EEABEC2A2B085E9E670@HE1PR0801MB2025.eurprd08.prod.outlook.com>", "list_archive_url": "https://inbox.dpdk.org/dev/HE1PR0801MB202542DBB0EEABEC2A2B085E9E670@HE1PR0801MB2025.eurprd08.prod.outlook.com", "date": "2020-07-08T14:07:51", "subject": "Re: [dpdk-dev] [PATCH v7 3/3] test/lpm: add RCU integration\n\tperformance tests", "submitter": { "id": 1198, "url": "https://patches.dpdk.org/api/people/1198/?format=api", "name": "Ruifeng Wang", "email": "ruifeng.wang@arm.com" }, "content": "From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>\nSent: Wednesday, July 8, 2020 8:37 PM\nTo: Ruifeng Wang <Ruifeng.Wang@arm.com>; Bruce Richardson <bruce.richardson@intel.com>\nCc: dev@dpdk.org; mdr@ashroe.eu; konstantin.ananyev@intel.com; Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>; nd <nd@arm.com>\nSubject: Re: [PATCH v7 3/3] test/lpm: add RCU integration performance tests\n\n\n\nOn 07/07/2020 16:15, Ruifeng Wang wrote:\n\nFrom: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com><mailto:honnappa.nagarahalli@arm.com>\n\n\n\nAdd performance tests for RCU integration. The performance\n\ndifference with and without RCU integration is very small\n\n(~1% to ~2%) on both Arm and x86 platforms.\n\n\n\nSigned-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com><mailto:honnappa.nagarahalli@arm.com>\n\nReviewed-by: Gavin Hu <gavin.hu@arm.com><mailto:gavin.hu@arm.com>\n\nReviewed-by: Ruifeng Wang <ruifeng.wang@arm.com><mailto:ruifeng.wang@arm.com>\n\n---\n\n app/test/test_lpm_perf.c | 492 ++++++++++++++++++++++++++++++++++++++-\n\n 1 file changed, 489 insertions(+), 3 deletions(-)\n\n\n\ndiff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c\n\nindex 489719c40..dfe186426 100644\n\n--- a/app/test/test_lpm_perf.c\n\n+++ b/app/test/test_lpm_perf.c\n\n@@ -1,5 +1,6 @@\n\n /* SPDX-License-Identifier: BSD-3-Clause\n\n * Copyright(c) 2010-2014 Intel Corporation\n\n+ * Copyright(c) 2020 Arm Limited\n\n */\n\n\n\n #include <stdio.h>\n\n@@ -10,12 +11,27 @@\n\n #include <rte_cycles.h>\n\n #include <rte_random.h>\n\n #include <rte_branch_prediction.h>\n\n+#include <rte_malloc.h>\n\n #include <rte_ip.h>\n\n #include <rte_lpm.h>\n\n\n\n #include \"test.h\"\n\n #include \"test_xmmt_ops.h\"\n\n\n\n+struct rte_lpm *lpm;\n\n+static struct rte_rcu_qsbr *rv;\n\n+static volatile uint8_t writer_done;\n\n+static volatile uint32_t thr_id;\n\n+static uint64_t gwrite_cycles;\n\n+static uint64_t gwrites;\n\n+/* LPM APIs are not thread safe, use mutex to provide thread safety */\n\n+static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER;\n\n+\n\n+/* Report quiescent state interval every 1024 lookups. 
Larger critical\n\n+ * sections in reader will result in writer polling multiple times.\n\n+ */\n\n+#define QSBR_REPORTING_INTERVAL 1024\n\n+\n\n #define TEST_LPM_ASSERT(cond) do { \\\n\n if (!(cond)) { \\\n\n printf(\"Error at line %d: \\n\", __LINE__); \\\n\n@@ -24,6 +40,7 @@\n\n } while(0)\n\n\n\n #define ITERATIONS (1 << 10)\n\n+#define RCU_ITERATIONS 10\n\n #define BATCH_SIZE (1 << 12)\n\n #define BULK_SIZE 32\n\n\n\n@@ -35,9 +52,13 @@ struct route_rule {\n\n };\n\n\n\n static struct route_rule large_route_table[MAX_RULE_NUM];\n\n+/* Route table for routes with depth > 24 */\n\n+struct route_rule large_ldepth_route_table[MAX_RULE_NUM];\n\n\n\n static uint32_t num_route_entries;\n\n+static uint32_t num_ldepth_route_entries;\n\n #define NUM_ROUTE_ENTRIES num_route_entries\n\n+#define NUM_LDEPTH_ROUTE_ENTRIES num_ldepth_route_entries\n\n\n\n enum {\n\n IP_CLASS_A,\n\n@@ -191,7 +212,7 @@ static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)\n\n uint32_t ip_head_mask;\n\n uint32_t rule_num;\n\n uint32_t k;\n\n- struct route_rule *ptr_rule;\n\n+ struct route_rule *ptr_rule, *ptr_ldepth_rule;\n\n\n\n if (ip_class == IP_CLASS_A) { /* IP Address class A */\n\n fixed_bit_num = IP_HEAD_BIT_NUM_A;\n\n@@ -236,10 +257,20 @@ static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)\n\n */\n\n start = lrand48() & mask;\n\n ptr_rule = &large_route_table[num_route_entries];\n\n+ ptr_ldepth_rule = &large_ldepth_route_table[num_ldepth_route_entries];\n\n for (k = 0; k < rule_num; k++) {\n\n ptr_rule->ip = (start << (RTE_LPM_MAX_DEPTH - depth))\n\n | ip_head_mask;\n\n ptr_rule->depth = depth;\n\n+ /* If the depth of the route is more than 24, store it\n\n+ * in another table as well.\n\n+ */\n\n+ if (depth > 24) {\n\n+ ptr_ldepth_rule->ip = ptr_rule->ip;\n\n+ ptr_ldepth_rule->depth = ptr_rule->depth;\n\n+ ptr_ldepth_rule++;\n\n+ num_ldepth_route_entries++;\n\n+ }\n\n ptr_rule++;\n\n start = (start + step) & mask;\n\n }\n\n@@ -273,6 +304,7 @@ static void generate_large_route_rule_table(void)\n\n uint8_t depth;\n\n\n\n num_route_entries = 0;\n\n+ num_ldepth_route_entries = 0;\n\n memset(large_route_table, 0, sizeof(large_route_table));\n\n\n\n for (ip_class = IP_CLASS_A; ip_class <= IP_CLASS_C; ip_class++) {\n\n@@ -316,10 +348,460 @@ print_route_distribution(const struct route_rule *table, uint32_t n)\n\n printf(\"\\n\");\n\n }\n\n\n\n+/* Check condition and return an error if true. 
*/\n\n+static uint16_t enabled_core_ids[RTE_MAX_LCORE];\n\n+static unsigned int num_cores;\n\n+\n\n+/* Simple way to allocate thread ids in 0 to RTE_MAX_LCORE space */\n\n+static inline uint32_t\n\n+alloc_thread_id(void)\n\n+{\n\n+ uint32_t tmp_thr_id;\n\n+\n\n+ tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);\n\n+ if (tmp_thr_id >= RTE_MAX_LCORE)\n\n+ printf(\"Invalid thread id %u\\n\", tmp_thr_id);\n\n+\n\n+ return tmp_thr_id;\n\n+}\n\n+\n\n+/*\n\n+ * Reader thread using rte_lpm data structure without RCU.\n\n+ */\n\n+static int\n\n+test_lpm_reader(void *arg)\n\n+{\n\n+ int i;\n\n+ uint32_t ip_batch[QSBR_REPORTING_INTERVAL];\n\n+ uint32_t next_hop_return = 0;\n\n+\n\n+ RTE_SET_USED(arg);\n\n+ do {\n\n+ for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)\n\n+ ip_batch[i] = rte_rand();\n\n+\n\n+ for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)\n\n+ rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);\n\n+\n\n+ } while (!writer_done);\n\n+\n\n+ return 0;\n\n+}\n\n+\n\n+/*\n\n+ * Reader thread using rte_lpm data structure with RCU.\n\n+ */\n\n+static int\n\n+test_lpm_rcu_qsbr_reader(void *arg)\n\n+{\n\n+ int i;\n\n+ uint32_t thread_id = alloc_thread_id();\n\n+ uint32_t ip_batch[QSBR_REPORTING_INTERVAL];\n\n+ uint32_t next_hop_return = 0;\n\n+\n\n+ RTE_SET_USED(arg);\n\n+ /* Register this thread to report quiescent state */\n\n+ rte_rcu_qsbr_thread_register(rv, thread_id);\n\n+ rte_rcu_qsbr_thread_online(rv, thread_id);\n\n+\n\n+ do {\n\n+ for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)\n\n+ ip_batch[i] = rte_rand();\n\n+\n\n+ for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)\n\n+ rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);\n\n+\n\n+ /* Update quiescent state */\n\n+ rte_rcu_qsbr_quiescent(rv, thread_id);\n\n+ } while (!writer_done);\n\n+\n\n+ rte_rcu_qsbr_thread_offline(rv, thread_id);\n\n+ rte_rcu_qsbr_thread_unregister(rv, thread_id);\n\n+\n\n+ return 0;\n\n+}\n\n+\n\n+/*\n\n+ * Writer thread using rte_lpm data structure with RCU.\n\n+ */\n\n+static int\n\n+test_lpm_rcu_qsbr_writer(void *arg)\n\n+{\n\n+ unsigned int i, j, si, ei;\n\n+ uint64_t begin, total_cycles;\n\n+ uint8_t core_id = (uint8_t)((uintptr_t)arg);\n\n+ uint32_t next_hop_add = 0xAA;\n\n+\n\n+ RTE_SET_USED(arg);\n\n+ /* 2 writer threads are used */\n\n+ if (core_id % 2 == 0) {\n\n+ si = 0;\n\n+ ei = NUM_LDEPTH_ROUTE_ENTRIES / 2;\n\n+ } else {\n\n+ si = NUM_LDEPTH_ROUTE_ENTRIES / 2;\n\n+ ei = NUM_LDEPTH_ROUTE_ENTRIES;\n\n+ }\n\n+\n\n+ /* Measure add/delete. 
*/\n\n+ begin = rte_rdtsc_precise();\n\n+ for (i = 0; i < RCU_ITERATIONS; i++) {\n\n+ /* Add all the entries */\n\n+ for (j = si; j < ei; j++) {\n\n+ pthread_mutex_lock(&lpm_mutex);\n\n+ if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,\n\n+ large_ldepth_route_table[j].depth,\n\n+ next_hop_add) != 0) {\n\n+ printf(\"Failed to add iteration %d, route# %d\\n\",\n\n+ i, j);\n\n+ }\n\n+ pthread_mutex_unlock(&lpm_mutex);\n\n+ }\n\n+\n\n+ /* Delete all the entries */\n\n+ for (j = si; j < ei; j++) {\n\n+ pthread_mutex_lock(&lpm_mutex);\n\n+ if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,\n\n+ large_ldepth_route_table[j].depth) != 0) {\n\n+ printf(\"Failed to delete iteration %d, route# %d\\n\",\n\n+ i, j);\n\n+ }\n\n+ pthread_mutex_unlock(&lpm_mutex);\n\n+ }\n\n+ }\n\n+\n\n+ total_cycles = rte_rdtsc_precise() - begin;\n\n+\n\n+ __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);\n\n+ __atomic_fetch_add(&gwrites,\n\n+ 2 * NUM_LDEPTH_ROUTE_ENTRIES * RCU_ITERATIONS,\n\n+ __ATOMIC_RELAXED);\n\n+\n\n+ return 0;\n\n+}\n\n+\n\n+/*\n\n+ * Functional test:\n\n+ * 2 writers, rest are readers\n\n+ */\n\n+static int\n\n+test_lpm_rcu_perf_multi_writer(void)\n\n+{\n\n+ struct rte_lpm_config config;\n\n+ size_t sz;\n\n+ unsigned int i;\n\n+ uint16_t core_id;\n\n+ struct rte_lpm_rcu_config rcu_cfg = {0};\n\n+\n\n+ if (rte_lcore_count() < 3) {\n\n+ printf(\"Not enough cores for lpm_rcu_perf_autotest, expecting at least 3\\n\");\n\n+ return TEST_SKIPPED;\n\n+ }\n\n+\n\n+ num_cores = 0;\n\n+ RTE_LCORE_FOREACH_SLAVE(core_id) {\n\n+ enabled_core_ids[num_cores] = core_id;\n\n+ num_cores++;\n\n+ }\n\n+\n\n+ printf(\"\\nPerf test: 2 writers, %d readers, RCU integration enabled\\n\",\n\n+ num_cores - 2);\n\n+\n\n+ /* Create LPM table */\n\n+ config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;\n\n+ config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;\n\n+ config.flags = 0;\n\n+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n\n+ TEST_LPM_ASSERT(lpm != NULL);\n\n+\n\n+ /* Init RCU variable */\n\n+ sz = rte_rcu_qsbr_get_memsize(num_cores);\n\n+ rv = (struct rte_rcu_qsbr *)rte_zmalloc(\"rcu0\", sz,\n\n+ RTE_CACHE_LINE_SIZE);\n\n+ rte_rcu_qsbr_init(rv, num_cores);\n\n+\n\n+ rcu_cfg.v = rv;\n\n+ /* Assign the RCU variable to LPM */\n\n+ if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg, NULL) != 0) {\n\n+ printf(\"RCU variable assignment failed\\n\");\n\n+ goto error;\n\n+ }\n\n+\n\n+ writer_done = 0;\n\n+ __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);\n\n+ __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);\n\n+\n\n+ __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n\n+\n\n+ /* Launch reader threads */\n\n+ for (i = 2; i < num_cores; i++)\n\n+ rte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,\n\n+ enabled_core_ids[i]);\n\n+\n\n+ /* Launch writer threads */\n\n+ for (i = 0; i < 2; i++)\n\n+ rte_eal_remote_launch(test_lpm_rcu_qsbr_writer,\n\n+ (void *)(uintptr_t)i,\n\n+ enabled_core_ids[i]);\n\n+\n\n+ /* Wait for writer threads */\n\n+ for (i = 0; i < 2; i++)\n\n+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n\n+ goto error;\n\n+\n\n+ printf(\"Total LPM Adds: %d\\n\",\n\n+ 2 * ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n\n+ printf(\"Total LPM Deletes: %d\\n\",\n\n+ 2 * ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n\n+ printf(\"Average LPM Add/Del: %\"PRIu64\" cycles\\n\",\n\n+ __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /\n\n+ __atomic_load_n(&gwrites, __ATOMIC_RELAXED)\n\n+ );\n\n+\n\n+ /* Wait and check return value from reader threads */\n\n+ writer_done = 1;\n\n+ for (i = 2; i < num_cores; i++)\n\n+ 
if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n\n+ goto error;\n\n+\n\n+ rte_lpm_free(lpm);\n\n+ rte_free(rv);\n\n+ lpm = NULL;\n\n+ rv = NULL;\n\n+\n\n+ /* Test without RCU integration */\n\n+ printf(\"\\nPerf test: 2 writers, %d readers, RCU integration disabled\\n\",\n\n+ num_cores - 2);\n\n+\n\n+ /* Create LPM table */\n\n+ config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;\n\n+ config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;\n\n+ config.flags = 0;\n\n+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n\n+ TEST_LPM_ASSERT(lpm != NULL);\n\n+\n\n+ writer_done = 0;\n\n+ __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);\n\n+ __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);\n\n+ __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n\n+\n\n+ /* Launch reader threads */\n\n+ for (i = 2; i < num_cores; i++)\n\n+ rte_eal_remote_launch(test_lpm_reader, NULL,\n\n+ enabled_core_ids[i]);\n\n+\n\n+ /* Launch writer threads */\n\n+ for (i = 0; i < 2; i++)\n\n+ rte_eal_remote_launch(test_lpm_rcu_qsbr_writer,\n\n+ (void *)(uintptr_t)i,\n\n+ enabled_core_ids[i]);\n\n+\n\n+ /* Wait for writer threads */\n\n+ for (i = 0; i < 2; i++)\n\n+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n\n+ goto error;\n\n+\n\n+ printf(\"Total LPM Adds: %d\\n\",\n\n+ 2 * ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n\n+ printf(\"Total LPM Deletes: %d\\n\",\n\n+ 2 * ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n\n+ printf(\"Average LPM Add/Del: %\"PRIu64\" cycles\\n\",\n\n+ __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /\n\n+ __atomic_load_n(&gwrites, __ATOMIC_RELAXED)\n\n+ );\n\n+\n\n+ writer_done = 1;\n\n+ /* Wait and check return value from reader threads */\n\n+ for (i = 2; i < num_cores; i++)\n\n+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n\n+ goto error;\n\n+\n\n+ rte_lpm_free(lpm);\n\n+\n\n+ return 0;\n\n+\n\n+error:\n\n+ writer_done = 1;\n\n+ /* Wait until all readers have exited */\n\n+ rte_eal_mp_wait_lcore();\n\n+\n\n+ rte_lpm_free(lpm);\n\n+ rte_free(rv);\n\n+\n\n+ return -1;\n\n+}\n\n+\n\n+/*\n\n+ * Functional test:\n\n+ * Single writer, rest are readers\n\n+ */\n\n+static int\n\n+test_lpm_rcu_perf(void)\n\n+{\n\n+ struct rte_lpm_config config;\n\n+ uint64_t begin, total_cycles;\n\n+ size_t sz;\n\n+ unsigned int i, j;\n\n+ uint16_t core_id;\n\n+ uint32_t next_hop_add = 0xAA;\n\n+ struct rte_lpm_rcu_config rcu_cfg = {0};\n\n+\n\n+ if (rte_lcore_count() < 2) {\n\n+ printf(\"Not enough cores for lpm_rcu_perf_autotest, expecting at least 2\\n\");\n\n+ return TEST_SKIPPED;\n\n+ }\n\n+\n\n+ num_cores = 0;\n\n+ RTE_LCORE_FOREACH_SLAVE(core_id) {\n\n+ enabled_core_ids[num_cores] = core_id;\n\n+ num_cores++;\n\n+ }\n\n+\n\n+ printf(\"\\nPerf test: 1 writer, %d readers, RCU integration enabled\\n\",\n\n+ num_cores);\n\n+\n\n+ /* Create LPM table */\n\n+ config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;\n\n+ config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;\n\n+ config.flags = 0;\n\n+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n\n+ TEST_LPM_ASSERT(lpm != NULL);\n\n+\n\n+ /* Init RCU variable */\n\n+ sz = rte_rcu_qsbr_get_memsize(num_cores);\n\n+ rv = (struct rte_rcu_qsbr *)rte_zmalloc(\"rcu0\", sz,\n\n+ RTE_CACHE_LINE_SIZE);\n\n+ rte_rcu_qsbr_init(rv, num_cores);\n\n+\n\n+ rcu_cfg.v = rv;\n\n+ /* Assign the RCU variable to LPM */\n\n+ if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg, NULL) != 0) {\n\n+ printf(\"RCU variable assignment failed\\n\");\n\n+ goto error;\n\n+ }\n\n+\n\n+ writer_done = 0;\n\n+ __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n\n+\n\n+ /* Launch reader threads */\n\n+ for (i = 0; i < num_cores; 
i++)\n\n+ rte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,\n\n+ enabled_core_ids[i]);\n\n+\n\n+ /* Measure add/delete. */\n\n+ begin = rte_rdtsc_precise();\n\n+ for (i = 0; i < RCU_ITERATIONS; i++) {\n\n+ /* Add all the entries */\n\n+ for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)\n\n+ if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,\n\n+ large_ldepth_route_table[j].depth,\n\n+ next_hop_add) != 0) {\n\n+ printf(\"Failed to add iteration %d, route# %d\\n\",\n\n+ i, j);\n\n+ goto error;\n\n+ }\n\n+\n\n+ /* Delete all the entries */\n\n+ for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)\n\n+ if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,\n\n+ large_ldepth_route_table[j].depth) != 0) {\n\n+ printf(\"Failed to delete iteration %d, route# %d\\n\",\n\n+ i, j);\n\n+ goto error;\n\n+ }\n\n+ }\n\n+ total_cycles = rte_rdtsc_precise() - begin;\n\n+\n\n+ printf(\"Total LPM Adds: %d\\n\", ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n\n+ printf(\"Total LPM Deletes: %d\\n\",\n\n+ ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n\n+ printf(\"Average LPM Add/Del: %g cycles\\n\",\n\n+ (double)total_cycles / (NUM_LDEPTH_ROUTE_ENTRIES * ITERATIONS));\n\n+\n\n+ writer_done = 1;\n\n+ /* Wait and check return value from reader threads */\n\n+ for (i = 0; i < num_cores; i++)\n\n+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n\n+ goto error;\n\n+\n\n+ rte_lpm_free(lpm);\n\n+ rte_free(rv);\n\n+ lpm = NULL;\n\n+ rv = NULL;\n\n+\n\n+ /* Test without RCU integration */\n\n+ printf(\"\\nPerf test: 1 writer, %d readers, RCU integration disabled\\n\",\n\n+ num_cores);\n\n+\n\n+ /* Create LPM table */\n\n+ config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;\n\n+ config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;\n\n+ config.flags = 0;\n\n+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n\n+ TEST_LPM_ASSERT(lpm != NULL);\n\n+\n\n+ writer_done = 0;\n\n+ __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);\n\n+\n\n+ /* Launch reader threads */\n\n+ for (i = 0; i < num_cores; i++)\n\n+ rte_eal_remote_launch(test_lpm_reader, NULL,\n\n+ enabled_core_ids[i]);\n\n+\n\n+ /* Measure add/delete. 
*/\n\n+ begin = rte_rdtsc_precise();\n\n+ for (i = 0; i < RCU_ITERATIONS; i++) {\n\n+ /* Add all the entries */\n\n+ for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)\n\n+ if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,\n\n+ large_ldepth_route_table[j].depth,\n\n+ next_hop_add) != 0) {\n\n+ printf(\"Failed to add iteration %d, route# %d\\n\",\n\n+ i, j);\n\n+ goto error;\n\n+ }\n\n+\n\n+ /* Delete all the entries */\n\n+ for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)\n\n+ if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,\n\n+ large_ldepth_route_table[j].depth) != 0) {\n\n+ printf(\"Failed to delete iteration %d, route# %d\\n\",\n\n+ i, j);\n\n+ goto error;\n\n+ }\n\n+ }\n\n+ total_cycles = rte_rdtsc_precise() - begin;\n\n+\n\n+ printf(\"Total LPM Adds: %d\\n\", ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n\n+ printf(\"Total LPM Deletes: %d\\n\",\n\n+ ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);\n\n+ printf(\"Average LPM Add/Del: %g cycles\\n\",\n\n+ (double)total_cycles / (NUM_LDEPTH_ROUTE_ENTRIES * ITERATIONS));\n\n+\n\n+ writer_done = 1;\n\n+ /* Wait and check return value from reader threads */\n\n+ for (i = 0; i < num_cores; i++)\n\n+ if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)\n\n+ printf(\"Warning: lcore %u not finished.\\n\",\n\n+ enabled_core_ids[i]);\n\n+\n\n+ rte_lpm_free(lpm);\n\n+\n\n+ return 0;\n\n+\n\n+error:\n\n+ writer_done = 1;\n\n+ /* Wait until all readers have exited */\n\n+ rte_eal_mp_wait_lcore();\n\n+\n\n+ rte_lpm_free(lpm);\n\n+ rte_free(rv);\n\n+\n\n+ return -1;\n\n+}\n\n+\n\n static int\n\n test_lpm_perf(void)\n\n {\n\n- struct rte_lpm *lpm = NULL;\n\n struct rte_lpm_config config;\n\n\n\n config.max_rules = 2000000;\n\n@@ -343,7 +825,7 @@ test_lpm_perf(void)\n\n lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);\n\n TEST_LPM_ASSERT(lpm != NULL);\n\n\n\n- /* Measue add. */\n\n\n\nunintentional typo?\n\n[Ruifeng] Yes, this is a typo fix. I assume it is OK not to be split out.\n\n\n\n+ /* Measure add. 
*/\n\n begin = rte_rdtsc();\n\n\n\n for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {\n\n@@ -478,6 +960,10 @@ test_lpm_perf(void)\n\n rte_lpm_delete_all(lpm);\n\n rte_lpm_free(lpm);\n\n\n\n+ test_lpm_rcu_perf();\n\n+\n\n+ test_lpm_rcu_perf_multi_writer();\n\n+\n\n return 0;\n\n }\n\n\n\n\n\nAcked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com><mailto:vladimir.medvedkin@intel.com>\n\n\n\n\n\n\n\n--\n\nRegards,\n\nVladimir", "headers": { "Return-Path": "<dev-bounces@dpdk.org>", "X-Original-To": "patchwork@inbox.dpdk.org", "Delivered-To": "patchwork@inbox.dpdk.org", "Received": [ "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 15F4DA0526;\n\tWed, 8 Jul 2020 16:08:06 +0200 (CEST)", "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id AB0BD1DB08;\n\tWed, 8 Jul 2020 16:08:05 +0200 (CEST)", "from EUR02-HE1-obe.outbound.protection.outlook.com\n (mail-eopbgr10078.outbound.protection.outlook.com [40.107.1.78])\n by dpdk.org (Postfix) with ESMTP id 8B52C1D5F9\n for <dev@dpdk.org>; Wed, 8 Jul 2020 16:08:04 +0200 (CEST)", "from AM0PR10CA0060.EURPRD10.PROD.OUTLOOK.COM (2603:10a6:20b:150::40)\n by VI1PR08MB3582.eurprd08.prod.outlook.com (2603:10a6:803:89::18)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.3153.24; Wed, 8 Jul\n 2020 14:08:01 +0000", "from VE1EUR03FT004.eop-EUR03.prod.protection.outlook.com\n (2603:10a6:20b:150:cafe::8e) by AM0PR10CA0060.outlook.office365.com\n (2603:10a6:20b:150::40) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.3153.23 via Frontend\n Transport; Wed, 8 Jul 2020 14:08:01 +0000", "from 64aa7808-outbound-1.mta.getcheckrecipient.com (63.35.35.123) by\n VE1EUR03FT004.mail.protection.outlook.com (10.152.18.106) with\n Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.3174.21 via Frontend Transport; Wed, 8 Jul 2020 14:08:00 +0000", "(\"Tessian outbound 1c27ecaec3d6:v62\");\n Wed, 08 Jul 2020 14:08:00 +0000", "from a144ca08809d.1\n by 64aa7808-outbound-1.mta.getcheckrecipient.com id\n DB75D877-0ABC-49D2-803E-8F74FB5EC0D9.1;\n Wed, 08 Jul 2020 14:07:55 +0000", "from EUR04-DB3-obe.outbound.protection.outlook.com\n by 64aa7808-outbound-1.mta.getcheckrecipient.com with ESMTPS id\n a144ca08809d.1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384);\n Wed, 08 Jul 2020 14:07:55 +0000", "from HE1PR0801MB2025.eurprd08.prod.outlook.com (2603:10a6:3:50::14)\n by HE1PR0802MB2507.eurprd08.prod.outlook.com (2603:10a6:3:e1::8) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.3153.29; Wed, 8 Jul\n 2020 14:07:52 +0000", "from HE1PR0801MB2025.eurprd08.prod.outlook.com\n ([fe80::e863:15c9:b803:6533]) by HE1PR0801MB2025.eurprd08.prod.outlook.com\n ([fe80::e863:15c9:b803:6533%7]) with mapi id 15.20.3174.021; Wed, 8 Jul 2020\n 14:07:52 +0000" ], "DKIM-Signature": [ "v=1; a=rsa-sha256; c=relaxed/relaxed; d=armh.onmicrosoft.com;\n s=selector2-armh-onmicrosoft-com;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=4a6bj4hl79ZYeTGxWJN7k6SwzqPc7htuLMAVtL6Fnrw=;\n b=xWV0KB0ETiab/IagF1vsdC67zNhzk8q1A5/54WxJ6rikHINJQAIhR5ssbXdVVmVv37CtYxGCK5tdtUXa0z41Y/VI5H17vwI4T8umZPM6Hj+rxh/r1WDbCl0c60aOF43CG9xsjTNz8Mihby5DT+nyJ14Pfeibw6gzmreJgB01374=", "v=1; a=rsa-sha256; c=relaxed/relaxed; d=armh.onmicrosoft.com;\n s=selector2-armh-onmicrosoft-com;\n 
h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=4a6bj4hl79ZYeTGxWJN7k6SwzqPc7htuLMAVtL6Fnrw=;\n b=xWV0KB0ETiab/IagF1vsdC67zNhzk8q1A5/54WxJ6rikHINJQAIhR5ssbXdVVmVv37CtYxGCK5tdtUXa0z41Y/VI5H17vwI4T8umZPM6Hj+rxh/r1WDbCl0c60aOF43CG9xsjTNz8Mihby5DT+nyJ14Pfeibw6gzmreJgB01374=" ], "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 63.35.35.123)\n smtp.mailfrom=arm.com; dpdk.org; dkim=pass (signature was verified)\n header.d=armh.onmicrosoft.com;dpdk.org; dmarc=bestguesspass action=none\n header.from=arm.com;", "Received-SPF": "Pass (protection.outlook.com: domain of arm.com designates\n 63.35.35.123 as permitted sender) receiver=protection.outlook.com;\n client-ip=63.35.35.123; helo=64aa7808-outbound-1.mta.getcheckrecipient.com;", "X-CR-MTA-TID": "64aa7808", "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Ot+HTFGFpgef772O9WgU9eaeH1NmrpRbBgmsMxMuIglNwndDnfbENBPNt6vhFJs0UIWBcsaWSxCD22LoN/WSkoiMmYA95lLXPTiMH5Mv17uflAPlPUNBv/nSonFLX/YCEkbrHKr7OHnz6mRic6N9e8WNFILJOyYoN24TPo+Svuh7OiVjsTDxksAHLXyOY3c6Xq2A1FoUqNQtNG4F2A8lq2UiHSe0kWPa+2HpdG8N7x0TUJ3mEfPDiyg11fbkio1c71qOzmrxmFhw7V5RQs2RFdl6dXXoVDGk4z1N6skwtCFI5kY8UaeZDLaCLRryecV/bvuXthxwa7pwED4nLRC2OQ==", "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=4a6bj4hl79ZYeTGxWJN7k6SwzqPc7htuLMAVtL6Fnrw=;\n b=h+aMIizGkLm3HqR+JtFc2pGelhlp/qLBY6TAXv7kBqSIgusNcetv3QEh2EnbAjfsEdZLI3iQgFDgxG6LU27/6+7qnDCM0go6UmC8VKQ5FZ6wUECXcAjXWK2tp8V6jCH3tHfpazrNpLJP0D0oIJTDIth41XDW4a8tD1vcO2CvYS2zt16BnoSXC0tYSmCeedFBommOHUaZyijr3f3U/DjL5f4QZUhL5vgqoUIyf6foKqGH86Mpk1Gp0uRWtywnWPvSYpmlaGa8f3FVDABFa+KH52rDtOjBh9cWAY7oK+eMmZJukmcCJX62xCMuoC3hgn1PCc87E0fA1BU+E4phfQ3SrA==", "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass\n smtp.mailfrom=arm.com; dmarc=pass action=none header.from=arm.com; dkim=pass\n header.d=arm.com; arc=none", "From": "Ruifeng Wang <Ruifeng.Wang@arm.com>", "To": "\"Medvedkin, Vladimir\" <vladimir.medvedkin@intel.com>, Bruce Richardson\n <bruce.richardson@intel.com>", "CC": "\"dev@dpdk.org\" <dev@dpdk.org>, \"mdr@ashroe.eu\" <mdr@ashroe.eu>,\n \"konstantin.ananyev@intel.com\" <konstantin.ananyev@intel.com>, Honnappa\n Nagarahalli <Honnappa.Nagarahalli@arm.com>, nd <nd@arm.com>, nd <nd@arm.com>", "Thread-Topic": "[PATCH v7 3/3] test/lpm: add RCU integration performance tests", "Thread-Index": "AQHWVSSlehTbtexTd0aaNHYNJ7Fa1Kj9tq+A", "Date": "Wed, 8 Jul 2020 14:07:51 +0000", "Message-ID": "\n <HE1PR0801MB202542DBB0EEABEC2A2B085E9E670@HE1PR0801MB2025.eurprd08.prod.outlook.com>", "References": "<20190906094534.36060-1-ruifeng.wang@arm.com>\n <20200707151554.64431-1-ruifeng.wang@arm.com>\n <20200707151554.64431-4-ruifeng.wang@arm.com>\n <0f200402-18e3-93f8-dc8f-a0f254c65032@intel.com>", "In-Reply-To": "<0f200402-18e3-93f8-dc8f-a0f254c65032@intel.com>", "Accept-Language": "en-US", "Content-Language": "en-US", "X-MS-Has-Attach": "", "X-MS-TNEF-Correlator": "", "x-ts-tracking-id": "ec46f98f-0757-48da-ab5d-d4cc1ede8eb1.0", "x-checkrecipientchecked": "true", "Authentication-Results-Original": "intel.com; dkim=none (message not signed)\n header.d=none;intel.com; dmarc=none action=none header.from=arm.com;", "x-originating-ip": "[203.126.0.113]", "x-ms-publictraffictype": "Email", "X-MS-Office365-Filtering-HT": "Tenant", "X-MS-Office365-Filtering-Correlation-Id": "303277ea-7d22-499f-d571-08d8234852f3", "x-ms-traffictypediagnostic": 
"HE1PR0802MB2507:|VI1PR08MB3582:", "x-ms-exchange-transport-forked": "True", "X-Microsoft-Antispam-PRVS": "\n <VI1PR08MB35828CA545111BB1C22381819E670@VI1PR08MB3582.eurprd08.prod.outlook.com>", "x-checkrecipientrouted": "true", "nodisclaimer": "true", "x-ms-oob-tlc-oobclassifiers": "OLM:4941;OLM:4941;", "x-forefront-prvs": [ "04583CED1A", "04583CED1A" ], "X-MS-Exchange-SenderADCheck": "1", "X-Microsoft-Antispam-Untrusted": "BCL:0;", "X-Microsoft-Antispam-Message-Info-Original": "\n ikKLk6sX5zqQUevBiWZ6UqsDPx5ksjyurpinpiFsYtyH4MFSi07wVbzOhGVbk6hwAOQeRMqUR2sw+jDhzWLTGBemXv9UQJpNZJyfcQTa++AUuBASpNLxTIdi9kzjgEV/7Pvc49aqQoCLD2kfFV3ORJyVvbnV1LlE3zxWAFW+OpeVPeT8u/3Gj0Fie8Pfc9C1B9LiDXRvj/rgEPDmY2Qrebgta3Y26KamFwGbIDdikD+xpunBSrQWw6pRIKJdiNdNFyaipKJw4373F8mBHy8tyO2hNXu+x7ugdBQyceLwridI6vY29usYpXqZ20b/oaxl", "X-Forefront-Antispam-Report-Untrusted": "CIP:255.255.255.255; CTRY:; LANG:en;\n SCL:1; SRV:; IPV:NLI; SFV:NSPM; H:HE1PR0801MB2025.eurprd08.prod.outlook.com;\n PTR:; CAT:NONE; SFTY:;\n SFS:(4636009)(346002)(136003)(396003)(376002)(39860400002)(366004)(26005)(7696005)(4326008)(186003)(86362001)(9686003)(55016002)(33656002)(110136005)(54906003)(316002)(30864003)(8676002)(8936002)(76116006)(6506007)(83380400001)(53546011)(64756008)(66556008)(66476007)(66446008)(66946007)(5660300002)(52536014)(71200400001)(478600001)(2906002)(579004);\n DIR:OUT; SFP:1101;", "x-ms-exchange-antispam-messagedata": "\n WoKn+GLXr24taE/MebodL9cJ9w9OMMKCFtQSlWAk5XOKhR+APwBqVdVbHxE7rcwd92BcO2RQTo+10GzfZjKrM/pcsw0uVa9fgSbxFo5qzzpejAQIyCoXNbc3LCRqiZC90/OOYn6QAdutYdxPEy2oYrZbI4ws5kQdFT99/2FJtPEl0LTdlSqNAvcynPjxuJ8pmKgS5qHKDzgBXB7r9ar85HVGOjYBKjJ0nuTkNBEExftrL29CJxJ82Bx10Z4E/cuUBITKJUpWSzKLj6MAhdzJooKNKIeJIAT+2rOXofzIQAX40X7jfcmbWNND3hpnv4lJO8MiuPJ4fAAZFWn7nLbfMaTF9P/sHFbJ/EccLhVNuKg2qPXGTLDln/DiN9vONi2uaBvM0TU0l8pxo4XYHochj/G5GzT7CBcDKe1C5c9r6pavhXJDZCVMTAvyzw6ePkfWHNqVHt7oDx0wIuyYnQiJpZg6G7Gz7FxftxUQtdFbF5I=", "MIME-Version": "1.0", "X-MS-Exchange-Transport-CrossTenantHeadersStamped": [ "HE1PR0802MB2507", "VI1PR08MB3582" ], "Original-Authentication-Results": "intel.com; dkim=none (message not signed)\n header.d=none;intel.com; dmarc=none action=none header.from=arm.com;", "X-EOPAttributedMessage": "0", "X-MS-Exchange-Transport-CrossTenantHeadersStripped": "\n VE1EUR03FT004.eop-EUR03.prod.protection.outlook.com", "X-Forefront-Antispam-Report": "CIP:63.35.35.123; CTRY:IE; LANG:en; SCL:1; SRV:;\n IPV:CAL; SFV:NSPM; H:64aa7808-outbound-1.mta.getcheckrecipient.com;\n PTR:ec2-63-35-35-123.eu-west-1.compute.amazonaws.com; CAT:NONE; SFTY:;\n SFS:(4636009)(396003)(39860400002)(376002)(136003)(346002)(46966005)(9686003)(5660300002)(52536014)(81166007)(8936002)(336012)(316002)(82310400002)(70206006)(86362001)(356005)(30864003)(36906005)(70586007)(47076004)(186003)(82740400003)(33964004)(53546011)(110136005)(2906002)(8676002)(54906003)(478600001)(55016002)(7696005)(26005)(33656002)(4326008)(6506007)(83380400001)(559001)(579004);\n DIR:OUT; SFP:1101;", "X-MS-Office365-Filtering-Correlation-Id-Prvs": "\n d41a4f6d-17c4-4d2c-c652-08d823484dbc", "X-Forefront-PRVS": [ "04583CED1A", "04583CED1A" ], "X-Microsoft-Antispam": "BCL:0;", "X-Microsoft-Antispam-Message-Info": "\n 
CqNYtJsnc7B/ovi8DVLDblvhNfqVhT1dUiM584DpcHYe5z82wN2QnKht7x1cu1KYy8HZPmOuWjsO+PcQKR+EkHDioqMvFDjAnxJiNziT54N1AcennU6HZqdog/3ejaEiC6n6olebdI1GCFKGi8a9O9VEt3gFzRbW197w8XPsqocKCuHOEfia35VzXqoh0QbVImmuwJN4xxBKUHB9QeRha/HVWt71+zkxzTpRcwGDsBpufWCcbfzMURcFv0bdUqvvzQlt3P8+W061A+Exy//3Mjv5gvuOs46vGEg10KK1teQjxXGP+Xj0DSNMgAkRIdd6hrvJT23bHYPNh19cS51CIiDQLROc1oFNVktTgi4OPmZFG0jjtOd1Hz8Rl47kKOpOqw0+Ad98495/ZAnffHxVgA==", "X-OriginatorOrg": "arm.com", "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "08 Jul 2020 14:08:00.7765 (UTC)", "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 303277ea-7d22-499f-d571-08d8234852f3", "X-MS-Exchange-CrossTenant-Id": "f34e5979-57d9-4aaa-ad4d-b122a662184d", "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=f34e5979-57d9-4aaa-ad4d-b122a662184d; Ip=[63.35.35.123];\n Helo=[64aa7808-outbound-1.mta.getcheckrecipient.com]", "X-MS-Exchange-CrossTenant-AuthSource": "\n VE1EUR03FT004.eop-EUR03.prod.protection.outlook.com", "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous", "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem", "Content-Type": "text/plain; charset=\"utf-8\"", "Content-Transfer-Encoding": "base64", "X-Content-Filtered-By": "Mailman/MimeDel 2.1.15", "Subject": "Re: [dpdk-dev] [PATCH v7 3/3] test/lpm: add RCU integration\n\tperformance tests", "X-BeenThere": "dev@dpdk.org", "X-Mailman-Version": "2.1.15", "Precedence": "list", "List-Id": "DPDK patches and discussions <dev.dpdk.org>", "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>", "List-Archive": "<http://mails.dpdk.org/archives/dev/>", "List-Post": "<mailto:dev@dpdk.org>", "List-Help": "<mailto:dev-request@dpdk.org?subject=help>", "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>", "Errors-To": "dev-bounces@dpdk.org", "Sender": "\"dev\" <dev-bounces@dpdk.org>" }, "addressed": null } ]
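A minimal client sketch (not part of the API response above): assuming the third-party `requests` library, it fetches the same endpoint shown at the top of this page and prints a one-line summary per comment, using only fields visible in the example payload ("id", "date", "subject", "submitter").

    # Sketch: list the comments on a Patchwork patch and summarise each one.
    # Assumes the `requests` library; patch id 73451 matches the request above.
    import requests

    PATCH_ID = 73451
    URL = f"https://patches.dpdk.org/api/patches/{PATCH_ID}/comments/"

    resp = requests.get(URL, params={"page": 1}, timeout=30)
    resp.raise_for_status()

    # The body is a JSON array of comment objects, as in the example above.
    for comment in resp.json():
        who = comment["submitter"]["name"]
        print(f'{comment["id"]}  {comment["date"]}  {who}: {comment["subject"]}')

    # Pagination is carried in the Link response header (rel="first"/"last",
    # plus rel="next" when more pages exist); `requests` parses it into
    # resp.links, e.g. resp.links.get("next", {}).get("url").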