get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (the complete resource representation is replaced).

GET /api/patches/42572/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 42572,
    "url": "http://patches.dpdk.org/api/patches/42572/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1530906406-289697-6-git-send-email-yipeng1.wang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1530906406-289697-6-git-send-email-yipeng1.wang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1530906406-289697-6-git-send-email-yipeng1.wang@intel.com",
    "date": "2018-07-06T19:46:43",
    "name": "[v3,5/8] hash: add read and write concurrency support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b9257f4af3751cd33d5d7d8fcac4f0efd9cfea93",
    "submitter": {
        "id": 754,
        "url": "http://patches.dpdk.org/api/people/754/?format=api",
        "name": "Wang, Yipeng1",
        "email": "yipeng1.wang@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1530906406-289697-6-git-send-email-yipeng1.wang@intel.com/mbox/",
    "series": [
        {
            "id": 463,
            "url": "http://patches.dpdk.org/api/series/463/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=463",
            "date": "2018-07-06T19:46:38",
            "name": "Add read-write concurrency to rte_hash library",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/463/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/42572/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/42572/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 5100B1BE4F;\n\tSat,  7 Jul 2018 04:54:12 +0200 (CEST)",
            "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n\tby dpdk.org (Postfix) with ESMTP id CFF041BE23\n\tfor <dev@dpdk.org>; Sat,  7 Jul 2018 04:53:58 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t06 Jul 2018 19:53:55 -0700",
            "from skx-yipeng.jf.intel.com ([10.54.81.175])\n\tby FMSMGA003.fm.intel.com with ESMTP; 06 Jul 2018 19:53:44 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.51,319,1526367600\"; d=\"scan'208\";a=\"62888549\"",
        "From": "Yipeng Wang <yipeng1.wang@intel.com>",
        "To": "pablo.de.lara.guarch@intel.com",
        "Cc": "dev@dpdk.org, yipeng1.wang@intel.com, bruce.richardson@intel.com,\n\thonnappa.nagarahalli@arm.com, vguvva@caviumnetworks.com,\n\tbrijesh.s.singh@gmail.com",
        "Date": "Fri,  6 Jul 2018 12:46:43 -0700",
        "Message-Id": "<1530906406-289697-6-git-send-email-yipeng1.wang@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1530906406-289697-1-git-send-email-yipeng1.wang@intel.com>",
        "References": "<1528455078-328182-1-git-send-email-yipeng1.wang@intel.com>\n\t<1530906406-289697-1-git-send-email-yipeng1.wang@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 5/8] hash: add read and write concurrency\n\tsupport",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The existing implementation of librte_hash does not support read-write\nconcurrency. This commit implements read-write safety using rte_rwlock\nand rte_rwlock TM version if hardware transactional memory is available.\n\nBoth multi-writer and read-write concurrency is protected by rte_rwlock\nnow. The x86 specific header file is removed since the x86 specific RTM\nfunction is not called directly by rte hash now.\n\nSigned-off-by: Yipeng Wang <yipeng1.wang@intel.com>\n---\n lib/librte_hash/meson.build           |   1 -\n lib/librte_hash/rte_cuckoo_hash.c     | 520 ++++++++++++++++++++++------------\n lib/librte_hash/rte_cuckoo_hash.h     |  18 +-\n lib/librte_hash/rte_cuckoo_hash_x86.h | 167 -----------\n lib/librte_hash/rte_hash.h            |   3 +\n 5 files changed, 348 insertions(+), 361 deletions(-)\n delete mode 100644 lib/librte_hash/rte_cuckoo_hash_x86.h",
    "diff": "diff --git a/lib/librte_hash/meson.build b/lib/librte_hash/meson.build\nindex e139e1d..efc06ed 100644\n--- a/lib/librte_hash/meson.build\n+++ b/lib/librte_hash/meson.build\n@@ -6,7 +6,6 @@ headers = files('rte_cmp_arm64.h',\n \t'rte_cmp_x86.h',\n \t'rte_crc_arm64.h',\n \t'rte_cuckoo_hash.h',\n-\t'rte_cuckoo_hash_x86.h',\n \t'rte_fbk_hash.h',\n \t'rte_hash_crc.h',\n \t'rte_hash.h',\ndiff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c\nindex 109da92..032e213 100644\n--- a/lib/librte_hash/rte_cuckoo_hash.c\n+++ b/lib/librte_hash/rte_cuckoo_hash.c\n@@ -31,9 +31,6 @@\n #include \"rte_hash.h\"\n #include \"rte_cuckoo_hash.h\"\n \n-#if defined(RTE_ARCH_X86)\n-#include \"rte_cuckoo_hash_x86.h\"\n-#endif\n \n TAILQ_HEAD(rte_hash_list, rte_tailq_entry);\n \n@@ -93,8 +90,10 @@ rte_hash_create(const struct rte_hash_parameters *params)\n \tvoid *buckets = NULL;\n \tchar ring_name[RTE_RING_NAMESIZE];\n \tunsigned num_key_slots;\n-\tunsigned hw_trans_mem_support = 0;\n \tunsigned i;\n+\tunsigned int hw_trans_mem_support = 0, multi_writer_support = 0;\n+\tunsigned int readwrite_concur_support = 0;\n+\n \trte_hash_function default_hash_func = (rte_hash_function)rte_jhash;\n \n \thash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);\n@@ -118,8 +117,16 @@ rte_hash_create(const struct rte_hash_parameters *params)\n \tif (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)\n \t\thw_trans_mem_support = 1;\n \n+\tif (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD)\n+\t\tmulti_writer_support = 1;\n+\n+\tif (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {\n+\t\treadwrite_concur_support = 1;\n+\t\tmulti_writer_support = 1;\n+\t}\n+\n \t/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */\n-\tif (hw_trans_mem_support)\n+\tif (multi_writer_support)\n \t\t/*\n \t\t * Increase number of slots by total number of indices\n \t\t * that can be stored in the lcore caches\n@@ -233,7 
+240,7 @@ rte_hash_create(const struct rte_hash_parameters *params)\n \th->cmp_jump_table_idx = KEY_OTHER_BYTES;\n #endif\n \n-\tif (hw_trans_mem_support) {\n+\tif (multi_writer_support) {\n \t\th->local_free_slots = rte_zmalloc_socket(NULL,\n \t\t\t\tsizeof(struct lcore_cache) * RTE_MAX_LCORE,\n \t\t\t\tRTE_CACHE_LINE_SIZE, params->socket_id);\n@@ -261,6 +268,8 @@ rte_hash_create(const struct rte_hash_parameters *params)\n \th->key_store = k;\n \th->free_slots = r;\n \th->hw_trans_mem_support = hw_trans_mem_support;\n+\th->multi_writer_support = multi_writer_support;\n+\th->readwrite_concur_support = readwrite_concur_support;\n \n #if defined(RTE_ARCH_X86)\n \tif (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))\n@@ -271,24 +280,17 @@ rte_hash_create(const struct rte_hash_parameters *params)\n #endif\n \t\th->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;\n \n-\t/* Turn on multi-writer only with explicit flat from user and TM\n+\t/* Turn on multi-writer only with explicit flag from user and TM\n \t * support.\n \t */\n-\tif (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {\n-\t\tif (h->hw_trans_mem_support) {\n-\t\t\th->add_key = ADD_KEY_MULTIWRITER_TM;\n-\t\t} else {\n-\t\t\th->add_key = ADD_KEY_MULTIWRITER;\n-\t\t\th->multiwriter_lock = rte_malloc(NULL,\n-\t\t\t\t\t\t\tsizeof(rte_spinlock_t),\n-\t\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n-\t\t\tif (h->multiwriter_lock == NULL)\n-\t\t\t\tgoto err_unlock;\n-\n-\t\t\trte_spinlock_init(h->multiwriter_lock);\n-\t\t}\n-\t} else\n-\t\th->add_key = ADD_KEY_SINGLEWRITER;\n+\tif (h->multi_writer_support) {\n+\t\th->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\t\tif (h->readwrite_lock == NULL)\n+\t\t\tgoto err_unlock;\n+\n+\t\trte_rwlock_init(h->readwrite_lock);\n+\t}\n \n \t/* Populate free slots ring. Entry zero is reserved for key misses. 
*/\n \tfor (i = 1; i < num_key_slots; i++)\n@@ -338,11 +340,10 @@ rte_hash_free(struct rte_hash *h)\n \n \trte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);\n \n-\tif (h->hw_trans_mem_support)\n+\tif (h->multi_writer_support) {\n \t\trte_free(h->local_free_slots);\n-\n-\tif (h->add_key == ADD_KEY_MULTIWRITER)\n-\t\trte_free(h->multiwriter_lock);\n+\t\trte_free(h->readwrite_lock);\n+\t}\n \trte_ring_free(h->free_slots);\n \trte_free(h->key_store);\n \trte_free(h->buckets);\n@@ -369,6 +370,44 @@ rte_hash_secondary_hash(const hash_sig_t primary_hash)\n \treturn primary_hash ^ ((tag + 1) * alt_bits_xor);\n }\n \n+/* Read write locks implemented using rte_rwlock */\n+static inline void\n+__hash_rw_writer_lock(const struct rte_hash *h)\n+{\n+\tif (h->multi_writer_support && h->hw_trans_mem_support)\n+\t\trte_rwlock_write_lock_tm(h->readwrite_lock);\n+\telse if (h->multi_writer_support)\n+\t\trte_rwlock_write_lock(h->readwrite_lock);\n+}\n+\n+\n+static inline void\n+__hash_rw_reader_lock(const struct rte_hash *h)\n+{\n+\tif (h->readwrite_concur_support && h->hw_trans_mem_support)\n+\t\trte_rwlock_read_lock_tm(h->readwrite_lock);\n+\telse if (h->readwrite_concur_support)\n+\t\trte_rwlock_read_lock(h->readwrite_lock);\n+}\n+\n+static inline void\n+__hash_rw_writer_unlock(const struct rte_hash *h)\n+{\n+\tif (h->multi_writer_support && h->hw_trans_mem_support)\n+\t\trte_rwlock_write_unlock_tm(h->readwrite_lock);\n+\telse if (h->multi_writer_support)\n+\t\trte_rwlock_write_unlock(h->readwrite_lock);\n+}\n+\n+static inline void\n+__hash_rw_reader_unlock(const struct rte_hash *h)\n+{\n+\tif (h->readwrite_concur_support && h->hw_trans_mem_support)\n+\t\trte_rwlock_read_unlock_tm(h->readwrite_lock);\n+\telse if (h->readwrite_concur_support)\n+\t\trte_rwlock_read_unlock(h->readwrite_lock);\n+}\n+\n void\n rte_hash_reset(struct rte_hash *h)\n {\n@@ -378,6 +417,7 @@ rte_hash_reset(struct rte_hash *h)\n \tif (h == NULL)\n \t\treturn;\n \n+\t__hash_rw_writer_lock(h);\n 
\tmemset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));\n \tmemset(h->key_store, 0, h->key_entry_size * (h->entries + 1));\n \n@@ -386,7 +426,7 @@ rte_hash_reset(struct rte_hash *h)\n \t\trte_pause();\n \n \t/* Repopulate the free slots ring. Entry zero is reserved for key misses */\n-\tif (h->hw_trans_mem_support)\n+\tif (h->multi_writer_support)\n \t\ttot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *\n \t\t\t\t\t(LCORE_CACHE_SIZE - 1);\n \telse\n@@ -395,77 +435,12 @@ rte_hash_reset(struct rte_hash *h)\n \tfor (i = 1; i < tot_ring_cnt + 1; i++)\n \t\trte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));\n \n-\tif (h->hw_trans_mem_support) {\n+\tif (h->multi_writer_support) {\n \t\t/* Reset local caches per lcore */\n \t\tfor (i = 0; i < RTE_MAX_LCORE; i++)\n \t\t\th->local_free_slots[i].len = 0;\n \t}\n-}\n-\n-/* Search for an entry that can be pushed to its alternative location */\n-static inline int\n-make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt,\n-\t\tunsigned int *nr_pushes)\n-{\n-\tunsigned i, j;\n-\tint ret;\n-\tuint32_t next_bucket_idx;\n-\tstruct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];\n-\n-\t/*\n-\t * Push existing item (search for bucket with space in\n-\t * alternative locations) to its alternative location\n-\t */\n-\tfor (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {\n-\t\t/* Search for space in alternative locations */\n-\t\tnext_bucket_idx = bkt->sig_alt[i] & h->bucket_bitmask;\n-\t\tnext_bkt[i] = &h->buckets[next_bucket_idx];\n-\t\tfor (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {\n-\t\t\tif (next_bkt[i]->key_idx[j] == EMPTY_SLOT)\n-\t\t\t\tbreak;\n-\t\t}\n-\n-\t\tif (j != RTE_HASH_BUCKET_ENTRIES)\n-\t\t\tbreak;\n-\t}\n-\n-\t/* Alternative location has spare room (end of recursive function) */\n-\tif (i != RTE_HASH_BUCKET_ENTRIES) {\n-\t\tnext_bkt[i]->sig_alt[j] = bkt->sig_current[i];\n-\t\tnext_bkt[i]->sig_current[j] = bkt->sig_alt[i];\n-\t\tnext_bkt[i]->key_idx[j] = 
bkt->key_idx[i];\n-\t\treturn i;\n-\t}\n-\n-\t/* Pick entry that has not been pushed yet */\n-\tfor (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)\n-\t\tif (bkt->flag[i] == 0)\n-\t\t\tbreak;\n-\n-\t/* All entries have been pushed, so entry cannot be added */\n-\tif (i == RTE_HASH_BUCKET_ENTRIES || ++(*nr_pushes) > RTE_HASH_MAX_PUSHES)\n-\t\treturn -ENOSPC;\n-\n-\t/* Set flag to indicate that this entry is going to be pushed */\n-\tbkt->flag[i] = 1;\n-\n-\t/* Need room in alternative bucket to insert the pushed entry */\n-\tret = make_space_bucket(h, next_bkt[i], nr_pushes);\n-\t/*\n-\t * After recursive function.\n-\t * Clear flags and insert the pushed entry\n-\t * in its alternative location if successful,\n-\t * or return error\n-\t */\n-\tbkt->flag[i] = 0;\n-\tif (ret >= 0) {\n-\t\tnext_bkt[i]->sig_alt[ret] = bkt->sig_current[i];\n-\t\tnext_bkt[i]->sig_current[ret] = bkt->sig_alt[i];\n-\t\tnext_bkt[i]->key_idx[ret] = bkt->key_idx[i];\n-\t\treturn i;\n-\t} else\n-\t\treturn ret;\n-\n+\t__hash_rw_writer_unlock(h);\n }\n \n /*\n@@ -478,7 +453,7 @@ enqueue_slot_back(const struct rte_hash *h,\n \t\tstruct lcore_cache *cached_free_slots,\n \t\tvoid *slot_id)\n {\n-\tif (h->hw_trans_mem_support) {\n+\tif (h->multi_writer_support) {\n \t\tcached_free_slots->objs[cached_free_slots->len] = slot_id;\n \t\tcached_free_slots->len++;\n \t} else\n@@ -512,13 +487,207 @@ search_and_update(const struct rte_hash *h, void *data, const void *key,\n \treturn -1;\n }\n \n+/* Only tries to insert at one bucket (@prim_bkt) without trying to push\n+ * buckets around.\n+ * return 1 if matching existing key, return 0 if succeeds, return -1 for no\n+ * empty entry.\n+ */\n+static inline int32_t\n+rte_hash_cuckoo_insert_mw(const struct rte_hash *h,\n+\t\tstruct rte_hash_bucket *prim_bkt,\n+\t\tstruct rte_hash_bucket *sec_bkt,\n+\t\tconst struct rte_hash_key *key, void *data,\n+\t\thash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,\n+\t\tint32_t *ret_val)\n+{\n+\tunsigned int i;\n+\tstruct 
rte_hash_bucket *cur_bkt = prim_bkt;\n+\tint32_t ret;\n+\n+\t__hash_rw_writer_lock(h);\n+\t/* Check if key was inserted after last check but before this\n+\t * protected region in case of inserting duplicated keys.\n+\t */\n+\tret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);\n+\tif (ret != -1) {\n+\t\t__hash_rw_writer_unlock(h);\n+\t\t*ret_val = ret;\n+\t\treturn 1;\n+\t}\n+\tret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);\n+\tif (ret != -1) {\n+\t\t__hash_rw_writer_unlock(h);\n+\t\t*ret_val = ret;\n+\t\treturn 1;\n+\t}\n+\n+\t/* Insert new entry if there is room in the primary\n+\t * bucket.\n+\t */\n+\tfor (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {\n+\t\t/* Check if slot is available */\n+\t\tif (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {\n+\t\t\tprim_bkt->sig_current[i] = sig;\n+\t\t\tprim_bkt->sig_alt[i] = alt_hash;\n+\t\t\tprim_bkt->key_idx[i] = new_idx;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\t__hash_rw_writer_unlock(h);\n+\n+\tif (i != RTE_HASH_BUCKET_ENTRIES)\n+\t\treturn 0;\n+\n+\t/* no empty entry */\n+\treturn -1;\n+}\n+\n+/* Shift buckets along provided cuckoo_path (@leaf and @leaf_slot) and fill\n+ * the path head with new entry (sig, alt_hash, new_idx)\n+ * return 1 if matched key found, return -1 if cuckoo path invalided and fail,\n+ * return 0 if succeeds.\n+ */\n+static inline int\n+rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,\n+\t\t\tstruct rte_hash_bucket *bkt,\n+\t\t\tstruct rte_hash_bucket *alt_bkt,\n+\t\t\tconst struct rte_hash_key *key, void *data,\n+\t\t\tstruct queue_node *leaf, uint32_t leaf_slot,\n+\t\t\thash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,\n+\t\t\tint32_t *ret_val)\n+{\n+\tuint32_t prev_alt_bkt_idx;\n+\tstruct rte_hash_bucket *cur_bkt = bkt;\n+\tstruct queue_node *prev_node, *curr_node = leaf;\n+\tstruct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;\n+\tuint32_t prev_slot, curr_slot = leaf_slot;\n+\tint32_t ret;\n+\n+\t__hash_rw_writer_lock(h);\n+\n+\t/* In case empty slot was 
gone before entering protected region */\n+\tif (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {\n+\t\t__hash_rw_writer_unlock(h);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Check if key was inserted after last check but before this\n+\t * protected region.\n+\t */\n+\tret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);\n+\tif (ret != -1) {\n+\t\t__hash_rw_writer_unlock(h);\n+\t\t*ret_val = ret;\n+\t\treturn 1;\n+\t}\n+\n+\tret = search_and_update(h, data, key, alt_bkt, alt_hash, sig);\n+\tif (ret != -1) {\n+\t\t__hash_rw_writer_unlock(h);\n+\t\t*ret_val = ret;\n+\t\treturn 1;\n+\t}\n+\n+\twhile (likely(curr_node->prev != NULL)) {\n+\t\tprev_node = curr_node->prev;\n+\t\tprev_bkt = prev_node->bkt;\n+\t\tprev_slot = curr_node->prev_slot;\n+\n+\t\tprev_alt_bkt_idx =\n+\t\t\tprev_bkt->sig_alt[prev_slot] & h->bucket_bitmask;\n+\n+\t\tif (unlikely(&h->buckets[prev_alt_bkt_idx]\n+\t\t\t\t!= curr_bkt)) {\n+\t\t\t/* revert it to empty, otherwise duplicated keys */\n+\t\t\tcurr_bkt->key_idx[curr_slot] = EMPTY_SLOT;\n+\t\t\t__hash_rw_writer_unlock(h);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Need to swap current/alt sig to allow later\n+\t\t * Cuckoo insert to move elements back to its\n+\t\t * primary bucket if available\n+\t\t */\n+\t\tcurr_bkt->sig_alt[curr_slot] =\n+\t\t\t prev_bkt->sig_current[prev_slot];\n+\t\tcurr_bkt->sig_current[curr_slot] =\n+\t\t\tprev_bkt->sig_alt[prev_slot];\n+\t\tcurr_bkt->key_idx[curr_slot] =\n+\t\t\tprev_bkt->key_idx[prev_slot];\n+\n+\t\tcurr_slot = prev_slot;\n+\t\tcurr_node = prev_node;\n+\t\tcurr_bkt = curr_node->bkt;\n+\t}\n+\n+\tcurr_bkt->sig_current[curr_slot] = sig;\n+\tcurr_bkt->sig_alt[curr_slot] = alt_hash;\n+\tcurr_bkt->key_idx[curr_slot] = new_idx;\n+\n+\t__hash_rw_writer_unlock(h);\n+\n+\treturn 0;\n+\n+}\n+\n+/*\n+ * Make space for new key, using bfs Cuckoo Search and Multi-Writer safe\n+ * Cuckoo\n+ */\n+static inline int\n+rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,\n+\t\t\tstruct rte_hash_bucket *bkt,\n+\t\t\tstruct 
rte_hash_bucket *sec_bkt,\n+\t\t\tconst struct rte_hash_key *key, void *data,\n+\t\t\thash_sig_t sig, hash_sig_t alt_hash,\n+\t\t\tuint32_t new_idx, int32_t *ret_val)\n+{\n+\tunsigned int i;\n+\tstruct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];\n+\tstruct queue_node *tail, *head;\n+\tstruct rte_hash_bucket *curr_bkt, *alt_bkt;\n+\n+\ttail = queue;\n+\thead = queue + 1;\n+\ttail->bkt = bkt;\n+\ttail->prev = NULL;\n+\ttail->prev_slot = -1;\n+\n+\t/* Cuckoo bfs Search */\n+\twhile (likely(tail != head && head <\n+\t\t\t\t\tqueue + RTE_HASH_BFS_QUEUE_MAX_LEN -\n+\t\t\t\t\tRTE_HASH_BUCKET_ENTRIES)) {\n+\t\tcurr_bkt = tail->bkt;\n+\t\tfor (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {\n+\t\t\tif (curr_bkt->key_idx[i] == EMPTY_SLOT) {\n+\t\t\t\tint32_t ret = rte_hash_cuckoo_move_insert_mw(h,\n+\t\t\t\t\t\tbkt, sec_bkt, key, data,\n+\t\t\t\t\t\ttail, i, sig, alt_hash,\n+\t\t\t\t\t\tnew_idx, ret_val);\n+\t\t\t\tif (likely(ret != -1))\n+\t\t\t\t\treturn ret;\n+\t\t\t}\n+\n+\t\t\t/* Enqueue new node and keep prev node info */\n+\t\t\talt_bkt = &(h->buckets[curr_bkt->sig_alt[i]\n+\t\t\t\t\t\t    & h->bucket_bitmask]);\n+\t\t\thead->bkt = alt_bkt;\n+\t\t\thead->prev = tail;\n+\t\t\thead->prev_slot = i;\n+\t\t\thead++;\n+\t\t}\n+\t\ttail++;\n+\t}\n+\n+\treturn -ENOSPC;\n+}\n+\n static inline int32_t\n __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,\n \t\t\t\t\t\thash_sig_t sig, void *data)\n {\n \thash_sig_t alt_hash;\n \tuint32_t prim_bucket_idx, sec_bucket_idx;\n-\tunsigned i;\n \tstruct rte_hash_bucket *prim_bkt, *sec_bkt;\n \tstruct rte_hash_key *new_k, *keys = h->key_store;\n \tvoid *slot_id = NULL;\n@@ -527,10 +696,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,\n \tunsigned n_slots;\n \tunsigned lcore_id;\n \tstruct lcore_cache *cached_free_slots = NULL;\n-\tunsigned int nr_pushes = 0;\n-\n-\tif (h->add_key == ADD_KEY_MULTIWRITER)\n-\t\trte_spinlock_lock(h->multiwriter_lock);\n+\tint32_t ret_val;\n \n \tprim_bucket_idx 
= sig & h->bucket_bitmask;\n \tprim_bkt = &h->buckets[prim_bucket_idx];\n@@ -541,8 +707,24 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,\n \tsec_bkt = &h->buckets[sec_bucket_idx];\n \trte_prefetch0(sec_bkt);\n \n-\t/* Get a new slot for storing the new key */\n-\tif (h->hw_trans_mem_support) {\n+\t/* Check if key is already inserted in primary location */\n+\t__hash_rw_writer_lock(h);\n+\tret = search_and_update(h, data, key, prim_bkt, sig, alt_hash);\n+\tif (ret != -1) {\n+\t\t__hash_rw_writer_unlock(h);\n+\t\treturn ret;\n+\t}\n+\n+\t/* Check if key is already inserted in secondary location */\n+\tret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);\n+\tif (ret != -1) {\n+\t\t__hash_rw_writer_unlock(h);\n+\t\treturn ret;\n+\t}\n+\t__hash_rw_writer_unlock(h);\n+\n+\t/* Did not find a match, so get a new slot for storing the new key */\n+\tif (h->multi_writer_support) {\n \t\tlcore_id = rte_lcore_id();\n \t\tcached_free_slots = &h->local_free_slots[lcore_id];\n \t\t/* Try to get a free slot from the local cache */\n@@ -552,8 +734,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,\n \t\t\t\t\tcached_free_slots->objs,\n \t\t\t\t\tLCORE_CACHE_SIZE, NULL);\n \t\t\tif (n_slots == 0) {\n-\t\t\t\tret = -ENOSPC;\n-\t\t\t\tgoto failure;\n+\t\t\t\treturn -ENOSPC;\n \t\t\t}\n \n \t\t\tcached_free_slots->len += n_slots;\n@@ -564,92 +745,50 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,\n \t\tslot_id = cached_free_slots->objs[cached_free_slots->len];\n \t} else {\n \t\tif (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {\n-\t\t\tret = -ENOSPC;\n-\t\t\tgoto failure;\n+\t\t\treturn -ENOSPC;\n \t\t}\n \t}\n \n \tnew_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);\n-\trte_prefetch0(new_k);\n \tnew_idx = (uint32_t)((uintptr_t) slot_id);\n-\n-\t/* Check if key is already inserted in primary location */\n-\tret = search_and_update(h, data, key, prim_bkt, sig, 
alt_hash);\n-\tif (ret != -1)\n-\t\tgoto failure;\n-\n-\t/* Check if key is already inserted in secondary location */\n-\tret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);\n-\tif (ret != -1)\n-\t\tgoto failure;\n-\n \t/* Copy key */\n \trte_memcpy(new_k->key, key, h->key_len);\n \tnew_k->pdata = data;\n \n-#if defined(RTE_ARCH_X86) /* currently only x86 support HTM */\n-\tif (h->add_key == ADD_KEY_MULTIWRITER_TM) {\n-\t\tret = rte_hash_cuckoo_insert_mw_tm(prim_bkt,\n-\t\t\t\tsig, alt_hash, new_idx);\n-\t\tif (ret >= 0)\n-\t\t\treturn new_idx - 1;\n \n-\t\t/* Primary bucket full, need to make space for new entry */\n-\t\tret = rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig,\n-\t\t\t\t\t\t\talt_hash, new_idx);\n+\t/* Find an empty slot and insert */\n+\tret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,\n+\t\t\t\t\tsig, alt_hash, new_idx, &ret_val);\n+\tif (ret == 0)\n+\t\treturn new_idx - 1;\n+\telse if (ret == 1) {\n+\t\tenqueue_slot_back(h, cached_free_slots, slot_id);\n+\t\treturn ret_val;\n+\t}\n \n-\t\tif (ret >= 0)\n-\t\t\treturn new_idx - 1;\n+\t/* Primary bucket full, need to make space for new entry */\n+\tret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,\n+\t\t\t\t\tsig, alt_hash, new_idx, &ret_val);\n+\tif (ret == 0)\n+\t\treturn new_idx - 1;\n+\telse if (ret == 1) {\n+\t\tenqueue_slot_back(h, cached_free_slots, slot_id);\n+\t\treturn ret_val;\n+\t}\n \n-\t\t/* Also search secondary bucket to get better occupancy */\n-\t\tret = rte_hash_cuckoo_make_space_mw_tm(h, sec_bkt, sig,\n-\t\t\t\t\t\t\talt_hash, new_idx);\n+\t/* Also search secondary bucket to get better occupancy */\n+\tret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,\n+\t\t\t\t\talt_hash, sig, new_idx, &ret_val);\n \n-\t\tif (ret >= 0)\n-\t\t\treturn new_idx - 1;\n+\tif (ret == 0)\n+\t\treturn new_idx - 1;\n+\telse if (ret == 1) {\n+\t\tenqueue_slot_back(h, cached_free_slots, slot_id);\n+\t\treturn ret_val;\n \t} else 
{\n-#endif\n-\t\tfor (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {\n-\t\t\t/* Check if slot is available */\n-\t\t\tif (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {\n-\t\t\t\tprim_bkt->sig_current[i] = sig;\n-\t\t\t\tprim_bkt->sig_alt[i] = alt_hash;\n-\t\t\t\tprim_bkt->key_idx[i] = new_idx;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\n-\t\tif (i != RTE_HASH_BUCKET_ENTRIES) {\n-\t\t\tif (h->add_key == ADD_KEY_MULTIWRITER)\n-\t\t\t\trte_spinlock_unlock(h->multiwriter_lock);\n-\t\t\treturn new_idx - 1;\n-\t\t}\n-\n-\t\t/* Primary bucket full, need to make space for new entry\n-\t\t * After recursive function.\n-\t\t * Insert the new entry in the position of the pushed entry\n-\t\t * if successful or return error and\n-\t\t * store the new slot back in the ring\n-\t\t */\n-\t\tret = make_space_bucket(h, prim_bkt, &nr_pushes);\n-\t\tif (ret >= 0) {\n-\t\t\tprim_bkt->sig_current[ret] = sig;\n-\t\t\tprim_bkt->sig_alt[ret] = alt_hash;\n-\t\t\tprim_bkt->key_idx[ret] = new_idx;\n-\t\t\tif (h->add_key == ADD_KEY_MULTIWRITER)\n-\t\t\t\trte_spinlock_unlock(h->multiwriter_lock);\n-\t\t\treturn new_idx - 1;\n-\t\t}\n-#if defined(RTE_ARCH_X86)\n+\t\tenqueue_slot_back(h, cached_free_slots, slot_id);\n+\t\treturn ret;\n \t}\n-#endif\n-\t/* Error in addition, store new slot back in the ring and return error */\n-\tenqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));\n-\n-failure:\n-\tif (h->add_key == ADD_KEY_MULTIWRITER)\n-\t\trte_spinlock_unlock(h->multiwriter_lock);\n-\treturn ret;\n }\n \n int32_t\n@@ -734,12 +873,14 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,\n \tbucket_idx = sig & h->bucket_bitmask;\n \tbkt = &h->buckets[bucket_idx];\n \n+\t__hash_rw_reader_lock(h);\n \n \t/* Check if key is in primary location */\n \tret = search_one_bucket(h, key, sig, data, bkt);\n-\tif (ret != -1)\n+\tif (ret != -1) {\n+\t\t__hash_rw_reader_unlock(h);\n \t\treturn ret;\n-\n+\t}\n \t/* Calculate secondary hash */\n \talt_hash = 
rte_hash_secondary_hash(sig);\n \tbucket_idx = alt_hash & h->bucket_bitmask;\n@@ -747,9 +888,11 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,\n \n \t/* Check if key is in secondary location */\n \tret = search_one_bucket(h, key, alt_hash, data, bkt);\n-\tif (ret != -1)\n+\tif (ret != -1) {\n+\t\t__hash_rw_reader_unlock(h);\n \t\treturn ret;\n-\n+\t}\n+\t__hash_rw_reader_unlock(h);\n \treturn -ENOENT;\n }\n \n@@ -791,7 +934,7 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)\n \n \tbkt->sig_current[i] = NULL_SIGNATURE;\n \tbkt->sig_alt[i] = NULL_SIGNATURE;\n-\tif (h->hw_trans_mem_support) {\n+\tif (h->multi_writer_support) {\n \t\tlcore_id = rte_lcore_id();\n \t\tcached_free_slots = &h->local_free_slots[lcore_id];\n \t\t/* Cache full, need to free it. */\n@@ -855,10 +998,13 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,\n \tbucket_idx = sig & h->bucket_bitmask;\n \tbkt = &h->buckets[bucket_idx];\n \n+\t__hash_rw_writer_lock(h);\n \t/* look for key in primary bucket */\n \tret = search_and_remove(h, key, bkt, sig);\n-\tif (ret != -1)\n+\tif (ret != -1) {\n+\t\t__hash_rw_writer_unlock(h);\n \t\treturn ret;\n+\t}\n \n \t/* Calculate secondary hash */\n \talt_hash = rte_hash_secondary_hash(sig);\n@@ -867,9 +1013,12 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,\n \n \t/* look for key in secondary bucket */\n \tret = search_and_remove(h, key, bkt, alt_hash);\n-\tif (ret != -1)\n+\tif (ret != -1) {\n+\t\t__hash_rw_writer_unlock(h);\n \t\treturn ret;\n+\t}\n \n+\t__hash_rw_writer_unlock(h);\n \treturn -ENOENT;\n }\n \n@@ -1011,6 +1160,7 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,\n \t\trte_prefetch0(secondary_bkt[i]);\n \t}\n \n+\t__hash_rw_reader_lock(h);\n \t/* Compare signatures and prefetch key slot of first hit */\n \tfor (i = 0; i < num_keys; i++) {\n \t\tcompare_signatures(&prim_hitmask[i], &sec_hitmask[i],\n@@ -1093,6 
+1243,8 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,\n \t\tcontinue;\n \t}\n \n+\t__hash_rw_reader_unlock(h);\n+\n \tif (hit_mask != NULL)\n \t\t*hit_mask = hits;\n }\n@@ -1151,7 +1303,7 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32\n \t\tbucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;\n \t\tidx = *next % RTE_HASH_BUCKET_ENTRIES;\n \t}\n-\n+\t__hash_rw_reader_lock(h);\n \t/* Get position of entry in key table */\n \tposition = h->buckets[bucket_idx].key_idx[idx];\n \tnext_key = (struct rte_hash_key *) ((char *)h->key_store +\n@@ -1160,6 +1312,8 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32\n \t*key = next_key->key;\n \t*data = next_key->pdata;\n \n+\t__hash_rw_reader_unlock(h);\n+\n \t/* Increment iterator */\n \t(*next)++;\n \ndiff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h\nindex 7a54e55..db4d1a0 100644\n--- a/lib/librte_hash/rte_cuckoo_hash.h\n+++ b/lib/librte_hash/rte_cuckoo_hash.h\n@@ -88,11 +88,6 @@ const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {\n \n #endif\n \n-enum add_key_case {\n-\tADD_KEY_SINGLEWRITER = 0,\n-\tADD_KEY_MULTIWRITER,\n-\tADD_KEY_MULTIWRITER_TM,\n-};\n \n /** Number of items per bucket. */\n #define RTE_HASH_BUCKET_ENTRIES\t\t8\n@@ -155,18 +150,20 @@ struct rte_hash {\n \n \tstruct rte_ring *free_slots;\n \t/**< Ring that stores all indexes of the free slots in the key table */\n-\tuint8_t hw_trans_mem_support;\n-\t/**< Hardware transactional memory support */\n+\n \tstruct lcore_cache *local_free_slots;\n \t/**< Local cache per lcore, storing some indexes of the free slots */\n-\tenum add_key_case add_key; /**< Multi-writer hash add behavior */\n-\n-\trte_spinlock_t *multiwriter_lock; /**< Multi-writer spinlock for w/o TM */\n \n \t/* Fields used in lookup */\n \n \tuint32_t key_len __rte_cache_aligned;\n \t/**< Length of hash key. 
*/\n+\tuint8_t hw_trans_mem_support;\n+\t/**< If hardware transactional memory is used. */\n+\tuint8_t multi_writer_support;\n+\t/**< If multi-writer support is enabled. */\n+\tuint8_t readwrite_concur_support;\n+\t/**< If read-write concurrency support is enabled */\n \trte_hash_function hash_func;    /**< Function used to calculate hash. */\n \tuint32_t hash_func_init_val;    /**< Init value used by hash_func. */\n \trte_hash_cmp_eq_t rte_hash_custom_cmp_eq;\n@@ -184,6 +181,7 @@ struct rte_hash {\n \t/**< Table with buckets storing all the\thash values and key indexes\n \t * to the key table.\n \t */\n+\trte_rwlock_t *readwrite_lock; /**< Read-write lock thread-safety. */\n } __rte_cache_aligned;\n \n struct queue_node {\ndiff --git a/lib/librte_hash/rte_cuckoo_hash_x86.h b/lib/librte_hash/rte_cuckoo_hash_x86.h\ndeleted file mode 100644\nindex 981d7bd..0000000\n--- a/lib/librte_hash/rte_cuckoo_hash_x86.h\n+++ /dev/null\n@@ -1,167 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2016 Intel Corporation\n- */\n-\n-/* rte_cuckoo_hash_x86.h\n- * This file holds all x86 specific Cuckoo Hash functions\n- */\n-\n-/* Only tries to insert at one bucket (@prim_bkt) without trying to push\n- * buckets around\n- */\n-static inline unsigned\n-rte_hash_cuckoo_insert_mw_tm(struct rte_hash_bucket *prim_bkt,\n-\t\thash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)\n-{\n-\tunsigned i, status;\n-\tunsigned try = 0;\n-\n-\twhile (try < RTE_HASH_TSX_MAX_RETRY) {\n-\t\tstatus = rte_xbegin();\n-\t\tif (likely(status == RTE_XBEGIN_STARTED)) {\n-\t\t\t/* Insert new entry if there is room in the primary\n-\t\t\t* bucket.\n-\t\t\t*/\n-\t\t\tfor (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {\n-\t\t\t\t/* Check if slot is available */\n-\t\t\t\tif (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {\n-\t\t\t\t\tprim_bkt->sig_current[i] = sig;\n-\t\t\t\t\tprim_bkt->sig_alt[i] = alt_hash;\n-\t\t\t\t\tprim_bkt->key_idx[i] = 
new_idx;\n-\t\t\t\t\tbreak;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\trte_xend();\n-\n-\t\t\tif (i != RTE_HASH_BUCKET_ENTRIES)\n-\t\t\t\treturn 0;\n-\n-\t\t\tbreak; /* break off try loop if transaction commits */\n-\t\t} else {\n-\t\t\t/* If we abort we give up this cuckoo path. */\n-\t\t\ttry++;\n-\t\t\trte_pause();\n-\t\t}\n-\t}\n-\n-\treturn -1;\n-}\n-\n-/* Shift buckets along provided cuckoo_path (@leaf and @leaf_slot) and fill\n- * the path head with new entry (sig, alt_hash, new_idx)\n- */\n-static inline int\n-rte_hash_cuckoo_move_insert_mw_tm(const struct rte_hash *h,\n-\t\t\tstruct queue_node *leaf, uint32_t leaf_slot,\n-\t\t\thash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)\n-{\n-\tunsigned try = 0;\n-\tunsigned status;\n-\tuint32_t prev_alt_bkt_idx;\n-\n-\tstruct queue_node *prev_node, *curr_node = leaf;\n-\tstruct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;\n-\tuint32_t prev_slot, curr_slot = leaf_slot;\n-\n-\twhile (try < RTE_HASH_TSX_MAX_RETRY) {\n-\t\tstatus = rte_xbegin();\n-\t\tif (likely(status == RTE_XBEGIN_STARTED)) {\n-\t\t\t/* In case empty slot was gone before entering TSX */\n-\t\t\tif (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT)\n-\t\t\t\trte_xabort(RTE_XABORT_CUCKOO_PATH_INVALIDED);\n-\t\t\twhile (likely(curr_node->prev != NULL)) {\n-\t\t\t\tprev_node = curr_node->prev;\n-\t\t\t\tprev_bkt = prev_node->bkt;\n-\t\t\t\tprev_slot = curr_node->prev_slot;\n-\n-\t\t\t\tprev_alt_bkt_idx\n-\t\t\t\t\t= prev_bkt->sig_alt[prev_slot]\n-\t\t\t\t\t    & h->bucket_bitmask;\n-\n-\t\t\t\tif (unlikely(&h->buckets[prev_alt_bkt_idx]\n-\t\t\t\t\t     != curr_bkt)) {\n-\t\t\t\t\trte_xabort(RTE_XABORT_CUCKOO_PATH_INVALIDED);\n-\t\t\t\t}\n-\n-\t\t\t\t/* Need to swap current/alt sig to allow later\n-\t\t\t\t * Cuckoo insert to move elements back to its\n-\t\t\t\t * primary bucket if available\n-\t\t\t\t */\n-\t\t\t\tcurr_bkt->sig_alt[curr_slot] =\n-\t\t\t\t    prev_bkt->sig_current[prev_slot];\n-\t\t\t\tcurr_bkt->sig_current[curr_slot] =\n-\t\t\t\t    
prev_bkt->sig_alt[prev_slot];\n-\t\t\t\tcurr_bkt->key_idx[curr_slot]\n-\t\t\t\t    = prev_bkt->key_idx[prev_slot];\n-\n-\t\t\t\tcurr_slot = prev_slot;\n-\t\t\t\tcurr_node = prev_node;\n-\t\t\t\tcurr_bkt = curr_node->bkt;\n-\t\t\t}\n-\n-\t\t\tcurr_bkt->sig_current[curr_slot] = sig;\n-\t\t\tcurr_bkt->sig_alt[curr_slot] = alt_hash;\n-\t\t\tcurr_bkt->key_idx[curr_slot] = new_idx;\n-\n-\t\t\trte_xend();\n-\n-\t\t\treturn 0;\n-\t\t}\n-\n-\t\t/* If we abort we give up this cuckoo path, since most likely it's\n-\t\t * no longer valid as TSX detected data conflict\n-\t\t */\n-\t\ttry++;\n-\t\trte_pause();\n-\t}\n-\n-\treturn -1;\n-}\n-\n-/*\n- * Make space for new key, using bfs Cuckoo Search and Multi-Writer safe\n- * Cuckoo\n- */\n-static inline int\n-rte_hash_cuckoo_make_space_mw_tm(const struct rte_hash *h,\n-\t\t\tstruct rte_hash_bucket *bkt,\n-\t\t\thash_sig_t sig, hash_sig_t alt_hash,\n-\t\t\tuint32_t new_idx)\n-{\n-\tunsigned i;\n-\tstruct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];\n-\tstruct queue_node *tail, *head;\n-\tstruct rte_hash_bucket *curr_bkt, *alt_bkt;\n-\n-\ttail = queue;\n-\thead = queue + 1;\n-\ttail->bkt = bkt;\n-\ttail->prev = NULL;\n-\ttail->prev_slot = -1;\n-\n-\t/* Cuckoo bfs Search */\n-\twhile (likely(tail != head && head <\n-\t\t\t\t\tqueue + RTE_HASH_BFS_QUEUE_MAX_LEN -\n-\t\t\t\t\tRTE_HASH_BUCKET_ENTRIES)) {\n-\t\tcurr_bkt = tail->bkt;\n-\t\tfor (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {\n-\t\t\tif (curr_bkt->key_idx[i] == EMPTY_SLOT) {\n-\t\t\t\tif (likely(rte_hash_cuckoo_move_insert_mw_tm(h,\n-\t\t\t\t\t\ttail, i, sig,\n-\t\t\t\t\t\talt_hash, new_idx) == 0))\n-\t\t\t\t\treturn 0;\n-\t\t\t}\n-\n-\t\t\t/* Enqueue new node and keep prev node info */\n-\t\t\talt_bkt = &(h->buckets[curr_bkt->sig_alt[i]\n-\t\t\t\t\t\t    & h->bucket_bitmask]);\n-\t\t\thead->bkt = alt_bkt;\n-\t\t\thead->prev = tail;\n-\t\t\thead->prev_slot = i;\n-\t\t\thead++;\n-\t\t}\n-\t\ttail++;\n-\t}\n-\n-\treturn -ENOSPC;\n-}\ndiff --git a/lib/librte_hash/rte_hash.h 
b/lib/librte_hash/rte_hash.h\nindex f71ca9f..ecb49e4 100644\n--- a/lib/librte_hash/rte_hash.h\n+++ b/lib/librte_hash/rte_hash.h\n@@ -34,6 +34,9 @@ extern \"C\" {\n /** Default behavior of insertion, single writer/multi writer */\n #define RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD 0x02\n \n+/** Flag to support reader writer concurrency */\n+#define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY 0x04\n+\n /** Signature of key that is stored internally. */\n typedef uint32_t hash_sig_t;\n \n",
    "prefixes": [
        "v3",
        "5/8"
    ]
}