[2/4] hash: remove rw-lock calls from lock-free functions

Message ID 20181109163917.16845-3-honnappa.nagarahalli@arm.com
State Superseded, archived
Delegated to: Thomas Monjalon
Series hash: separate lf and rw lock lookup code paths

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Honnappa Nagarahalli Nov. 9, 2018, 4:39 p.m. UTC
Remove the rw-lock calls from lock-free versions of lookup functions.
This is an intermediate commit meant to ease the review process.

Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
Cc: honnappa.nagarahalli@arm.com

Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
 lib/librte_hash/rte_cuckoo_hash.c | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)
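
This change relies on the table change counter protocol introduced by the
commit being fixed: readers bracket each search with acquire-ordered loads
of h->tbl_chng_cnt and retry a miss if a writer bumped the counter in
between, which is what makes the reader lock redundant on this path. The
following is a minimal sketch of that retry pattern under simplified
assumptions (a flat slot array instead of cuckoo buckets; every name
except tbl_chng_cnt is illustrative, not the DPDK API):

#include <stdint.h>

#define NUM_SLOTS 8

struct lf_table {
	uint32_t tbl_chng_cnt;    /* writers bump this around key moves */
	uint32_t keys[NUM_SLOTS]; /* 0 means an empty slot */
};

/* Return the slot index holding 'key', or -1 on a (possibly stale) miss. */
static int
search_slots(const struct lf_table *t, uint32_t key)
{
	int i;

	for (i = 0; i < NUM_SLOTS; i++)
		if (__atomic_load_n(&t->keys[i], __ATOMIC_ACQUIRE) == key)
			return i;
	return -1;
}

static int
lookup_lf(const struct lf_table *t, uint32_t key)
{
	uint32_t cnt_b, cnt_a;
	int ret;

	do {
		/* Load the change counter before the search starts;
		 * acquire semantics keep the slot loads below from
		 * being hoisted above this point.
		 */
		cnt_b = __atomic_load_n(&t->tbl_chng_cnt, __ATOMIC_ACQUIRE);

		ret = search_slots(t, key);
		if (ret != -1)
			return ret; /* hit: valid even under concurrent moves */

		/* Re-load the counter. If a writer moved keys while we
		 * searched, the miss may be spurious: retry the lookup.
		 */
		cnt_a = __atomic_load_n(&t->tbl_chng_cnt, __ATOMIC_ACQUIRE);
	} while (cnt_b != cnt_a);

	return -1; /* counter stable across the search: genuine miss */
}

Note that only misses are retried: a hit is returned immediately, since a
found key is valid even if a concurrent writer displaced other entries,
which is why the hit paths in the patch below need no counter re-check.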
  

Patch

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 874d77f1c..e6b84c6bc 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1274,8 +1274,6 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
 	prim_bucket_idx = get_prim_bucket_index(h, sig);
 	sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
 
-	__hash_rw_reader_lock(h);
-
 	do {
 		/* Load the table change counter before the lookup
 		 * starts. Acquire semantics will make sure that
@@ -1286,7 +1284,7 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
 
 		/* Check if key is in primary location */
 		bkt = &h->buckets[prim_bucket_idx];
-		ret = search_one_bucket(h, key, short_sig, data, bkt);
+		ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
 		if (ret != -1) {
 			__hash_rw_reader_unlock(h);
 			return ret;
@@ -1296,7 +1294,7 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
 
 		/* Check if key is in secondary location */
 		FOR_EACH_BUCKET(cur_bkt, bkt) {
-			ret = search_one_bucket(h, key, short_sig,
+			ret = search_one_bucket_lf(h, key, short_sig,
 						data, cur_bkt);
 			if (ret != -1) {
 				__hash_rw_reader_unlock(h);
@@ -1320,8 +1318,6 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
 					__ATOMIC_ACQUIRE);
 	} while (cnt_b != cnt_a);
 
-	__hash_rw_reader_unlock(h);
-
 	return -ENOENT;
 }
 
@@ -1911,7 +1907,6 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
 		rte_prefetch0(secondary_bkt[i]);
 	}
 
-	__hash_rw_reader_lock(h);
 	do {
 		/* Load the table change counter before the lookup
 		 * starts. Acquire semantics will make sure that
@@ -2060,10 +2055,10 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
 		next_bkt = secondary_bkt[i]->next;
 		FOR_EACH_BUCKET(cur_bkt, next_bkt) {
 			if (data != NULL)
-				ret = search_one_bucket(h, keys[i],
+				ret = search_one_bucket_lf(h, keys[i],
 						sig[i], &data[i], cur_bkt);
 			else
-				ret = search_one_bucket(h, keys[i],
+				ret = search_one_bucket_lf(h, keys[i],
 						sig[i], NULL, cur_bkt);
 			if (ret != -1) {
 				positions[i] = ret;
@@ -2073,8 +2068,6 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
 		}
 	}
 
-	__hash_rw_reader_unlock(h);
-
 	if (hit_mask != NULL)
 		*hit_mask = hits;
 }
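
Note that the early-return (hit) paths above still call
__hash_rw_reader_unlock(). In this intermediate commit that should be
harmless: the reader-lock helpers take the rwlock only when rw-lock
concurrency was requested at table-creation time, so on a table created
with only the lock-free flag they are no-ops, and a later revision of the
series (this one is marked Superseded) would be expected to drop the
leftover calls as well. A simplified sketch of that conditional pattern,
using pthread in place of rte_rwlock and a hypothetical hash_ctl struct
rather than the real rte_hash layout:

#include <pthread.h>

/* Hypothetical stand-in for the relevant rte_hash fields. */
struct hash_ctl {
	int readwrite_concur_support; /* set only for rw-lock concurrency */
	pthread_rwlock_t lock;        /* stand-in for rte_rwlock_t */
};

static void
reader_lock(struct hash_ctl *h)
{
	/* No-op on a table created without rw-lock concurrency,
	 * e.g. one using only the lock-free scheme.
	 */
	if (h->readwrite_concur_support)
		pthread_rwlock_rdlock(&h->lock);
}

static void
reader_unlock(struct hash_ctl *h)
{
	if (h->readwrite_concur_support)
		pthread_rwlock_unlock(&h->lock);
}

With the helpers conditional like this, removing the calls from the
lock-free functions changes no behavior; it only makes the lock-free code
path explicit.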