From patchwork Fri Oct 20 16:51:56 2023
X-Patchwork-Submitter: Yoan Picchi
X-Patchwork-Id: 133163
From: Yoan Picchi
To: Thomas Monjalon, Yipeng Wang, Sameh Gobriel, Bruce Richardson, Vladimir Medvedkin
Cc: dev@dpdk.org, Yoan Picchi
Subject: [PATCH v2 1/4] hash: pack the hitmask for hash in bulk lookup
Date: Fri, 20 Oct 2023 16:51:56 +0000
Message-Id: <20231020165159.1649282-2-yoan.picchi@arm.com>
In-Reply-To: <20231020165159.1649282-1-yoan.picchi@arm.com>
References: <20231020165159.1649282-1-yoan.picchi@arm.com>

The current hitmask includes padding due to an Intel SIMD implementation detail. This patch allows non-Intel SIMD implementations to benefit from a dense hitmask.
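As an illustration (not part of the patch), a minimal standalone sketch of the difference, assuming 8 entries per bucket: the SSE movemask-style "sparse" layout spends two bit positions per bucket entry (the sketch sets only the even bit of each pair, as in the 0x5555-masked secondary mask), so a bit position must be halved to recover the entry index, while the dense layout spends one bit per entry and needs no shift.

#include <stdio.h>
#include <stdint.h>

/* Walk the set bits of a hitmask and print the bucket entry each one maps to.
 * padding == 1 models the sparse layout (2 bits per entry), 0 the dense one. */
static void walk_hits(uint32_t mask, int padding)
{
	while (mask) {
		uint32_t entry = __builtin_ctzl(mask) >> padding;
		printf("hit at entry %u\n", entry);
		mask &= mask - 1;	/* clear the lowest set bit */
	}
}

int main(void)
{
	uint32_t dense = (1u << 2) | (1u << 5);		/* entries 2 and 5, one bit each */
	uint32_t sparse = (1u << 4) | (1u << 10);	/* same entries, two bits per entry */

	puts("dense:");
	walk_hits(dense, 0);
	puts("sparse:");
	walk_hits(sparse, 1);
	return 0;
}

Both walks report entries 2 and 5; only the index conversion differs, which is what the hitmask_padding shift in this patch captures.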
Signed-off-by: Yoan Picchi --- .mailmap | 2 + lib/hash/rte_cuckoo_hash.c | 118 ++++++++++++++++++++++++++----------- 2 files changed, 86 insertions(+), 34 deletions(-) diff --git a/.mailmap b/.mailmap index 3f5bab26a8..b9c49aa7f6 100644 --- a/.mailmap +++ b/.mailmap @@ -485,6 +485,7 @@ Hari Kumar Vemula Harini Ramakrishnan Hariprasad Govindharajan Harish Patil +Harjot Singh Harman Kalra Harneet Singh Harold Huang @@ -1602,6 +1603,7 @@ Yixue Wang Yi Yang Yi Zhang Yoann Desmouceaux +Yoan Picchi Yogesh Jangra Yogev Chaimovich Yongjie Gu diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c index 19b23f2a97..2aa96eb862 100644 --- a/lib/hash/rte_cuckoo_hash.c +++ b/lib/hash/rte_cuckoo_hash.c @@ -1850,8 +1850,50 @@ rte_hash_free_key_with_position(const struct rte_hash *h, } +#if defined(__ARM_NEON) + +static inline void +compare_signatures_dense(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, + const struct rte_hash_bucket *prim_bkt, + const struct rte_hash_bucket *sec_bkt, + uint16_t sig, + enum rte_hash_sig_compare_function sig_cmp_fn) +{ + unsigned int i; + + /* For match mask every bits indicates the match */ + switch (sig_cmp_fn) { + case RTE_HASH_COMPARE_NEON: { + uint16x8_t vmat, vsig, x; + int16x8_t shift = {0, 1, 2, 3, 4, 5, 6, 7}; + + vsig = vld1q_dup_u16((uint16_t const *)&sig); + /* Compare all signatures in the primary bucket */ + vmat = vceqq_u16(vsig, + vld1q_u16((uint16_t const *)prim_bkt->sig_current)); + x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x0001)), shift); + *prim_hash_matches = (uint32_t)(vaddvq_u16(x)); + /* Compare all signatures in the secondary bucket */ + vmat = vceqq_u16(vsig, + vld1q_u16((uint16_t const *)sec_bkt->sig_current)); + x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x0001)), shift); + *sec_hash_matches = (uint32_t)(vaddvq_u16(x)); + } + break; + default: + for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { + *prim_hash_matches |= + ((sig == prim_bkt->sig_current[i]) << i); + *sec_hash_matches |= + ((sig == sec_bkt->sig_current[i]) << i); + } + } +} + +#else + static inline void -compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, +compare_signatures_sparse(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, const struct rte_hash_bucket *prim_bkt, const struct rte_hash_bucket *sec_bkt, uint16_t sig, @@ -1878,25 +1920,7 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, /* Extract the even-index bits only */ *sec_hash_matches &= 0x5555; break; -#elif defined(__ARM_NEON) - case RTE_HASH_COMPARE_NEON: { - uint16x8_t vmat, vsig, x; - int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1}; - - vsig = vld1q_dup_u16((uint16_t const *)&sig); - /* Compare all signatures in the primary bucket */ - vmat = vceqq_u16(vsig, - vld1q_u16((uint16_t const *)prim_bkt->sig_current)); - x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift); - *prim_hash_matches = (uint32_t)(vaddvq_u16(x)); - /* Compare all signatures in the secondary bucket */ - vmat = vceqq_u16(vsig, - vld1q_u16((uint16_t const *)sec_bkt->sig_current)); - x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift); - *sec_hash_matches = (uint32_t)(vaddvq_u16(x)); - } - break; -#endif +#endif /* defined(__SSE2__) */ default: for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { *prim_hash_matches |= @@ -1907,6 +1931,8 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, } } +#endif /* defined(__ARM_NEON) */ + static inline void __bulk_lookup_l(const struct rte_hash *h, const void **keys, const struct 
rte_hash_bucket **primary_bkt, @@ -1921,18 +1947,30 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; struct rte_hash_bucket *cur_bkt, *next_bkt; +#if defined(__ARM_NEON) + const int hitmask_padding = 0; +#else + const int hitmask_padding = 1; +#endif + __hash_rw_reader_lock(h); /* Compare signatures and prefetch key slot of first hit */ for (i = 0; i < num_keys; i++) { - compare_signatures(&prim_hitmask[i], &sec_hitmask[i], +#if defined(__ARM_NEON) + compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i], + primary_bkt[i], secondary_bkt[i], + sig[i], h->sig_cmp_fn); +#else + compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i], primary_bkt[i], secondary_bkt[i], sig[i], h->sig_cmp_fn); +#endif if (prim_hitmask[i]) { uint32_t first_hit = __builtin_ctzl(prim_hitmask[i]) - >> 1; + >> hitmask_padding; uint32_t key_idx = primary_bkt[i]->key_idx[first_hit]; const struct rte_hash_key *key_slot = @@ -1946,7 +1984,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, if (sec_hitmask[i]) { uint32_t first_hit = __builtin_ctzl(sec_hitmask[i]) - >> 1; + >> hitmask_padding; uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit]; const struct rte_hash_key *key_slot = @@ -1963,7 +2001,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, while (prim_hitmask[i]) { uint32_t hit_index = __builtin_ctzl(prim_hitmask[i]) - >> 1; + >> hitmask_padding; uint32_t key_idx = primary_bkt[i]->key_idx[hit_index]; const struct rte_hash_key *key_slot = @@ -1985,13 +2023,13 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, positions[i] = key_idx - 1; goto next_key; } - prim_hitmask[i] &= ~(3ULL << (hit_index << 1)); + prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding)); } while (sec_hitmask[i]) { uint32_t hit_index = __builtin_ctzl(sec_hitmask[i]) - >> 1; + >> hitmask_padding; uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index]; const struct rte_hash_key *key_slot = @@ -2014,7 +2052,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, positions[i] = key_idx - 1; goto next_key; } - sec_hitmask[i] &= ~(3ULL << (hit_index << 1)); + sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding)); } next_key: continue; @@ -2069,6 +2107,12 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, struct rte_hash_bucket *cur_bkt, *next_bkt; uint32_t cnt_b, cnt_a; +#if defined(__ARM_NEON) + const int hitmask_padding = 0; +#else + const int hitmask_padding = 1; +#endif + for (i = 0; i < num_keys; i++) positions[i] = -ENOENT; @@ -2082,14 +2126,20 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, /* Compare signatures and prefetch key slot of first hit */ for (i = 0; i < num_keys; i++) { - compare_signatures(&prim_hitmask[i], &sec_hitmask[i], +#if defined(__ARM_NEON) + compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i], primary_bkt[i], secondary_bkt[i], sig[i], h->sig_cmp_fn); +#else + compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i], + primary_bkt[i], secondary_bkt[i], + sig[i], h->sig_cmp_fn); +#endif if (prim_hitmask[i]) { uint32_t first_hit = __builtin_ctzl(prim_hitmask[i]) - >> 1; + >> hitmask_padding; uint32_t key_idx = primary_bkt[i]->key_idx[first_hit]; const struct rte_hash_key *key_slot = @@ -2103,7 +2153,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, if (sec_hitmask[i]) { uint32_t first_hit = __builtin_ctzl(sec_hitmask[i]) - >> 1; + >> hitmask_padding; uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit]; const struct 
rte_hash_key *key_slot = @@ -2119,7 +2169,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, while (prim_hitmask[i]) { uint32_t hit_index = __builtin_ctzl(prim_hitmask[i]) - >> 1; + >> hitmask_padding; uint32_t key_idx = __atomic_load_n( &primary_bkt[i]->key_idx[hit_index], @@ -2145,13 +2195,13 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, positions[i] = key_idx - 1; goto next_key; } - prim_hitmask[i] &= ~(3ULL << (hit_index << 1)); + prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding)); } while (sec_hitmask[i]) { uint32_t hit_index = __builtin_ctzl(sec_hitmask[i]) - >> 1; + >> hitmask_padding; uint32_t key_idx = __atomic_load_n( &secondary_bkt[i]->key_idx[hit_index], @@ -2178,7 +2228,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, positions[i] = key_idx - 1; goto next_key; } - sec_hitmask[i] &= ~(3ULL << (hit_index << 1)); + sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding)); } next_key: continue;

From patchwork Fri Oct 20 16:51:57 2023
X-Patchwork-Submitter: Yoan Picchi
X-Patchwork-Id: 133164
From: Yoan Picchi
To: Yipeng Wang, Sameh Gobriel, Bruce Richardson, Vladimir Medvedkin
Cc: dev@dpdk.org, Yoan Picchi
Subject: [PATCH v2 2/4] hash: optimize compare signature for NEON
Date: Fri, 20 Oct 2023 16:51:57 +0000
Message-Id: <20231020165159.1649282-3-yoan.picchi@arm.com>
In-Reply-To: <20231020165159.1649282-1-yoan.picchi@arm.com>
References: <20231020165159.1649282-1-yoan.picchi@arm.com>

Upon a successful comparison, NEON sets all the bits in the lane to 1. We can therefore skip the shift and simply mask with lane-specific masks.
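A scalar model of the trick described here (illustration only, not the DPDK code): because vceqq_u16 leaves a matching lane all-ones, ANDing with the per-lane constants {0x1, ..., 0x80} and summing the lanes with vaddvq_u16 produces the packed hitmask directly, so the vshlq_u16 shift can be dropped.

#include <stdio.h>
#include <stdint.h>

#define LANES 8

/* Emulates vceqq_u16 + vandq_u16 + vaddvq_u16 lane by lane. */
static uint16_t dense_hitmask(const uint16_t sigs[LANES], uint16_t sig)
{
	const uint16_t mask[LANES] = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};
	uint16_t sum = 0;
	int i;

	for (i = 0; i < LANES; i++) {
		uint16_t vmat = (sigs[i] == sig) ? 0xFFFF : 0x0000; /* compare result */
		sum += vmat & mask[i];	/* keeps exactly one bit per matching lane */
	}
	return sum;
}

int main(void)
{
	const uint16_t sigs[LANES] = {7, 42, 7, 0, 9, 42, 1, 2};

	/* Lanes 1 and 5 hold signature 42, so this prints 0x22. */
	printf("hitmask = 0x%x\n", dense_hitmask(sigs, 42));
	return 0;
}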
Signed-off-by: Yoan Picchi --- lib/hash/rte_cuckoo_hash.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c index 2aa96eb862..a4b907c45c 100644 --- a/lib/hash/rte_cuckoo_hash.c +++ b/lib/hash/rte_cuckoo_hash.c @@ -1864,19 +1864,17 @@ compare_signatures_dense(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches /* For match mask every bits indicates the match */ switch (sig_cmp_fn) { case RTE_HASH_COMPARE_NEON: { - uint16x8_t vmat, vsig, x; - int16x8_t shift = {0, 1, 2, 3, 4, 5, 6, 7}; + uint16x8_t vmat, x; + const uint16x8_t mask = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80}; + const uint16x8_t vsig = vld1q_dup_u16((uint16_t const *)&sig); - vsig = vld1q_dup_u16((uint16_t const *)&sig); /* Compare all signatures in the primary bucket */ - vmat = vceqq_u16(vsig, - vld1q_u16((uint16_t const *)prim_bkt->sig_current)); - x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x0001)), shift); + vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)prim_bkt->sig_current)); + x = vandq_u16(vmat, mask); *prim_hash_matches = (uint32_t)(vaddvq_u16(x)); /* Compare all signatures in the secondary bucket */ - vmat = vceqq_u16(vsig, - vld1q_u16((uint16_t const *)sec_bkt->sig_current)); - x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x0001)), shift); + vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)sec_bkt->sig_current)); + x = vandq_u16(vmat, mask); *sec_hash_matches = (uint32_t)(vaddvq_u16(x)); } break;

From patchwork Fri Oct 20 16:51:58 2023
X-Patchwork-Submitter: Yoan Picchi
X-Patchwork-Id: 133165
From: Yoan Picchi
To: Yipeng Wang, Sameh Gobriel, Bruce Richardson, Vladimir Medvedkin
Cc: dev@dpdk.org, Yoan Picchi, Harjot Singh
Subject: [PATCH v2 3/4] test/hash: check bulk lookup of keys after collision
Date: Fri, 20 Oct 2023 16:51:58 +0000
Message-Id: <20231020165159.1649282-4-yoan.picchi@arm.com>
In-Reply-To: <20231020165159.1649282-1-yoan.picchi@arm.com>
References: <20231020165159.1649282-1-yoan.picchi@arm.com>

This patch adds a unit test for rte_hash_lookup_bulk(). It also updates the test_full_bucket test to the current number of entries in a hash bucket.
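For reference, a minimal usage sketch of the bulk-lookup API the new test exercises (an illustration, not part of the patch: the table size and the rte_jhash hash function are arbitrary choices, and an already initialised EAL environment is assumed).

#include <stdint.h>
#include <rte_hash.h>
#include <rte_jhash.h>

#define NUM_KEYS 9

/* Create a small table, add NUM_KEYS keys, then resolve them all with a
 * single bulk lookup, much like the new unit test does. */
static int bulk_lookup_demo(void)
{
	struct rte_hash_parameters params = {
		.name = "demo_hash",
		.entries = 64,
		.key_len = sizeof(uint32_t),
		.hash_func = rte_jhash,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};
	uint32_t keys[NUM_KEYS];
	const void *key_ptrs[NUM_KEYS];
	int32_t positions[NUM_KEYS];
	struct rte_hash *h = rte_hash_create(&params);
	unsigned int i;
	int ret;

	if (h == NULL)
		return -1;

	for (i = 0; i < NUM_KEYS; i++) {
		keys[i] = i;
		key_ptrs[i] = &keys[i];
		rte_hash_add_key(h, &keys[i]);
	}

	/* On success, positions[i] holds the slot of key i, or -ENOENT if
	 * the key is absent. */
	ret = rte_hash_lookup_bulk(h, key_ptrs, NUM_KEYS, positions);

	rte_hash_free(h);
	return ret;
}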
Signed-off-by: Yoan Picchi Signed-off-by: Harjot Singh --- app/test/test_hash.c | 99 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 76 insertions(+), 23 deletions(-) diff --git a/app/test/test_hash.c b/app/test/test_hash.c index d586878a22..b6e22c5ecc 100644 --- a/app/test/test_hash.c +++ b/app/test/test_hash.c @@ -95,7 +95,7 @@ static uint32_t pseudo_hash(__rte_unused const void *keys, __rte_unused uint32_t key_len, __rte_unused uint32_t init_val) { - return 3; + return 3 | 3 << 16; } RTE_LOG_REGISTER(hash_logtype_test, test.hash, INFO); @@ -115,8 +115,10 @@ static void print_key_info(const char *msg, const struct flow_key *key, rte_log(RTE_LOG_DEBUG, hash_logtype_test, " @ pos %d\n", pos); } +#define KEY_PER_BUCKET 8 + /* Keys used by unit test functions */ -static struct flow_key keys[5] = { { +static struct flow_key keys[KEY_PER_BUCKET+1] = { { .ip_src = RTE_IPV4(0x03, 0x02, 0x01, 0x00), .ip_dst = RTE_IPV4(0x07, 0x06, 0x05, 0x04), .port_src = 0x0908, @@ -146,6 +148,30 @@ static struct flow_key keys[5] = { { .port_src = 0x4948, .port_dst = 0x4b4a, .proto = 0x4c, +}, { + .ip_src = RTE_IPV4(0x53, 0x52, 0x51, 0x50), + .ip_dst = RTE_IPV4(0x57, 0x56, 0x55, 0x54), + .port_src = 0x5958, + .port_dst = 0x5b5a, + .proto = 0x5c, +}, { + .ip_src = RTE_IPV4(0x63, 0x62, 0x61, 0x60), + .ip_dst = RTE_IPV4(0x67, 0x66, 0x65, 0x64), + .port_src = 0x6968, + .port_dst = 0x6b6a, + .proto = 0x6c, +}, { + .ip_src = RTE_IPV4(0x73, 0x72, 0x71, 0x70), + .ip_dst = RTE_IPV4(0x77, 0x76, 0x75, 0x74), + .port_src = 0x7978, + .port_dst = 0x7b7a, + .proto = 0x7c, +}, { + .ip_src = RTE_IPV4(0x83, 0x82, 0x81, 0x80), + .ip_dst = RTE_IPV4(0x87, 0x86, 0x85, 0x84), + .port_src = 0x8988, + .port_dst = 0x8b8a, + .proto = 0x8c, } }; /* Parameters used for hash table in unit test functions. Name set later. */ @@ -783,13 +809,15 @@ static int test_five_keys(void) /* * Add keys to the same bucket until bucket full. 
- * - add 5 keys to the same bucket (hash created with 4 keys per bucket): - * first 4 successful, 5th successful, pushing existing item in bucket - * - lookup the 5 keys: 5 hits - * - add the 5 keys again: 5 OK - * - lookup the 5 keys: 5 hits (updated data) - * - delete the 5 keys: 5 OK - * - lookup the 5 keys: 5 misses + * - add 9 keys to the same bucket (hash created with 8 keys per bucket): + * first 8 successful, 9th successful, pushing existing item in bucket + * - lookup the 9 keys: 9 hits + * - bulk lookup for all the 9 keys: 9 hits + * - add the 9 keys again: 9 OK + * - lookup the 9 keys: 9 hits (updated data) + * - delete the 9 keys: 9 OK + * - lookup the 9 keys: 9 misses + * - bulk lookup for all the 9 keys: 9 misses */ static int test_full_bucket(void) { @@ -801,16 +829,17 @@ static int test_full_bucket(void) .hash_func_init_val = 0, .socket_id = 0, }; + const void *key_array[KEY_PER_BUCKET+1] = {0}; struct rte_hash *handle; - int pos[5]; - int expected_pos[5]; + int pos[KEY_PER_BUCKET+1]; + int expected_pos[KEY_PER_BUCKET+1]; unsigned i; - + int ret; handle = rte_hash_create(¶ms_pseudo_hash); RETURN_IF_ERROR(handle == NULL, "hash creation failed"); /* Fill bucket */ - for (i = 0; i < 4; i++) { + for (i = 0; i < KEY_PER_BUCKET; i++) { pos[i] = rte_hash_add_key(handle, &keys[i]); print_key_info("Add", &keys[i], pos[i]); RETURN_IF_ERROR(pos[i] < 0, @@ -821,22 +850,36 @@ static int test_full_bucket(void) * This should work and will push one of the items * in the bucket because it is full */ - pos[4] = rte_hash_add_key(handle, &keys[4]); - print_key_info("Add", &keys[4], pos[4]); - RETURN_IF_ERROR(pos[4] < 0, - "failed to add key (pos[4]=%d)", pos[4]); - expected_pos[4] = pos[4]; + pos[KEY_PER_BUCKET] = rte_hash_add_key(handle, &keys[KEY_PER_BUCKET]); + print_key_info("Add", &keys[KEY_PER_BUCKET], pos[KEY_PER_BUCKET]); + RETURN_IF_ERROR(pos[KEY_PER_BUCKET] < 0, + "failed to add key (pos[%d]=%d)", KEY_PER_BUCKET, pos[KEY_PER_BUCKET]); + expected_pos[KEY_PER_BUCKET] = pos[KEY_PER_BUCKET]; /* Lookup */ - for (i = 0; i < 5; i++) { + for (i = 0; i < KEY_PER_BUCKET+1; i++) { pos[i] = rte_hash_lookup(handle, &keys[i]); print_key_info("Lkp", &keys[i], pos[i]); RETURN_IF_ERROR(pos[i] != expected_pos[i], "failed to find key (pos[%u]=%d)", i, pos[i]); } + for (i = 0; i < KEY_PER_BUCKET+1; i++) + key_array[i] = &keys[i]; + + /*Bulk lookup after add with same hash*/ + ret = rte_hash_lookup_bulk(handle, &key_array[0], KEY_PER_BUCKET+1, (int32_t *)pos); + if (ret == 0) + for (i = 0; i < KEY_PER_BUCKET+1; i++) { + print_key_info("Blk_Lkp", key_array[i], pos[i]); + RETURN_IF_ERROR(pos[i] != expected_pos[i], + "failed to find key (pos[%u]=%d)", i, pos[i]); + } + + + /* Add - update */ - for (i = 0; i < 5; i++) { + for (i = 0; i < KEY_PER_BUCKET+1; i++) { pos[i] = rte_hash_add_key(handle, &keys[i]); print_key_info("Add", &keys[i], pos[i]); RETURN_IF_ERROR(pos[i] != expected_pos[i], @@ -844,7 +887,7 @@ static int test_full_bucket(void) } /* Lookup */ - for (i = 0; i < 5; i++) { + for (i = 0; i < KEY_PER_BUCKET+1; i++) { pos[i] = rte_hash_lookup(handle, &keys[i]); print_key_info("Lkp", &keys[i], pos[i]); RETURN_IF_ERROR(pos[i] != expected_pos[i], @@ -869,7 +912,7 @@ static int test_full_bucket(void) RETURN_IF_ERROR(pos[1] < 0, "failed to add key (pos[1]=%d)", pos[1]); /* Delete */ - for (i = 0; i < 5; i++) { + for (i = 0; i < KEY_PER_BUCKET+1; i++) { pos[i] = rte_hash_del_key(handle, &keys[i]); print_key_info("Del", &keys[i], pos[i]); RETURN_IF_ERROR(pos[i] != expected_pos[i], @@ -877,13 +920,23 @@ 
static int test_full_bucket(void) } /* Lookup */ - for (i = 0; i < 5; i++) { + for (i = 0; i < KEY_PER_BUCKET+1; i++) { pos[i] = rte_hash_lookup(handle, &keys[i]); print_key_info("Lkp", &keys[i], pos[i]); RETURN_IF_ERROR(pos[i] != -ENOENT, "fail: found non-existent key (pos[%u]=%d)", i, pos[i]); } + /* Bulk Lookup on empty table*/ + ret = rte_hash_lookup_bulk(handle, &key_array[0], KEY_PER_BUCKET+1, (int32_t *)pos); + if (ret == 0) + for (i = 0; i < KEY_PER_BUCKET+1; i++) { + print_key_info("Blk_Lkp", key_array[i], pos[i]); + RETURN_IF_ERROR(pos[i] != -ENOENT, + "failed to find key (pos[%u]=%d)", i, pos[i]); + } + + rte_hash_free(handle); /* Cover the NULL case. */

From patchwork Fri Oct 20 16:51:59 2023
X-Patchwork-Submitter: Yoan Picchi
X-Patchwork-Id: 133166
From: Yoan Picchi
To: Yipeng Wang, Sameh Gobriel, Bruce Richardson, Vladimir Medvedkin
Cc: dev@dpdk.org, Yoan Picchi, Harjot Singh
Subject: [PATCH v2 4/4] hash: add SVE support for bulk key lookup
Date: Fri, 20 Oct 2023 16:51:59 +0000
Message-Id: <20231020165159.1649282-5-yoan.picchi@arm.com>
In-Reply-To: <20231020165159.1649282-1-yoan.picchi@arm.com>
References: <20231020165159.1649282-1-yoan.picchi@arm.com>

- Implemented SVE code for comparing signatures in bulk lookup.
- Added defines in code for SVE code support.
- Optimized the NEON code.
- The new SVE code is ~3% slower than the optimized NEON on an N2 processor.
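A scalar sketch of the hitmask layout this patch settles on (illustration only, not the DPDK code): compare_signatures_dense() now fills a single 16-bit word per lookup, primary-bucket hits in the low RTE_HASH_BUCKET_ENTRIES bits and secondary-bucket hits in the high bits, which is what the default (non-vector) branch of the function computes.

#include <stdio.h>
#include <stdint.h>

#define BUCKET_ENTRIES 8

/* Pack primary and secondary bucket hits into one dense 16-bit mask. */
static uint16_t combined_hitmask(const uint16_t *prim, const uint16_t *sec,
				 uint16_t sig)
{
	uint16_t m = 0;
	int i;

	for (i = 0; i < BUCKET_ENTRIES; i++) {
		m |= (uint16_t)(prim[i] == sig) << i;
		m |= (uint16_t)(sec[i] == sig) << (i + BUCKET_ENTRIES);
	}
	return m;
}

int main(void)
{
	const uint16_t prim[BUCKET_ENTRIES] = {5, 0, 0, 0, 0, 0, 0, 0};
	const uint16_t sec[BUCKET_ENTRIES]  = {0, 0, 5, 0, 0, 0, 0, 0};
	uint16_t m = combined_hitmask(prim, sec, 5);

	/* Low byte: primary hits (0x01). High byte: secondary hits (0x04). */
	printf("prim=0x%02x sec=0x%02x\n", m & 0xff, m >> 8);
	return 0;
}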
Signed-off-by: Yoan Picchi Signed-off-by: Harjot Singh --- lib/hash/rte_cuckoo_hash.c | 196 ++++++++++++++++++++++++++++--------- lib/hash/rte_cuckoo_hash.h | 1 + 2 files changed, 151 insertions(+), 46 deletions(-) diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c index a4b907c45c..cda39d1441 100644 --- a/lib/hash/rte_cuckoo_hash.c +++ b/lib/hash/rte_cuckoo_hash.c @@ -435,8 +435,11 @@ rte_hash_create(const struct rte_hash_parameters *params) h->sig_cmp_fn = RTE_HASH_COMPARE_SSE; else #elif defined(RTE_ARCH_ARM64) - if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) { h->sig_cmp_fn = RTE_HASH_COMPARE_NEON; + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE)) + h->sig_cmp_fn = RTE_HASH_COMPARE_SVE; + } else #endif h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR; @@ -1853,37 +1856,103 @@ rte_hash_free_key_with_position(const struct rte_hash *h, #if defined(__ARM_NEON) static inline void -compare_signatures_dense(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, - const struct rte_hash_bucket *prim_bkt, - const struct rte_hash_bucket *sec_bkt, +compare_signatures_dense(uint16_t *hitmask_buffer, + const uint16_t *prim_bucket_sigs, + const uint16_t *sec_bucket_sigs, uint16_t sig, enum rte_hash_sig_compare_function sig_cmp_fn) { unsigned int i; + static_assert(sizeof(*hitmask_buffer) >= 2*(RTE_HASH_BUCKET_ENTRIES/8), + "The hitmask must be exactly wide enough to accept the whole hitmask if it is dense"); + /* For match mask every bits indicates the match */ switch (sig_cmp_fn) { +#if defined(__ARM_NEON) && RTE_HASH_BUCKET_ENTRIES <= 8 case RTE_HASH_COMPARE_NEON: { - uint16x8_t vmat, x; + uint16x8_t vmat, hit1, hit2; const uint16x8_t mask = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80}; const uint16x8_t vsig = vld1q_dup_u16((uint16_t const *)&sig); /* Compare all signatures in the primary bucket */ - vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)prim_bkt->sig_current)); - x = vandq_u16(vmat, mask); - *prim_hash_matches = (uint32_t)(vaddvq_u16(x)); + vmat = vceqq_u16(vsig, vld1q_u16(prim_bucket_sigs)); + hit1 = vandq_u16(vmat, mask); + /* Compare all signatures in the secondary bucket */ - vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)sec_bkt->sig_current)); - x = vandq_u16(vmat, mask); - *sec_hash_matches = (uint32_t)(vaddvq_u16(x)); + vmat = vceqq_u16(vsig, vld1q_u16(sec_bucket_sigs)); + hit2 = vandq_u16(vmat, mask); + + hit2 = vshlq_n_u16(hit2, RTE_HASH_BUCKET_ENTRIES); + hit2 = vorrq_u16(hit1, hit2); + *hitmask_buffer = vaddvq_u16(hit2); + } + break; +#endif +#if defined(RTE_HAS_SVE_ACLE) + case RTE_HASH_COMPARE_SVE: { + svuint16_t vsign, shift, sv_matches; + svbool_t pred, match, bucket_wide_pred; + int i = 0; + uint64_t vl = svcnth(); + + vsign = svdup_u16(sig); + shift = svindex_u16(0, 1); + + if (vl >= 2 * RTE_HASH_BUCKET_ENTRIES && RTE_HASH_BUCKET_ENTRIES <= 8) { + svuint16_t primary_array_vect, secondary_array_vect; + bucket_wide_pred = svwhilelt_b16(0, RTE_HASH_BUCKET_ENTRIES); + primary_array_vect = svld1_u16(bucket_wide_pred, prim_bucket_sigs); + secondary_array_vect = svld1_u16(bucket_wide_pred, sec_bucket_sigs); + + /* We merged the two vectors so we can do both comparison at once */ + primary_array_vect = svsplice_u16(bucket_wide_pred, + primary_array_vect, + secondary_array_vect); + pred = svwhilelt_b16(0, 2*RTE_HASH_BUCKET_ENTRIES); + + /* Compare all signatures in the buckets */ + match = svcmpeq_u16(pred, vsign, primary_array_vect); + if (svptest_any(svptrue_b16(), match)) { + sv_matches = svdup_u16(1); + 
sv_matches = svlsl_u16_z(match, sv_matches, shift); + *hitmask_buffer = svorv_u16(svptrue_b16(), sv_matches); + } + } else { + do { + pred = svwhilelt_b16(i, RTE_HASH_BUCKET_ENTRIES); + int lower_half = 0; + int upper_half = 0; + /* Compare all signatures in the primary bucket */ + match = svcmpeq_u16(pred, vsign, svld1_u16(pred, + &prim_bucket_sigs[i])); + if (svptest_any(svptrue_b16(), match)) { + sv_matches = svdup_u16(1); + sv_matches = svlsl_u16_z(match, sv_matches, shift); + lower_half = svorv_u16(svptrue_b16(), sv_matches); + } + /* Compare all signatures in the secondary bucket */ + match = svcmpeq_u16(pred, vsign, svld1_u16(pred, + &sec_bucket_sigs[i])); + if (svptest_any(svptrue_b16(), match)) { + sv_matches = svdup_u16(1); + sv_matches = svlsl_u16_z(match, sv_matches, shift); + upper_half = svorv_u16(svptrue_b16(), sv_matches) + << RTE_HASH_BUCKET_ENTRIES; + } + *(hitmask_buffer+(i/8)) = lower_half | upper_half; + i += vl; + } while (i < RTE_HASH_BUCKET_ENTRIES); + } } break; +#endif default: for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { - *prim_hash_matches |= - ((sig == prim_bkt->sig_current[i]) << i); - *sec_hash_matches |= - ((sig == sec_bkt->sig_current[i]) << i); + *hitmask_buffer |= + ((sig == prim_bucket_sigs[i]) << i); + *hitmask_buffer |= + ((sig == sec_bucket_sigs[i]) << i) << RTE_HASH_BUCKET_ENTRIES; } } } @@ -1901,7 +1970,7 @@ compare_signatures_sparse(uint32_t *prim_hash_matches, uint32_t *sec_hash_matche /* For match mask the first bit of every two bits indicates the match */ switch (sig_cmp_fn) { -#if defined(__SSE2__) +#if defined(__SSE2__) && RTE_HASH_BUCKET_ENTRIES <= 8 case RTE_HASH_COMPARE_SSE: /* Compare all signatures in the bucket */ *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16( @@ -1941,14 +2010,18 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, uint64_t hits = 0; int32_t i; int32_t ret; - uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; - uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; struct rte_hash_bucket *cur_bkt, *next_bkt; #if defined(__ARM_NEON) const int hitmask_padding = 0; + uint16_t hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0}; + + static_assert(sizeof(*hitmask_buffer)*8/2 == RTE_HASH_BUCKET_ENTRIES, + "The hitmask must be exactly wide enough to accept the whole hitmask when it is dense"); #else const int hitmask_padding = 1; + uint32_t prim_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0}; + uint32_t sec_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0}; #endif __hash_rw_reader_lock(h); @@ -1956,18 +2029,24 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, /* Compare signatures and prefetch key slot of first hit */ for (i = 0; i < num_keys; i++) { #if defined(__ARM_NEON) - compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i], - primary_bkt[i], secondary_bkt[i], + uint16_t *hitmask = &hitmask_buffer[i]; + compare_signatures_dense(hitmask, + primary_bkt[i]->sig_current, + secondary_bkt[i]->sig_current, sig[i], h->sig_cmp_fn); + const unsigned int prim_hitmask = *(uint8_t *)(hitmask); + const unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1); #else - compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i], + compare_signatures_sparse(&prim_hitmask_buffer[i], &sec_hitmask_buffer[i], primary_bkt[i], secondary_bkt[i], sig[i], h->sig_cmp_fn); + const unsigned int prim_hitmask = prim_hitmask_buffer[i]; + const unsigned int sec_hitmask = sec_hitmask_buffer[i]; #endif - if (prim_hitmask[i]) { + if (prim_hitmask) { uint32_t first_hit = - __builtin_ctzl(prim_hitmask[i]) + 
__builtin_ctzl(prim_hitmask) >> hitmask_padding; uint32_t key_idx = primary_bkt[i]->key_idx[first_hit]; @@ -1979,9 +2058,9 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, continue; } - if (sec_hitmask[i]) { + if (sec_hitmask) { uint32_t first_hit = - __builtin_ctzl(sec_hitmask[i]) + __builtin_ctzl(sec_hitmask) >> hitmask_padding; uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit]; @@ -1996,9 +2075,17 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, /* Compare keys, first hits in primary first */ for (i = 0; i < num_keys; i++) { positions[i] = -ENOENT; - while (prim_hitmask[i]) { +#if defined(__ARM_NEON) + uint16_t *hitmask = &hitmask_buffer[i]; + unsigned int prim_hitmask = *(uint8_t *)(hitmask); + unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1); +#else + unsigned int prim_hitmask = prim_hitmask_buffer[i]; + unsigned int sec_hitmask = sec_hitmask_buffer[i]; +#endif + while (prim_hitmask) { uint32_t hit_index = - __builtin_ctzl(prim_hitmask[i]) + __builtin_ctzl(prim_hitmask) >> hitmask_padding; uint32_t key_idx = primary_bkt[i]->key_idx[hit_index]; @@ -2021,12 +2108,12 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, positions[i] = key_idx - 1; goto next_key; } - prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding)); + prim_hitmask &= ~(1 << (hit_index << hitmask_padding)); } - while (sec_hitmask[i]) { + while (sec_hitmask) { uint32_t hit_index = - __builtin_ctzl(sec_hitmask[i]) + __builtin_ctzl(sec_hitmask) >> hitmask_padding; uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index]; @@ -2050,7 +2137,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys, positions[i] = key_idx - 1; goto next_key; } - sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding)); + sec_hitmask &= ~(1 << (hit_index << hitmask_padding)); } next_key: continue; @@ -2100,15 +2187,18 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, uint64_t hits = 0; int32_t i; int32_t ret; - uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; - uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; struct rte_hash_bucket *cur_bkt, *next_bkt; uint32_t cnt_b, cnt_a; #if defined(__ARM_NEON) const int hitmask_padding = 0; + uint16_t hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0}; + static_assert(sizeof(*hitmask_buffer)*8/2 == RTE_HASH_BUCKET_ENTRIES, + "The hitmask must be exactly wide enough to accept the whole hitmask chen it is dense"); #else const int hitmask_padding = 1; + uint32_t prim_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0}; + uint32_t sec_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0}; #endif for (i = 0; i < num_keys; i++) @@ -2125,18 +2215,24 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, /* Compare signatures and prefetch key slot of first hit */ for (i = 0; i < num_keys; i++) { #if defined(__ARM_NEON) - compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i], - primary_bkt[i], secondary_bkt[i], + uint16_t *hitmask = &hitmask_buffer[i]; + compare_signatures_dense(hitmask, + primary_bkt[i]->sig_current, + secondary_bkt[i]->sig_current, sig[i], h->sig_cmp_fn); + const unsigned int prim_hitmask = *(uint8_t *)(hitmask); + const unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1); #else - compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i], + compare_signatures_sparse(&prim_hitmask_buffer[i], &sec_hitmask_buffer[i], primary_bkt[i], secondary_bkt[i], sig[i], h->sig_cmp_fn); + const unsigned int prim_hitmask = prim_hitmask_buffer[i]; + const unsigned int sec_hitmask = sec_hitmask_buffer[i]; #endif - if 
(prim_hitmask[i]) { + if (prim_hitmask) { uint32_t first_hit = - __builtin_ctzl(prim_hitmask[i]) + __builtin_ctzl(prim_hitmask) >> hitmask_padding; uint32_t key_idx = primary_bkt[i]->key_idx[first_hit]; @@ -2148,9 +2244,9 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, continue; } - if (sec_hitmask[i]) { + if (sec_hitmask) { uint32_t first_hit = - __builtin_ctzl(sec_hitmask[i]) + __builtin_ctzl(sec_hitmask) >> hitmask_padding; uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit]; @@ -2164,9 +2260,17 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, /* Compare keys, first hits in primary first */ for (i = 0; i < num_keys; i++) { - while (prim_hitmask[i]) { +#if defined(__ARM_NEON) + uint16_t *hitmask = &hitmask_buffer[i]; + unsigned int prim_hitmask = *(uint8_t *)(hitmask); + unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1); +#else + unsigned int prim_hitmask = prim_hitmask_buffer[i]; + unsigned int sec_hitmask = sec_hitmask_buffer[i]; +#endif + while (prim_hitmask) { uint32_t hit_index = - __builtin_ctzl(prim_hitmask[i]) + __builtin_ctzl(prim_hitmask) >> hitmask_padding; uint32_t key_idx = __atomic_load_n( @@ -2193,12 +2297,12 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, positions[i] = key_idx - 1; goto next_key; } - prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding)); + prim_hitmask &= ~(1 << (hit_index << hitmask_padding)); } - while (sec_hitmask[i]) { + while (sec_hitmask) { uint32_t hit_index = - __builtin_ctzl(sec_hitmask[i]) + __builtin_ctzl(sec_hitmask) >> hitmask_padding; uint32_t key_idx = __atomic_load_n( @@ -2226,7 +2330,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys, positions[i] = key_idx - 1; goto next_key; } - sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding)); + sec_hitmask &= ~(1 << (hit_index << hitmask_padding)); } next_key: continue; diff --git a/lib/hash/rte_cuckoo_hash.h b/lib/hash/rte_cuckoo_hash.h index eb2644f74b..356ec2a69e 100644 --- a/lib/hash/rte_cuckoo_hash.h +++ b/lib/hash/rte_cuckoo_hash.h @@ -148,6 +148,7 @@ enum rte_hash_sig_compare_function { RTE_HASH_COMPARE_SCALAR = 0, RTE_HASH_COMPARE_SSE, RTE_HASH_COMPARE_NEON, + RTE_HASH_COMPARE_SVE, RTE_HASH_COMPARE_NUM };
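To recap the runtime selection this series ends up with, a sketch of the dispatch added to rte_hash_create() (the enum names below are placeholders for the internal rte_hash_sig_compare_function values; only the CPU-flag checks are taken from the patch).

#include <rte_cpuflags.h>

enum cmp_fn { CMP_SCALAR, CMP_NEON, CMP_SVE };

static enum cmp_fn pick_sig_cmp_fn(void)
{
#if defined(RTE_ARCH_ARM64)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
		/* Prefer SVE whenever the CPU reports it, as the patch does. */
		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
			return CMP_SVE;
		return CMP_NEON;
	}
#endif
	return CMP_SCALAR;
}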