From patchwork Tue May 4 14:25:04 2021
X-Patchwork-Submitter: Vladimir Medvedkin
X-Patchwork-Id: 92743
X-Patchwork-Delegate: thomas@monjalon.net
From: Vladimir Medvedkin
To: dev@dpdk.org
Cc: konstantin.ananyev@intel.com, andrey.chilikin@intel.com,
 ray.kinsella@intel.com, yipeng1.wang@intel.com, sameh.gobriel@intel.com,
 bruce.richardson@intel.com, david.marchand@redhat.com, kda@semihalf.com,
 vladimir.medvedkin@intel.com
Date: Tue, 4 May 2021 15:25:04 +0100
Message-Id: <1620138304-203463-1-git-send-email-vladimir.medvedkin@intel.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1620137248-203174-1-git-send-email-vladimir.medvedkin@intel.com>
References: <1620137248-203174-1-git-send-email-vladimir.medvedkin@intel.com>
Subject: [dpdk-dev] [PATCH v2] hash: fix tuple adjustment

rte_thash_adjust_tuple() uses random bits to generate a new subtuple if
the fn() callback reports a collision. In some cases the random bits
change the subtuple in such a way that, once the complementary bits are
applied, the original tuple is obtained again. This patch replaces the
random bits with a subtuple increment.
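
As an illustration only (not part of the patch), the difference between the
two retry strategies can be sketched in plain C. retry_random() and
retry_increment() are hypothetical helpers, and the subtuple width is assumed
to be smaller than 32 bits:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Old behaviour (sketch): XOR random bits into the subtuple. Because XOR is
 * an involution, the complementary bits applied on the next attempt can
 * cancel the change and reproduce the original tuple.
 */
static uint32_t
retry_random(uint32_t subtuple, unsigned int width)
{
	return (subtuple ^ (uint32_t)rand()) & ((1u << width) - 1);
}

/* New behaviour (sketch): increment the subtuple, so every attempt yields a
 * candidate that has not been tried before, modulo the field width.
 */
static uint32_t
retry_increment(uint32_t subtuple, unsigned int width)
{
	return (subtuple + 1) & ((1u << width) - 1);
}

int
main(void)
{
	uint32_t s = 0x5a;	/* arbitrary 8-bit subtuple */

	printf("random:    0x%02x\n", retry_random(s, 8));
	printf("increment: 0x%02x\n", retry_increment(s, 8));
	return 0;
}
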
Fixes: 28ebff11c2dc ("hash: add predictable RSS")
Cc: vladimir.medvedkin@intel.com

Signed-off-by: Vladimir Medvedkin
Acked-by: Yipeng Wang
Tested-by: Stanislaw Kardach
Reviewed-by: Stanislaw Kardach
---
 lib/hash/rte_thash.c | 121 ++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 100 insertions(+), 21 deletions(-)

diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c
index 135a26d..58129df 100644
--- a/lib/hash/rte_thash.c
+++ b/lib/hash/rte_thash.c
@@ -610,16 +610,91 @@ rte_thash_get_key(struct rte_thash_ctx *ctx)
 	return ctx->hash_key;
 }
 
+static inline uint8_t
+read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset)
+{
+	uint8_t ret = 0;
+
+	ret = ptr[offset / CHAR_BIT];
+	if (offset % CHAR_BIT) {
+		ret <<= (offset % CHAR_BIT);
+		ret |= ptr[(offset / CHAR_BIT) + 1] >>
+			(CHAR_BIT - (offset % CHAR_BIT));
+	}
+
+	return ret >> (CHAR_BIT - len);
+}
+
+static inline uint32_t
+read_unaligned_bits(uint8_t *ptr, int len, int offset)
+{
+	uint32_t ret = 0;
+
+	len = RTE_MAX(len, 0);
+	len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
+
+	while (len > 0) {
+		ret <<= CHAR_BIT;
+
+		ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT),
+			offset);
+		offset += CHAR_BIT;
+		len -= CHAR_BIT;
+	}
+
+	return ret;
+}
+
+/* returns mask for len bits with given offset inside byte */
+static inline uint8_t
+get_bits_mask(unsigned int len, unsigned int offset)
+{
+	unsigned int last_bit;
+
+	offset %= CHAR_BIT;
+	/* last bit within byte */
+	last_bit = RTE_MIN((unsigned int)CHAR_BIT, offset + len);
+
+	return ((1 << (CHAR_BIT - offset)) - 1) ^
+		((1 << (CHAR_BIT - last_bit)) - 1);
+}
+
+static inline void
+write_unaligned_byte(uint8_t *ptr, unsigned int len,
+	unsigned int offset, uint8_t val)
+{
+	uint8_t tmp;
+
+	tmp = ptr[offset / CHAR_BIT];
+	tmp &= ~get_bits_mask(len, offset);
+	tmp |= ((val << (CHAR_BIT - len)) >> (offset % CHAR_BIT));
+	ptr[offset / CHAR_BIT] = tmp;
+	if (((offset + len) / CHAR_BIT) != (offset / CHAR_BIT)) {
+		int rest_len = (offset + len) % CHAR_BIT;
+		tmp = ptr[(offset + len) / CHAR_BIT];
+		tmp &= ~get_bits_mask(rest_len, 0);
+		tmp |= val << (CHAR_BIT - rest_len);
+		ptr[(offset + len) / CHAR_BIT] = tmp;
+	}
+}
+
 static inline void
-xor_bit(uint8_t *ptr, uint32_t bit, uint32_t pos)
+write_unaligned_bits(uint8_t *ptr, int len, int offset, uint32_t val)
 {
-	uint32_t byte_idx = pos >> 3;
-	uint32_t bit_idx = (CHAR_BIT - 1) - (pos & (CHAR_BIT - 1));
 	uint8_t tmp;
+	unsigned int part_len;
+
+	len = RTE_MAX(len, 0);
+	len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
 
-	tmp = ptr[byte_idx];
-	tmp ^= bit << bit_idx;
-	ptr[byte_idx] = tmp;
+	while (len > 0) {
+		part_len = RTE_MIN(CHAR_BIT, len);
+		tmp = (uint8_t)val & ((1 << part_len) - 1);
+		write_unaligned_byte(ptr, part_len,
+			offset + len - part_len, tmp);
+		len -= CHAR_BIT;
+		val >>= CHAR_BIT;
+	}
 }
 
 int
@@ -632,8 +707,10 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
 	uint32_t tmp_tuple[tuple_len / sizeof(uint32_t)];
 	unsigned int i, j, ret = 0;
 	uint32_t hash, adj_bits;
-	uint8_t bit;
 	const uint8_t *hash_key;
+	uint32_t tmp;
+	int offset;
+	int tmp_len;
 
 	if ((ctx == NULL) || (h == NULL) || (tuple == NULL) ||
 			(tuple_len % sizeof(uint32_t) != 0) || (attempts <= 0))
@@ -641,6 +718,8 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
 
 	hash_key = rte_thash_get_key(ctx);
 
+	attempts = RTE_MIN(attempts, 1U << (h->tuple_len - ctx->reta_sz_log));
+
 	for (i = 0; i < attempts; i++) {
 		for (j = 0; j < (tuple_len / 4); j++)
 			tmp_tuple[j] =
@@ -651,14 +730,12 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
 
 		/*
 		 * Hint: LSB of adj_bits corresponds to
-		 * offset + len bit of tuple
+		 * offset + len bit of the subtuple
 		 */
-		for (j = 0; j < sizeof(uint32_t) * CHAR_BIT; j++) {
-			bit = (adj_bits >> j) & 0x1;
-			if (bit)
-				xor_bit(tuple, bit, h->tuple_offset +
-					h->tuple_len - 1 - j);
-		}
+		offset = h->tuple_offset + h->tuple_len - ctx->reta_sz_log;
+		tmp = read_unaligned_bits(tuple, ctx->reta_sz_log, offset);
+		tmp ^= adj_bits;
+		write_unaligned_bits(tuple, ctx->reta_sz_log, offset, tmp);
 
 		if (fn != NULL) {
 			ret = (fn(userdata, tuple)) ? 0 : -EEXIST;
@@ -666,13 +743,15 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
 				return 0;
 			else if (i < (attempts - 1)) {
 				/* Update tuple with random bits */
-				for (j = 0; j < h->tuple_len; j++) {
-					bit = rte_rand() & 0x1;
-					if (bit)
-						xor_bit(tuple, bit,
-							h->tuple_offset +
-							h->tuple_len - 1 - j);
-				}
+				tmp_len = RTE_MIN(sizeof(uint32_t) * CHAR_BIT,
+					h->tuple_len - ctx->reta_sz_log);
+				offset -= tmp_len;
+				tmp = read_unaligned_bits(tuple, tmp_len,
+					offset);
+				tmp++;
+				tmp &= (1 << tmp_len) - 1;
+				write_unaligned_bits(tuple, tmp_len, offset,
+					tmp);
			}
 		} else
 			return 0;
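
For readers who want to experiment with the bit manipulation above, below is a
small standalone sketch, not the DPDK code: get_bits(), set_bits() and the
sample buffer are made up for illustration. It shows the same MSB-first,
byte-unaligned bit-field read/write idea that read_unaligned_bits() and
write_unaligned_bits() implement, plus an in-place increment of such a field
as done in the new retry path:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Read 'len' bits (1..32) starting at absolute bit 'offset' from byte array
 * 'p', MSB-first (network bit order). Bit-by-bit for clarity, not speed.
 */
static uint32_t
get_bits(const uint8_t *p, unsigned int offset, unsigned int len)
{
	uint32_t v = 0;
	unsigned int i;

	for (i = 0; i < len; i++) {
		unsigned int bit = offset + i;

		v = (v << 1) |
		    ((p[bit / CHAR_BIT] >> (CHAR_BIT - 1 - bit % CHAR_BIT)) & 1u);
	}
	return v;
}

/* Write the low 'len' bits of 'val' at bit 'offset', same bit order. */
static void
set_bits(uint8_t *p, unsigned int offset, unsigned int len, uint32_t val)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		unsigned int bit = offset + i;
		uint8_t mask = 1u << (CHAR_BIT - 1 - bit % CHAR_BIT);

		if ((val >> (len - 1 - i)) & 1u)
			p[bit / CHAR_BIT] |= mask;
		else
			p[bit / CHAR_BIT] &= (uint8_t)~mask;
	}
}

int
main(void)
{
	uint8_t buf[4] = {0x12, 0x34, 0x56, 0x78};
	unsigned int offset = 4, len = 12;	/* a 12-bit field at bit 4 */
	uint32_t field = get_bits(buf, offset, len);

	printf("field = 0x%03x\n", field);	/* 0x234 */

	/* increment the field in place, as the new retry path does */
	set_bits(buf, offset, len, (field + 1) & ((1u << len) - 1));
	printf("field = 0x%03x\n", get_bits(buf, offset, len));	/* 0x235 */
	return 0;
}

Note that the patch also caps attempts at 1U << (h->tuple_len -
ctx->reta_sz_log), so the number of retries never exceeds the number of
distinct candidate subtuples the increment can produce.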