From patchwork Tue Jan 12 02:57:04 2021
X-Patchwork-Submitter: Ruifeng Wang
X-Patchwork-Id: 86359
X-Patchwork-Delegate: david.marchand@redhat.com
From: Ruifeng Wang
To: Jerin Jacob, Ruifeng Wang, Jan Viktorin, Bruce Richardson,
	Vladimir Medvedkin
Cc: dev@dpdk.org, pbhagavatula@marvell.com, hemant.agrawal@nxp.com,
	honnappa.nagarahalli@arm.com, nd@arm.com
Date: Tue, 12 Jan 2021 02:57:04 +0000
Message-Id: <20210112025709.1121523-2-ruifeng.wang@arm.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20210112025709.1121523-1-ruifeng.wang@arm.com>
References: <20201218101210.356836-1-ruifeng.wang@arm.com>
	<20210112025709.1121523-1-ruifeng.wang@arm.com>
Subject: [dpdk-dev] [PATCH v3 1/5] lpm: add sve support for lookup on Arm
	platform

Added a new path that performs the LPM IPv4 (lpm4) lookup using the Arm
Scalable Vector Extension (SVE). The SVE path is selected at build time
when the compiler defines the SVE feature macro (__ARM_FEATURE_SVE).

Signed-off-by: Ruifeng Wang
---
v2:
Fixed tbl8 group index calculation. (Vladimir)
 lib/librte_eal/arm/include/rte_vect.h |  3 +
 lib/librte_lpm/meson.build            |  2 +-
 lib/librte_lpm/rte_lpm.h              |  4 ++
 lib/librte_lpm/rte_lpm_sve.h          | 83 +++++++++++++++++++++++++++
 4 files changed, 91 insertions(+), 1 deletion(-)
 create mode 100644 lib/librte_lpm/rte_lpm_sve.h

diff --git a/lib/librte_eal/arm/include/rte_vect.h b/lib/librte_eal/arm/include/rte_vect.h
index a739e6e66..093e9122a 100644
--- a/lib/librte_eal/arm/include/rte_vect.h
+++ b/lib/librte_eal/arm/include/rte_vect.h
@@ -9,6 +9,9 @@
 #include "generic/rte_vect.h"
 #include "rte_debug.h"
 #include "arm_neon.h"
+#ifdef __ARM_FEATURE_SVE
+#include <arm_sve.h>
+#endif

 #ifdef __cplusplus
 extern "C" {
diff --git a/lib/librte_lpm/meson.build b/lib/librte_lpm/meson.build
index 6cfc083c5..f93c86640 100644
--- a/lib/librte_lpm/meson.build
+++ b/lib/librte_lpm/meson.build
@@ -5,6 +5,6 @@
 sources = files('rte_lpm.c', 'rte_lpm6.c')
 headers = files('rte_lpm.h', 'rte_lpm6.h')
 # since header files have different names, we can install all vector headers
 # without worrying about which architecture we actually need
-headers += files('rte_lpm_altivec.h', 'rte_lpm_neon.h', 'rte_lpm_sse.h')
+headers += files('rte_lpm_altivec.h', 'rte_lpm_neon.h', 'rte_lpm_sse.h', 'rte_lpm_sve.h')
 deps += ['hash']
 deps += ['rcu']
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index 1afe55cdc..28b57683b 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -402,7 +402,11 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
 	uint32_t defv);

 #if defined(RTE_ARCH_ARM)
+#ifdef __ARM_FEATURE_SVE
+#include "rte_lpm_sve.h"
+#else
 #include "rte_lpm_neon.h"
+#endif
 #elif defined(RTE_ARCH_PPC_64)
 #include "rte_lpm_altivec.h"
 #else
diff --git a/lib/librte_lpm/rte_lpm_sve.h b/lib/librte_lpm/rte_lpm_sve.h
new file mode 100644
index 000000000..2e319373e
--- /dev/null
+++ b/lib/librte_lpm/rte_lpm_sve.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Arm Limited
+ */
+
+#ifndef _RTE_LPM_SVE_H_
+#define _RTE_LPM_SVE_H_
+
+#include <rte_vect.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+__rte_internal
+static void
+__rte_lpm_lookup_vec(const struct rte_lpm *lpm, const uint32_t *ips,
+		uint32_t *__rte_restrict next_hops, const uint32_t n)
+{
+	uint32_t i = 0;
+	svuint32_t v_ip, v_idx, v_tbl24, v_tbl8, v_hop;
+	svuint32_t v_mask_xv, v_mask_v, v_mask_hop;
+	svbool_t pg = svwhilelt_b32(i, n);
+	svbool_t pv;
+
+	do {
+		v_ip = svld1(pg, &ips[i]);
+		/* Get indices for tbl24[] */
+		v_idx = svlsr_x(pg, v_ip, 8);
+		/* Extract values from tbl24[] */
+		v_tbl24 = svld1_gather_index(pg, (const uint32_t *)lpm->tbl24,
+						v_idx);
+
+		/* Create mask with valid set */
+		v_mask_v = svdup_u32_z(pg, RTE_LPM_LOOKUP_SUCCESS);
+		/* Create mask with valid and valid_group set */
+		v_mask_xv = svdup_u32_z(pg, RTE_LPM_VALID_EXT_ENTRY_BITMASK);
+		/* Create predicate for tbl24 entries: (valid && !valid_group) */
+		pv = svcmpeq(pg, svand_z(pg, v_tbl24, v_mask_xv), v_mask_v);
+		/* Create mask for next_hop in table entry */
+		v_mask_hop = svdup_u32_z(pg, 0x00ffffff);
+		/* Extract next_hop and write back */
+		v_hop = svand_x(pv, v_tbl24, v_mask_hop);
+		svst1(pv, &next_hops[i], v_hop);
+
+		/* Update predicate for tbl24 entries: (valid && valid_group) */
+		pv = svcmpeq(pg, svand_z(pg, v_tbl24, v_mask_xv), v_mask_xv);
+		/* Compute tbl8 index */
+		v_idx = svand_x(pv, v_tbl24, svdup_u32_z(pv, 0xffffff));
+		v_idx = svmul_x(pv, v_idx, RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+		v_idx = svadd_x(pv, svand_x(pv, v_ip, svdup_u32_z(pv, 0xff)),
+				v_idx);
+		/* Extract values from tbl8[] */
+		v_tbl8 = svld1_gather_index(pv, (const uint32_t *)lpm->tbl8,
+						v_idx);
+		/* Update predicate for tbl8 entries: (valid) */
+		pv = svcmpeq(pv, svand_z(pv, v_tbl8, v_mask_v), v_mask_v);
+		/* Extract next_hop and write back */
+		v_hop = svand_x(pv, v_tbl8, v_mask_hop);
+		svst1(pv, &next_hops[i], v_hop);
+
+		i += svlen(v_ip);
+		pg = svwhilelt_b32(i, n);
+	} while (svptest_any(svptrue_b32(), pg));
+}
+
+static inline void
+rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
+	uint32_t defv)
+{
+	uint32_t i, ips[4];
+
+	vst1q_s32((int32_t *)ips, ip);
+	for (i = 0; i < 4; i++)
+		hop[i] = defv;
+
+	__rte_lpm_lookup_vec(lpm, ips, hop, 4);
+}
+
+#ifdef __cplusplus
}
+#endif
+
+#endif /* _RTE_LPM_SVE_H_ */
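
A note on the loop structure above, for readers new to SVE: svwhilelt_b32()
builds a predicate whose lane k is active only while i + k < n, and
svptest_any() ends the loop once no lane remains active, so the lookup
handles any batch size and any hardware vector length without a scalar
remainder loop. The sketch below isolates that pattern on a toy
direct-indexed table. It is illustrative only and not part of the patch;
toy_lookup and its parameter names are hypothetical. It should build with
any SVE-capable toolchain, e.g. gcc -march=armv8.2-a+sve.

	#include <stdint.h>
	#include <arm_sve.h>

	/* Gather out[j] = table[keys[j]] for j = 0..n-1, using the same
	 * whilelt-predicated loop as __rte_lpm_lookup_vec() above.
	 */
	static void
	toy_lookup(const uint32_t *table, const uint32_t *keys,
			uint32_t *out, uint32_t n)
	{
		uint32_t i = 0;
		/* Predicate: lane k is active iff i + k < n */
		svbool_t pg = svwhilelt_b32(i, n);

		do {
			/* Load up to one vector's worth of keys */
			svuint32_t v_keys = svld1(pg, &keys[i]);
			/* Gather table entries at those indices */
			svuint32_t v_val = svld1_gather_index(pg, table, v_keys);
			/* Store results for the active lanes only */
			svst1(pg, &out[i], v_val);

			i += svlen(v_keys);	/* lanes per vector (VL-dependent) */
			pg = svwhilelt_b32(i, n);
		} while (svptest_any(svptrue_b32(), pg));
	}

This vector-length-agnostic structure is what lets the patch's lookup run
unchanged on any SVE implementation, from 128-bit to 2048-bit vectors.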