Patch Detail
get:
Show a patch.
patch:
Update a patch.
put:
Update a patch.
GET /api/patches/73542/?format=api
https://patches.dpdk.org/api/patches/73542/?format=api", "web_url": "https://patches.dpdk.org/project/dpdk/patch/22e490923096cda78424f681424017469c96593c.1594238610.git.vladimir.medvedkin@intel.com/", "project": { "id": 1, "url": "https://patches.dpdk.org/api/projects/1/?format=api", "name": "DPDK", "link_name": "dpdk", "list_id": "dev.dpdk.org", "list_email": "dev@dpdk.org", "web_url": "http://core.dpdk.org", "scm_url": "git://dpdk.org/dpdk", "webscm_url": "http://git.dpdk.org/dpdk", "list_archive_url": "https://inbox.dpdk.org/dev", "list_archive_url_format": "https://inbox.dpdk.org/dev/{}", "commit_url_format": "" }, "msgid": "<22e490923096cda78424f681424017469c96593c.1594238610.git.vladimir.medvedkin@intel.com>", "list_archive_url": "https://inbox.dpdk.org/dev/22e490923096cda78424f681424017469c96593c.1594238610.git.vladimir.medvedkin@intel.com", "date": "2020-07-08T20:16:08", "name": "[v4,3/8] fib: move lookup definition into the header file", "commit_ref": null, "pull_url": null, "state": "superseded", "archived": true, "hash": "5adddd29eb50f9b240269a5f415382c45e573902", "submitter": { "id": 1216, "url": "https://patches.dpdk.org/api/people/1216/?format=api", "name": "Vladimir Medvedkin", "email": "vladimir.medvedkin@intel.com" }, "delegate": { "id": 1, "url": "https://patches.dpdk.org/api/users/1/?format=api", "username": "tmonjalo", "first_name": "Thomas", "last_name": "Monjalon", "email": "thomas@monjalon.net" }, "mbox": "https://patches.dpdk.org/project/dpdk/patch/22e490923096cda78424f681424017469c96593c.1594238610.git.vladimir.medvedkin@intel.com/mbox/", "series": [ { "id": 10896, "url": "https://patches.dpdk.org/api/series/10896/?format=api", "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=10896", "date": "2020-07-08T20:16:05", "name": "fib: implement AVX512 vector lookup", "version": 4, "mbox": "https://patches.dpdk.org/series/10896/mbox/" } ], "comments": "https://patches.dpdk.org/api/patches/73542/comments/", "check": "warning", 
"checks": "https://patches.dpdk.org/api/patches/73542/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<dev-bounces@dpdk.org>", "X-Original-To": "patchwork@inbox.dpdk.org", "Delivered-To": "patchwork@inbox.dpdk.org", "Received": [ "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 4CD7AA0526;\n\tWed, 8 Jul 2020 22:17:19 +0200 (CEST)", "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 838F51DD02;\n\tWed, 8 Jul 2020 22:16:57 +0200 (CEST)", "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by dpdk.org (Postfix) with ESMTP id 2347B1DC55\n for <dev@dpdk.org>; Wed, 8 Jul 2020 22:16:52 +0200 (CEST)", "from fmsmga002.fm.intel.com ([10.253.24.26])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 08 Jul 2020 13:16:52 -0700", "from silpixa00400322.ir.intel.com ([10.237.214.86])\n by fmsmga002.fm.intel.com with ESMTP; 08 Jul 2020 13:16:51 -0700" ], "IronPort-SDR": [ "\n ru0jgPqXcHZk9SEp77YTgf8X6gxl8vpt1GFwqduBVXYGu38I+OIJJDwVgxduCPufuJLAeMVYxw\n ijICcV25T0GQ==", "\n X+CIenj1VHVGVOYh8QilFHdGqQDMtYMSgvtrgwpj6H/yukOuPfcRKu+ZU8T2+Y7wWTA3ROQjqR\n gaRMRu4pA2Lg==" ], "X-IronPort-AV": [ "E=McAfee;i=\"6000,8403,9676\"; a=\"135346082\"", "E=Sophos;i=\"5.75,329,1589266800\"; d=\"scan'208\";a=\"135346082\"", "E=Sophos;i=\"5.75,329,1589266800\"; d=\"scan'208\";a=\"315988592\"" ], "X-Amp-Result": "SKIPPED(no attachment in message)", "X-Amp-File-Uploaded": "False", "X-ExtLoop1": "1", "From": "Vladimir Medvedkin <vladimir.medvedkin@intel.com>", "To": "dev@dpdk.org", "Cc": "konstantin.ananyev@intel.com,\n\tbruce.richardson@intel.com", "Date": "Wed, 8 Jul 2020 21:16:08 +0100", "Message-Id": "\n <22e490923096cda78424f681424017469c96593c.1594238610.git.vladimir.medvedkin@intel.com>", "X-Mailer": "git-send-email 2.17.1", "In-Reply-To": [ "<cover.1594238609.git.vladimir.medvedkin@intel.com>", "<cover.1594238609.git.vladimir.medvedkin@intel.com>" ], "References": [ 
"<cover.1594238609.git.vladimir.medvedkin@intel.com>", "<cover.1589890262.git.vladimir.medvedkin@intel.com>\n <cover.1594238609.git.vladimir.medvedkin@intel.com>" ], "MIME-Version": "1.0", "Content-Type": "text/plain; charset=UTF-8", "Content-Transfer-Encoding": "8bit", "Subject": "[dpdk-dev] [PATCH v4 3/8] fib: move lookup definition into the\n\theader file", "X-BeenThere": "dev@dpdk.org", "X-Mailman-Version": "2.1.15", "Precedence": "list", "List-Id": "DPDK patches and discussions <dev.dpdk.org>", "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>", "List-Archive": "<http://mails.dpdk.org/archives/dev/>", "List-Post": "<mailto:dev@dpdk.org>", "List-Help": "<mailto:dev-request@dpdk.org?subject=help>", "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>", "Errors-To": "dev-bounces@dpdk.org", "Sender": "\"dev\" <dev-bounces@dpdk.org>" }, "content": "Move dir24_8 table layout and lookup defenition into the\nprivate header file. 
This is necessary for implementing a\nvectorized lookup function in a separate .с file.\n\nSigned-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>\nAcked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\n---\n lib/librte_fib/dir24_8.c | 225 +--------------------------------------\n lib/librte_fib/dir24_8.h | 224 ++++++++++++++++++++++++++++++++++++++\n 2 files changed, 225 insertions(+), 224 deletions(-)", "diff": "diff --git a/lib/librte_fib/dir24_8.c b/lib/librte_fib/dir24_8.c\nindex 825d061fd..9d74653cf 100644\n--- a/lib/librte_fib/dir24_8.c\n+++ b/lib/librte_fib/dir24_8.c\n@@ -11,240 +11,17 @@\n \n #include <rte_debug.h>\n #include <rte_malloc.h>\n-#include <rte_prefetch.h>\n #include <rte_errno.h>\n #include <rte_memory.h>\n-#include <rte_branch_prediction.h>\n \n-#include <rte_fib.h>\n #include <rte_rib.h>\n+#include <rte_fib.h>\n #include \"dir24_8.h\"\n \n #define DIR24_8_NAMESIZE\t64\n \n-#define DIR24_8_TBL24_NUM_ENT\t\t(1 << 24)\n-#define DIR24_8_TBL8_GRP_NUM_ENT\t256U\n-#define DIR24_8_EXT_ENT\t\t\t1\n-#define DIR24_8_TBL24_MASK\t\t0xffffff00\n-\n-#define BITMAP_SLAB_BIT_SIZE_LOG2\t6\n-#define BITMAP_SLAB_BIT_SIZE\t\t(1 << BITMAP_SLAB_BIT_SIZE_LOG2)\n-#define BITMAP_SLAB_BITMASK\t\t(BITMAP_SLAB_BIT_SIZE - 1)\n-\n-struct dir24_8_tbl {\n-\tuint32_t\tnumber_tbl8s;\t/**< Total number of tbl8s */\n-\tuint32_t\trsvd_tbl8s;\t/**< Number of reserved tbl8s */\n-\tuint32_t\tcur_tbl8s;\t/**< Current number of tbl8s */\n-\tenum rte_fib_dir24_8_nh_sz\tnh_sz;\t/**< Size of nexthop entry */\n-\tuint64_t\tdef_nh;\t\t/**< Default next hop */\n-\tuint64_t\t*tbl8;\t\t/**< tbl8 table. */\n-\tuint64_t\t*tbl8_idxes;\t/**< bitmap containing free tbl8 idxes*/\n-\t/* tbl24 table. 
*/\n-\t__extension__ uint64_t\ttbl24[0] __rte_cache_aligned;\n-};\n-\n #define ROUNDUP(x, y)\t RTE_ALIGN_CEIL(x, (1 << (32 - y)))\n \n-static inline void *\n-get_tbl24_p(struct dir24_8_tbl *dp, uint32_t ip, uint8_t nh_sz)\n-{\n-\treturn (void *)&((uint8_t *)dp->tbl24)[(ip &\n-\t\tDIR24_8_TBL24_MASK) >> (8 - nh_sz)];\n-}\n-\n-static inline uint8_t\n-bits_in_nh(uint8_t nh_sz)\n-{\n-\treturn 8 * (1 << nh_sz);\n-}\n-\n-static inline uint64_t\n-get_max_nh(uint8_t nh_sz)\n-{\n-\treturn ((1ULL << (bits_in_nh(nh_sz) - 1)) - 1);\n-}\n-\n-static inline uint32_t\n-get_tbl24_idx(uint32_t ip)\n-{\n-\treturn ip >> 8;\n-}\n-\n-static inline uint32_t\n-get_tbl8_idx(uint32_t res, uint32_t ip)\n-{\n-\treturn (res >> 1) * DIR24_8_TBL8_GRP_NUM_ENT + (uint8_t)ip;\n-}\n-\n-static inline uint64_t\n-lookup_msk(uint8_t nh_sz)\n-{\n-\treturn ((1ULL << ((1 << (nh_sz + 3)) - 1)) << 1) - 1;\n-}\n-\n-static inline uint8_t\n-get_psd_idx(uint32_t val, uint8_t nh_sz)\n-{\n-\treturn val & ((1 << (3 - nh_sz)) - 1);\n-}\n-\n-static inline uint32_t\n-get_tbl_idx(uint32_t val, uint8_t nh_sz)\n-{\n-\treturn val >> (3 - nh_sz);\n-}\n-\n-static inline uint64_t\n-get_tbl24(struct dir24_8_tbl *dp, uint32_t ip, uint8_t nh_sz)\n-{\n-\treturn ((dp->tbl24[get_tbl_idx(get_tbl24_idx(ip), nh_sz)] >>\n-\t\t(get_psd_idx(get_tbl24_idx(ip), nh_sz) *\n-\t\tbits_in_nh(nh_sz))) & lookup_msk(nh_sz));\n-}\n-\n-static inline uint64_t\n-get_tbl8(struct dir24_8_tbl *dp, uint32_t res, uint32_t ip, uint8_t nh_sz)\n-{\n-\treturn ((dp->tbl8[get_tbl_idx(get_tbl8_idx(res, ip), nh_sz)] >>\n-\t\t(get_psd_idx(get_tbl8_idx(res, ip), nh_sz) *\n-\t\tbits_in_nh(nh_sz))) & lookup_msk(nh_sz));\n-}\n-\n-static inline int\n-is_entry_extended(uint64_t ent)\n-{\n-\treturn (ent & DIR24_8_EXT_ENT) == DIR24_8_EXT_ENT;\n-}\n-\n-#define LOOKUP_FUNC(suffix, type, bulk_prefetch, nh_sz)\t\t\t\\\n-static void dir24_8_lookup_bulk_##suffix(void *p, const uint32_t *ips,\t\\\n-\tuint64_t *next_hops, const unsigned int 
n)\t\t\t\\\n-{\t\t\t\t\t\t\t\t\t\\\n-\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\t\t\\\n-\tuint64_t tmp;\t\t\t\t\t\t\t\\\n-\tuint32_t i;\t\t\t\t\t\t\t\\\n-\tuint32_t prefetch_offset =\t\t\t\t\t\\\n-\t\tRTE_MIN((unsigned int)bulk_prefetch, n);\t\t\\\n-\t\t\t\t\t\t\t\t\t\\\n-\tfor (i = 0; i < prefetch_offset; i++)\t\t\t\t\\\n-\t\trte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz));\t\t\\\n-\tfor (i = 0; i < (n - prefetch_offset); i++) {\t\t\t\\\n-\t\trte_prefetch0(get_tbl24_p(dp,\t\t\t\t\\\n-\t\t\tips[i + prefetch_offset], nh_sz));\t\t\\\n-\t\ttmp = ((type *)dp->tbl24)[ips[i] >> 8];\t\t\t\\\n-\t\tif (unlikely(is_entry_extended(tmp)))\t\t\t\\\n-\t\t\ttmp = ((type *)dp->tbl8)[(uint8_t)ips[i] +\t\\\n-\t\t\t\t((tmp >> 1) * DIR24_8_TBL8_GRP_NUM_ENT)]; \\\n-\t\tnext_hops[i] = tmp >> 1;\t\t\t\t\\\n-\t}\t\t\t\t\t\t\t\t\\\n-\tfor (; i < n; i++) {\t\t\t\t\t\t\\\n-\t\ttmp = ((type *)dp->tbl24)[ips[i] >> 8];\t\t\t\\\n-\t\tif (unlikely(is_entry_extended(tmp)))\t\t\t\\\n-\t\t\ttmp = ((type *)dp->tbl8)[(uint8_t)ips[i] +\t\\\n-\t\t\t\t((tmp >> 1) * DIR24_8_TBL8_GRP_NUM_ENT)]; \\\n-\t\tnext_hops[i] = tmp >> 1;\t\t\t\t\\\n-\t}\t\t\t\t\t\t\t\t\\\n-}\t\t\t\t\t\t\t\t\t\\\n-\n-LOOKUP_FUNC(1b, uint8_t, 5, 0)\n-LOOKUP_FUNC(2b, uint16_t, 6, 1)\n-LOOKUP_FUNC(4b, uint32_t, 15, 2)\n-LOOKUP_FUNC(8b, uint64_t, 12, 3)\n-\n-static inline void\n-dir24_8_lookup_bulk(struct dir24_8_tbl *dp, const uint32_t *ips,\n-\tuint64_t *next_hops, const unsigned int n, uint8_t nh_sz)\n-{\n-\tuint64_t tmp;\n-\tuint32_t i;\n-\tuint32_t prefetch_offset = RTE_MIN(15U, n);\n-\n-\tfor (i = 0; i < prefetch_offset; i++)\n-\t\trte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz));\n-\tfor (i = 0; i < (n - prefetch_offset); i++) {\n-\t\trte_prefetch0(get_tbl24_p(dp, ips[i + prefetch_offset],\n-\t\t\tnh_sz));\n-\t\ttmp = get_tbl24(dp, ips[i], nh_sz);\n-\t\tif (unlikely(is_entry_extended(tmp)))\n-\t\t\ttmp = get_tbl8(dp, tmp, ips[i], nh_sz);\n-\n-\t\tnext_hops[i] = tmp >> 1;\n-\t}\n-\tfor (; i < n; i++) {\n-\t\ttmp = get_tbl24(dp, 
ips[i], nh_sz);\n-\t\tif (unlikely(is_entry_extended(tmp)))\n-\t\t\ttmp = get_tbl8(dp, tmp, ips[i], nh_sz);\n-\n-\t\tnext_hops[i] = tmp >> 1;\n-\t}\n-}\n-\n-static void\n-dir24_8_lookup_bulk_0(void *p, const uint32_t *ips,\n-\tuint64_t *next_hops, const unsigned int n)\n-{\n-\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n-\n-\tdir24_8_lookup_bulk(dp, ips, next_hops, n, 0);\n-}\n-\n-static void\n-dir24_8_lookup_bulk_1(void *p, const uint32_t *ips,\n-\tuint64_t *next_hops, const unsigned int n)\n-{\n-\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n-\n-\tdir24_8_lookup_bulk(dp, ips, next_hops, n, 1);\n-}\n-\n-static void\n-dir24_8_lookup_bulk_2(void *p, const uint32_t *ips,\n-\tuint64_t *next_hops, const unsigned int n)\n-{\n-\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n-\n-\tdir24_8_lookup_bulk(dp, ips, next_hops, n, 2);\n-}\n-\n-static void\n-dir24_8_lookup_bulk_3(void *p, const uint32_t *ips,\n-\tuint64_t *next_hops, const unsigned int n)\n-{\n-\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n-\n-\tdir24_8_lookup_bulk(dp, ips, next_hops, n, 3);\n-}\n-\n-static void\n-dir24_8_lookup_bulk_uni(void *p, const uint32_t *ips,\n-\tuint64_t *next_hops, const unsigned int n)\n-{\n-\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n-\tuint64_t tmp;\n-\tuint32_t i;\n-\tuint32_t prefetch_offset = RTE_MIN(15U, n);\n-\tuint8_t nh_sz = dp->nh_sz;\n-\n-\tfor (i = 0; i < prefetch_offset; i++)\n-\t\trte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz));\n-\tfor (i = 0; i < (n - prefetch_offset); i++) {\n-\t\trte_prefetch0(get_tbl24_p(dp, ips[i + prefetch_offset],\n-\t\t\tnh_sz));\n-\t\ttmp = get_tbl24(dp, ips[i], nh_sz);\n-\t\tif (unlikely(is_entry_extended(tmp)))\n-\t\t\ttmp = get_tbl8(dp, tmp, ips[i], nh_sz);\n-\n-\t\tnext_hops[i] = tmp >> 1;\n-\t}\n-\tfor (; i < n; i++) {\n-\t\ttmp = get_tbl24(dp, ips[i], nh_sz);\n-\t\tif (unlikely(is_entry_extended(tmp)))\n-\t\t\ttmp = get_tbl8(dp, tmp, ips[i], nh_sz);\n-\n-\t\tnext_hops[i] = tmp >> 1;\n-\t}\n-}\n-\n 
rte_fib_lookup_fn_t\n dir24_8_get_lookup_fn(void *p, enum rte_fib_dir24_8_lookup_type type)\n {\ndiff --git a/lib/librte_fib/dir24_8.h b/lib/librte_fib/dir24_8.h\nindex 53c5dd29e..56d038951 100644\n--- a/lib/librte_fib/dir24_8.h\n+++ b/lib/librte_fib/dir24_8.h\n@@ -6,6 +6,9 @@\n #ifndef _DIR24_8_H_\n #define _DIR24_8_H_\n \n+#include <rte_prefetch.h>\n+#include <rte_branch_prediction.h>\n+\n /**\n * @file\n * DIR24_8 algorithm\n@@ -15,6 +18,227 @@\n extern \"C\" {\n #endif\n \n+#define DIR24_8_TBL24_NUM_ENT\t\t(1 << 24)\n+#define DIR24_8_TBL8_GRP_NUM_ENT\t256U\n+#define DIR24_8_EXT_ENT\t\t\t1\n+#define DIR24_8_TBL24_MASK\t\t0xffffff00\n+\n+#define BITMAP_SLAB_BIT_SIZE_LOG2\t6\n+#define BITMAP_SLAB_BIT_SIZE\t\t(1 << BITMAP_SLAB_BIT_SIZE_LOG2)\n+#define BITMAP_SLAB_BITMASK\t\t(BITMAP_SLAB_BIT_SIZE - 1)\n+\n+struct dir24_8_tbl {\n+\tuint32_t\tnumber_tbl8s;\t/**< Total number of tbl8s */\n+\tuint32_t\trsvd_tbl8s;\t/**< Number of reserved tbl8s */\n+\tuint32_t\tcur_tbl8s;\t/**< Current number of tbl8s */\n+\tenum rte_fib_dir24_8_nh_sz\tnh_sz;\t/**< Size of nexthop entry */\n+\tuint64_t\tdef_nh;\t\t/**< Default next hop */\n+\tuint64_t\t*tbl8;\t\t/**< tbl8 table. */\n+\tuint64_t\t*tbl8_idxes;\t/**< bitmap containing free tbl8 idxes*/\n+\t/* tbl24 table. 
*/\n+\t__extension__ uint64_t\ttbl24[0] __rte_cache_aligned;\n+};\n+\n+static inline void *\n+get_tbl24_p(struct dir24_8_tbl *dp, uint32_t ip, uint8_t nh_sz)\n+{\n+\treturn (void *)&((uint8_t *)dp->tbl24)[(ip &\n+\t\tDIR24_8_TBL24_MASK) >> (8 - nh_sz)];\n+}\n+\n+static inline uint8_t\n+bits_in_nh(uint8_t nh_sz)\n+{\n+\treturn 8 * (1 << nh_sz);\n+}\n+\n+static inline uint64_t\n+get_max_nh(uint8_t nh_sz)\n+{\n+\treturn ((1ULL << (bits_in_nh(nh_sz) - 1)) - 1);\n+}\n+\n+static inline uint32_t\n+get_tbl24_idx(uint32_t ip)\n+{\n+\treturn ip >> 8;\n+}\n+\n+static inline uint32_t\n+get_tbl8_idx(uint32_t res, uint32_t ip)\n+{\n+\treturn (res >> 1) * DIR24_8_TBL8_GRP_NUM_ENT + (uint8_t)ip;\n+}\n+\n+static inline uint64_t\n+lookup_msk(uint8_t nh_sz)\n+{\n+\treturn ((1ULL << ((1 << (nh_sz + 3)) - 1)) << 1) - 1;\n+}\n+\n+static inline uint8_t\n+get_psd_idx(uint32_t val, uint8_t nh_sz)\n+{\n+\treturn val & ((1 << (3 - nh_sz)) - 1);\n+}\n+\n+static inline uint32_t\n+get_tbl_idx(uint32_t val, uint8_t nh_sz)\n+{\n+\treturn val >> (3 - nh_sz);\n+}\n+\n+static inline uint64_t\n+get_tbl24(struct dir24_8_tbl *dp, uint32_t ip, uint8_t nh_sz)\n+{\n+\treturn ((dp->tbl24[get_tbl_idx(get_tbl24_idx(ip), nh_sz)] >>\n+\t\t(get_psd_idx(get_tbl24_idx(ip), nh_sz) *\n+\t\tbits_in_nh(nh_sz))) & lookup_msk(nh_sz));\n+}\n+\n+static inline uint64_t\n+get_tbl8(struct dir24_8_tbl *dp, uint32_t res, uint32_t ip, uint8_t nh_sz)\n+{\n+\treturn ((dp->tbl8[get_tbl_idx(get_tbl8_idx(res, ip), nh_sz)] >>\n+\t\t(get_psd_idx(get_tbl8_idx(res, ip), nh_sz) *\n+\t\tbits_in_nh(nh_sz))) & lookup_msk(nh_sz));\n+}\n+\n+static inline int\n+is_entry_extended(uint64_t ent)\n+{\n+\treturn (ent & DIR24_8_EXT_ENT) == DIR24_8_EXT_ENT;\n+}\n+\n+#define LOOKUP_FUNC(suffix, type, bulk_prefetch, nh_sz)\t\t\t\\\n+static inline void dir24_8_lookup_bulk_##suffix(void *p, const uint32_t *ips, \\\n+\tuint64_t *next_hops, const unsigned int n)\t\t\t\\\n+{\t\t\t\t\t\t\t\t\t\\\n+\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl 
*)p;\t\t\\\n+\tuint64_t tmp;\t\t\t\t\t\t\t\\\n+\tuint32_t i;\t\t\t\t\t\t\t\\\n+\tuint32_t prefetch_offset =\t\t\t\t\t\\\n+\t\tRTE_MIN((unsigned int)bulk_prefetch, n);\t\t\\\n+\t\t\t\t\t\t\t\t\t\\\n+\tfor (i = 0; i < prefetch_offset; i++)\t\t\t\t\\\n+\t\trte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz));\t\t\\\n+\tfor (i = 0; i < (n - prefetch_offset); i++) {\t\t\t\\\n+\t\trte_prefetch0(get_tbl24_p(dp,\t\t\t\t\\\n+\t\t\tips[i + prefetch_offset], nh_sz));\t\t\\\n+\t\ttmp = ((type *)dp->tbl24)[ips[i] >> 8];\t\t\t\\\n+\t\tif (unlikely(is_entry_extended(tmp)))\t\t\t\\\n+\t\t\ttmp = ((type *)dp->tbl8)[(uint8_t)ips[i] +\t\\\n+\t\t\t\t((tmp >> 1) * DIR24_8_TBL8_GRP_NUM_ENT)]; \\\n+\t\tnext_hops[i] = tmp >> 1;\t\t\t\t\\\n+\t}\t\t\t\t\t\t\t\t\\\n+\tfor (; i < n; i++) {\t\t\t\t\t\t\\\n+\t\ttmp = ((type *)dp->tbl24)[ips[i] >> 8];\t\t\t\\\n+\t\tif (unlikely(is_entry_extended(tmp)))\t\t\t\\\n+\t\t\ttmp = ((type *)dp->tbl8)[(uint8_t)ips[i] +\t\\\n+\t\t\t\t((tmp >> 1) * DIR24_8_TBL8_GRP_NUM_ENT)]; \\\n+\t\tnext_hops[i] = tmp >> 1;\t\t\t\t\\\n+\t}\t\t\t\t\t\t\t\t\\\n+}\t\t\t\t\t\t\t\t\t\\\n+\n+LOOKUP_FUNC(1b, uint8_t, 5, 0)\n+LOOKUP_FUNC(2b, uint16_t, 6, 1)\n+LOOKUP_FUNC(4b, uint32_t, 15, 2)\n+LOOKUP_FUNC(8b, uint64_t, 12, 3)\n+\n+static inline void\n+dir24_8_lookup_bulk(struct dir24_8_tbl *dp, const uint32_t *ips,\n+\tuint64_t *next_hops, const unsigned int n, uint8_t nh_sz)\n+{\n+\tuint64_t tmp;\n+\tuint32_t i;\n+\tuint32_t prefetch_offset = RTE_MIN(15U, n);\n+\n+\tfor (i = 0; i < prefetch_offset; i++)\n+\t\trte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz));\n+\tfor (i = 0; i < (n - prefetch_offset); i++) {\n+\t\trte_prefetch0(get_tbl24_p(dp, ips[i + prefetch_offset],\n+\t\t\tnh_sz));\n+\t\ttmp = get_tbl24(dp, ips[i], nh_sz);\n+\t\tif (unlikely(is_entry_extended(tmp)))\n+\t\t\ttmp = get_tbl8(dp, tmp, ips[i], nh_sz);\n+\n+\t\tnext_hops[i] = tmp >> 1;\n+\t}\n+\tfor (; i < n; i++) {\n+\t\ttmp = get_tbl24(dp, ips[i], nh_sz);\n+\t\tif (unlikely(is_entry_extended(tmp)))\n+\t\t\ttmp = 
get_tbl8(dp, tmp, ips[i], nh_sz);\n+\n+\t\tnext_hops[i] = tmp >> 1;\n+\t}\n+}\n+\n+static inline void\n+dir24_8_lookup_bulk_0(void *p, const uint32_t *ips,\n+\tuint64_t *next_hops, const unsigned int n)\n+{\n+\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n+\n+\tdir24_8_lookup_bulk(dp, ips, next_hops, n, 0);\n+}\n+\n+static inline void\n+dir24_8_lookup_bulk_1(void *p, const uint32_t *ips,\n+\tuint64_t *next_hops, const unsigned int n)\n+{\n+\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n+\n+\tdir24_8_lookup_bulk(dp, ips, next_hops, n, 1);\n+}\n+\n+static inline void\n+dir24_8_lookup_bulk_2(void *p, const uint32_t *ips,\n+\tuint64_t *next_hops, const unsigned int n)\n+{\n+\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n+\n+\tdir24_8_lookup_bulk(dp, ips, next_hops, n, 2);\n+}\n+\n+static inline void\n+dir24_8_lookup_bulk_3(void *p, const uint32_t *ips,\n+\tuint64_t *next_hops, const unsigned int n)\n+{\n+\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n+\n+\tdir24_8_lookup_bulk(dp, ips, next_hops, n, 3);\n+}\n+\n+static inline void\n+dir24_8_lookup_bulk_uni(void *p, const uint32_t *ips,\n+\tuint64_t *next_hops, const unsigned int n)\n+{\n+\tstruct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;\n+\tuint64_t tmp;\n+\tuint32_t i;\n+\tuint32_t prefetch_offset = RTE_MIN(15U, n);\n+\tuint8_t nh_sz = dp->nh_sz;\n+\n+\tfor (i = 0; i < prefetch_offset; i++)\n+\t\trte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz));\n+\tfor (i = 0; i < (n - prefetch_offset); i++) {\n+\t\trte_prefetch0(get_tbl24_p(dp, ips[i + prefetch_offset],\n+\t\t\tnh_sz));\n+\t\ttmp = get_tbl24(dp, ips[i], nh_sz);\n+\t\tif (unlikely(is_entry_extended(tmp)))\n+\t\t\ttmp = get_tbl8(dp, tmp, ips[i], nh_sz);\n+\n+\t\tnext_hops[i] = tmp >> 1;\n+\t}\n+\tfor (; i < n; i++) {\n+\t\ttmp = get_tbl24(dp, ips[i], nh_sz);\n+\t\tif (unlikely(is_entry_extended(tmp)))\n+\t\t\ttmp = get_tbl8(dp, tmp, ips[i], nh_sz);\n+\n+\t\tnext_hops[i] = tmp >> 1;\n+\t}\n+}\n+\n void *\n dir24_8_create(const char *name, int 
socket_id, struct rte_fib_conf *conf);\n \n", "prefixes": [ "v4", "3/8" ] }{ "id": 73542, "url": "https://patches.dpdk.org/api/patches/73542/?format=api" }