get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/7992/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 7992,
    "url": "https://patches.dpdk.org/api/patches/7992/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/CANDrEHm5ycfPY5ROUXK0RQFMenZfc_0bMkUqZ1j2Vb17VxbYow@mail.gmail.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<CANDrEHm5ycfPY5ROUXK0RQFMenZfc_0bMkUqZ1j2Vb17VxbYow@mail.gmail.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/CANDrEHm5ycfPY5ROUXK0RQFMenZfc_0bMkUqZ1j2Vb17VxbYow@mail.gmail.com",
    "date": "2015-10-25T17:52:04",
    "name": "[dpdk-dev,v1,0/3] lpm: increase number of next hops for lpm (ipv4)",
    "commit_ref": null,
    "pull_url": null,
    "state": "not-applicable",
    "archived": true,
    "hash": "2d3891e424525c1cb23f6ed36bc0dbd021f145fc",
    "submitter": {
        "id": 210,
        "url": "https://patches.dpdk.org/api/people/210/?format=api",
        "name": "Vladimir Medvedkin",
        "email": "medvedkinv@gmail.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/CANDrEHm5ycfPY5ROUXK0RQFMenZfc_0bMkUqZ1j2Vb17VxbYow@mail.gmail.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/7992/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/7992/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 72F135682;\n\tSun, 25 Oct 2015 18:52:06 +0100 (CET)",
            "from mail-lf0-f51.google.com (mail-lf0-f51.google.com\n\t[209.85.215.51]) by dpdk.org (Postfix) with ESMTP id C5369532D\n\tfor <dev@dpdk.org>; Sun, 25 Oct 2015 18:52:04 +0100 (CET)",
            "by lfbn126 with SMTP id n126so91430735lfb.2\n\tfor <dev@dpdk.org>; Sun, 25 Oct 2015 10:52:04 -0700 (PDT)",
            "by 10.114.199.4 with HTTP; Sun, 25 Oct 2015 10:52:04 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20120113;\n\th=mime-version:in-reply-to:references:date:message-id:subject:from:to\n\t:cc:content-type;\n\tbh=F0dw9MTmTIBwgueorXMAdR5x8MLF7MgNsTYf60JdMmE=;\n\tb=U3/ylubMYJbVbv6c9MFEURwjC69be4O+6+My8ZvRaSrF98FzXeDFYHA9bg3srY1VX+\n\tQzNuBOcoQRPspWU0KEUZdSQBfZInQBa99KZT8XvAvAB526OTSk+lLXBLjjoDA02Ps6NC\n\tJkWdzuOB3lyC6NyIrq/zW3Y4hwX7sDVyxmR+SRmT6Nm4l7UkXTr0d8ZA9L5xeJRXg2so\n\tbzCq+mbn1uqXELK/L5/grLuXeFqmvdj33ajz7aUUACEzfRq3sOBeCKhhTJDGxr0CvuhS\n\tdUSJ1jI23DPqm9avMLv0nqQ8+pywO/X16YBD/P+IxmBO+TXRZtGNis6eih5UmAUYg7KC\n\t1VnQ==",
        "MIME-Version": "1.0",
        "X-Received": "by 10.25.18.39 with SMTP id h39mr10123372lfi.7.1445795524439;\n\tSun, 25 Oct 2015 10:52:04 -0700 (PDT)",
        "In-Reply-To": "<562B209A.6030507@mhcomputing.net>",
        "References": "<1445608311-8092-1-git-send-email-michalx.k.jastrzebski@intel.com>\n\t<20151023162033.GA10036@mhcomputing.net>\n\t<562B209A.6030507@mhcomputing.net>",
        "Date": "Sun, 25 Oct 2015 20:52:04 +0300",
        "Message-ID": "<CANDrEHm5ycfPY5ROUXK0RQFMenZfc_0bMkUqZ1j2Vb17VxbYow@mail.gmail.com>",
        "From": "Vladimir Medvedkin <medvedkinv@gmail.com>",
        "To": "Matthew Hall <mhall@mhcomputing.net>",
        "Content-Type": "text/plain; charset=UTF-8",
        "X-Content-Filtered-By": "Mailman/MimeDel 2.1.15",
        "Cc": "\"dev@dpdk.org\" <dev@dpdk.org>",
        "Subject": "Re: [dpdk-dev] [PATCH v1 0/3] lpm: increase number of next hops for\n\tlpm (ipv4)",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Hi all,\n\nHere my implementation\n\nSigned-off-by: Vladimir Medvedkin <medvedkinv@gmail.com>\n---\n config/common_bsdapp     |   1 +\n config/common_linuxapp   |   1 +\n lib/librte_lpm/rte_lpm.c | 194\n+++++++++++++++++++++++++++++------------------\n lib/librte_lpm/rte_lpm.h | 163 +++++++++++++++++++++++----------------\n 4 files changed, 219 insertions(+), 140 deletions(-)\n\n                        __rte_cache_aligned; /**< LPM rules. */\n@@ -219,7 +220,7 @@ rte_lpm_free(struct rte_lpm *lpm);\n  *   0 on success, negative value otherwise\n  */\n int\n-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t\nnext_hop);\n+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, struct\nrte_lpm_res *res);\n\n /**\n  * Check if a rule is present in the LPM table,\n@@ -238,7 +239,7 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t\ndepth, uint8_t next_hop);\n  */\n int\n rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n-uint8_t *next_hop);\n+                       struct rte_lpm_res *res);\n\n /**\n  * Delete a rule from the LPM table.\n@@ -277,29 +278,43 @@ rte_lpm_delete_all(struct rte_lpm *lpm);\n  *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup\nhit\n  */\n static inline int\n-rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)\n+rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, struct rte_lpm_res *res)\n {\n        unsigned tbl24_index = (ip >> 8);\n-       uint16_t tbl_entry;\n-\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       uint64_t tbl_entry;\n+#else\n+       uint32_t tbl_entry;\n+#endif\n        /* DEBUG: Check user input arguments. 
*/\n-       RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)),\n-EINVAL);\n+       RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (res == NULL)), -EINVAL);\n\n        /* Copy tbl24 entry */\n-       tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];\n-\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       tbl_entry = *(const uint64_t *)&lpm->tbl24[tbl24_index];\n+#else\n+       tbl_entry = *(const uint32_t *)&lpm->tbl24[tbl24_index];\n+#endif\n        /* Copy tbl8 entry (only if needed) */\n        if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==\n                        RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {\n\n                unsigned tbl8_index = (uint8_t)ip +\n-                               ((uint8_t)tbl_entry *\nRTE_LPM_TBL8_GROUP_NUM_ENTRIES);\n+                               ((*(struct rte_lpm_tbl_entry\n*)&tbl_entry).tbl8_gindex * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);\n\n-               tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+               tbl_entry = *(const uint64_t *)&lpm->tbl8[tbl8_index];\n+#else\n+               tbl_entry = *(const uint32_t *)&lpm->tbl8[tbl8_index];\n+#endif\n        }\n-\n-       *next_hop = (uint8_t)tbl_entry;\n+       res->next_hop  = ((struct rte_lpm_tbl_entry *)&tbl_entry)->next_hop;\n+       res->fwd_class = ((struct rte_lpm_tbl_entry\n*)&tbl_entry)->fwd_class;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       res->as_num       = ((struct rte_lpm_tbl_entry\n*)&tbl_entry)->as_num;\n+#endif\n        return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 
0 : -ENOENT;\n+\n }\n\n /**\n@@ -322,19 +337,25 @@ rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip,\nuint8_t *next_hop)\n  *  @return\n  *   -EINVAL for incorrect arguments, otherwise 0\n  */\n-#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \\\n-               rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)\n+#define rte_lpm_lookup_bulk(lpm, ips, res_tbl, n) \\\n+               rte_lpm_lookup_bulk_func(lpm, ips, res_tbl, n)\n\n static inline int\n-rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,\n-               uint16_t * next_hops, const unsigned n)\n+rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,\n+               struct rte_lpm_res *res_tbl, const unsigned n)\n {\n        unsigned i;\n+       int ret = 0;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       uint64_t tbl_entry;\n+#else\n+       uint32_t tbl_entry;\n+#endif\n        unsigned tbl24_indexes[n];\n\n        /* DEBUG: Check user input arguments. */\n        RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||\n-                       (next_hops == NULL)), -EINVAL);\n+                       (res_tbl == NULL)), -EINVAL);\n\n        for (i = 0; i < n; i++) {\n                tbl24_indexes[i] = ips[i] >> 8;\n@@ -342,20 +363,32 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm,\nconst uint32_t * ips,\n\n        for (i = 0; i < n; i++) {\n                /* Simply copy tbl24 entry to output */\n-               next_hops[i] = *(const uint16_t\n*)&lpm->tbl24[tbl24_indexes[i]];\n-\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+               tbl_entry = *(const uint64_t\n*)&lpm->tbl24[tbl24_indexes[i]];\n+#else\n+               tbl_entry = *(const uint32_t\n*)&lpm->tbl24[tbl24_indexes[i]];\n+#endif\n                /* Overwrite output with tbl8 entry if needed */\n-               if (unlikely((next_hops[i] &\nRTE_LPM_VALID_EXT_ENTRY_BITMASK) ==\n-                               RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {\n+               if (unlikely((tbl_entry & 
RTE_LPM_VALID_EXT_ENTRY_BITMASK)\n==\n+                       RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {\n\n                        unsigned tbl8_index = (uint8_t)ips[i] +\n-                                       ((uint8_t)next_hops[i] *\n-                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES);\n+                               ((*(struct rte_lpm_tbl_entry\n*)&tbl_entry).tbl8_gindex * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);\n\n-                       next_hops[i] = *(const uint16_t\n*)&lpm->tbl8[tbl8_index];\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                       tbl_entry = *(const uint64_t\n*)&lpm->tbl8[tbl8_index];\n+#else\n+                       tbl_entry = *(const uint32_t\n*)&lpm->tbl8[tbl8_index];\n+#endif\n                }\n+               res_tbl[i].next_hop     = ((struct rte_lpm_tbl_entry\n*)&tbl_entry)->next_hop;\n+               res_tbl[i].fwd_class    = ((struct rte_lpm_tbl_entry\n*)&tbl_entry)->next_hop;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+               res_tbl[i].as_num       = ((struct rte_lpm_tbl_entry\n*)&tbl_entry)->as_num;\n+#endif\n+               ret |= 1 << i;\n        }\n-       return 0;\n+       return ret;\n }\n\n /* Mask four results. */\n@@ -477,4 +510,4 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip,\nuint16_t hop[4],\n }\n #endif\n\n-#endif /* _RTE_LPM_H_ */\n+#endif /* _RTE_LPM_EXT_H_ */\n\n2015-10-24 9:09 GMT+03:00 Matthew Hall <mhall@mhcomputing.net>:\n\n> On 10/23/15 9:20 AM, Matthew Hall wrote:\n>\n>> On Fri, Oct 23, 2015 at 03:51:48PM +0200, Michal Jastrzebski wrote:\n>>\n>>> From: Michal Kobylinski  <michalx.kobylinski@intel.com>\n>>>\n>>> The current DPDK implementation for LPM for IPv4 and IPv6 limits the\n>>> number of next hops to 256, as the next hop ID is an 8-bit long field.\n>>> Proposed extension increase number of next hops for IPv4 to 2^24 and\n>>> also allows 32-bits read/write operations.\n>>>\n>>> This patchset requires additional change to rte_table library to meet\n>>> ABI compatibility requirements. 
A v2 will be sent next week.\n>>>\n>>\n>> I also have a patchset for this.\n>>\n>> I will send it out as well so we could compare.\n>>\n>> Matthew.\n>>\n>\n> Sorry about the delay; I only work on DPDK in personal time and not as\n> part of a job. My patchset is attached to this email.\n>\n> One possible advantage with my patchset, compared to others, is that the\n> space problem is fixed in both IPV4 and in IPV6, to prevent asymmetry\n> between these two standards, which is something I try to avoid as much as\n> humanly possible.\n>\n> This is because my application code is green-field, so I absolutely don't\n> want to put any ugly hacks or incompatibilities in this code if I can\n> possibly avoid it.\n>\n> Otherwise, I am not necessarily as expert about rte_lpm as some of the\n> full-time guys, but I think with four or five of us in the thread hammering\n> out patches we will be able to create something amazing together and I am\n> very very very very very happy about this.\n>\n> Matthew.\n>",
    "diff": "diff --git a/config/common_bsdapp b/config/common_bsdapp\nindex b37dcf4..408cc2c 100644\n--- a/config/common_bsdapp\n+++ b/config/common_bsdapp\n@@ -344,6 +344,7 @@ CONFIG_RTE_LIBRTE_JOBSTATS=y\n #\n CONFIG_RTE_LIBRTE_LPM=y\n CONFIG_RTE_LIBRTE_LPM_DEBUG=n\n+CONFIG_RTE_LIBRTE_LPM_ASNUM=n\n\n #\n # Compile librte_acl\ndiff --git a/config/common_linuxapp b/config/common_linuxapp\nindex 0de43d5..1c60e63 100644\n--- a/config/common_linuxapp\n+++ b/config/common_linuxapp\n@@ -352,6 +352,7 @@ CONFIG_RTE_LIBRTE_JOBSTATS=y\n #\n CONFIG_RTE_LIBRTE_LPM=y\n CONFIG_RTE_LIBRTE_LPM_DEBUG=n\n+CONFIG_RTE_LIBRTE_LPM_ASNUM=n\n\n #\n # Compile librte_acl\ndiff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c\nindex 163ba3c..363b400 100644\n--- a/lib/librte_lpm/rte_lpm.c\n+++ b/lib/librte_lpm/rte_lpm.c\n@@ -159,9 +159,11 @@ rte_lpm_create(const char *name, int socket_id, int\nmax_rules,\n\n        lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);\n\n-       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);\n-       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);\n-\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 8);\n+#else\n+       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);\n+#endif\n        /* Check user arguments. */\n        if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){\n                rte_errno = EINVAL;\n@@ -261,7 +263,7 @@ rte_lpm_free(struct rte_lpm *lpm)\n  */\n static inline int32_t\n rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n-       uint8_t next_hop)\n+       struct rte_lpm_res *res)\n {\n        uint32_t rule_gindex, rule_index, last_rule;\n        int i;\n@@ -282,8 +284,11 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked,\nuint8_t depth,\n\n                        /* If rule already exists update its next_hop and\nreturn. 
*/\n                        if (lpm->rules_tbl[rule_index].ip == ip_masked) {\n-                               lpm->rules_tbl[rule_index].next_hop =\nnext_hop;\n-\n+                               lpm->rules_tbl[rule_index].next_hop =\nres->next_hop;\n+                               lpm->rules_tbl[rule_index].fwd_class =\nres->fwd_class;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                               lpm->rules_tbl[rule_index].as_num =\nres->as_num;\n+#endif\n                                return rule_index;\n                        }\n                }\n@@ -320,7 +325,11 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked,\nuint8_t depth,\n\n        /* Add the new rule. */\n        lpm->rules_tbl[rule_index].ip = ip_masked;\n-       lpm->rules_tbl[rule_index].next_hop = next_hop;\n+       lpm->rules_tbl[rule_index].next_hop = res->next_hop;\n+       lpm->rules_tbl[rule_index].fwd_class = res->fwd_class;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       lpm->rules_tbl[rule_index].as_num = res->as_num;\n+#endif\n\n        /* Increment the used rules counter for this rule group. */\n        lpm->rule_info[depth - 1].used_rules++;\n@@ -382,10 +391,10 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked,\nuint8_t depth)\n  * Find, clean and allocate a tbl8.\n  */\n static inline int32_t\n-tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)\n+tbl8_alloc(struct rte_lpm_tbl_entry *tbl8)\n {\n        uint32_t tbl8_gindex; /* tbl8 group index. */\n-       struct rte_lpm_tbl8_entry *tbl8_entry;\n+       struct rte_lpm_tbl_entry *tbl8_entry;\n\n        /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. 
*/\n        for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;\n@@ -393,12 +402,12 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)\n                tbl8_entry = &tbl8[tbl8_gindex *\n                                   RTE_LPM_TBL8_GROUP_NUM_ENTRIES];\n                /* If a free tbl8 group is found clean it and set as VALID.\n*/\n-               if (!tbl8_entry->valid_group) {\n+               if (!tbl8_entry->ext_valid) {\n                        memset(&tbl8_entry[0], 0,\n                                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES *\n                                        sizeof(tbl8_entry[0]));\n\n-                       tbl8_entry->valid_group = VALID;\n+                       tbl8_entry->ext_valid = VALID;\n\n                        /* Return group index for allocated tbl8 group. */\n                        return tbl8_gindex;\n@@ -410,46 +419,50 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)\n }\n\n static inline void\n-tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)\n+tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)\n {\n        /* Set tbl8 group invalid*/\n-       tbl8[tbl8_group_start].valid_group = INVALID;\n+       tbl8[tbl8_group_start].ext_valid = INVALID;\n }\n\n static inline int32_t\n add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n-               uint8_t next_hop)\n+               struct rte_lpm_res *res)\n {\n        uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;\n\n        /* Calculate the index into Table24. 
*/\n        tbl24_index = ip >> 8;\n        tbl24_range = depth_to_range(depth);\n+       struct rte_lpm_tbl_entry new_tbl_entry = {\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+               .as_num = res->as_num,\n+#endif\n+               .next_hop = res->next_hop,\n+               .fwd_class  = res->fwd_class,\n+               .ext_valid = 0,\n+               .depth = depth,\n+               .valid = VALID,\n+       };\n+\n\n        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {\n                /*\n                 * For invalid OR valid and non-extended tbl 24 entries set\n                 * entry.\n                 */\n-               if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&\n+               if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_valid == 0 &&\n                                lpm->tbl24[i].depth <= depth)) {\n\n-                       struct rte_lpm_tbl24_entry new_tbl24_entry = {\n-                               { .next_hop = next_hop, },\n-                               .valid = VALID,\n-                               .ext_entry = 0,\n-                               .depth = depth,\n-                       };\n-\n                        /* Setting tbl24 entry in one go to avoid race\n                         * conditions\n                         */\n-                       lpm->tbl24[i] = new_tbl24_entry;\n+                       lpm->tbl24[i] = new_tbl_entry;\n\n                        continue;\n                }\n\n-               if (lpm->tbl24[i].ext_entry == 1) {\n+               if (lpm->tbl24[i].ext_valid == 1) {\n                        /* If tbl24 entry is valid and extended calculate\nthe\n                         *  index into tbl8.\n                         */\n@@ -461,19 +474,14 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip,\nuint8_t depth,\n                        for (j = tbl8_index; j < tbl8_group_end; j++) {\n                                if (!lpm->tbl8[j].valid ||\n                                   
             lpm->tbl8[j].depth <=\ndepth) {\n-                                       struct rte_lpm_tbl8_entry\n-                                               new_tbl8_entry = {\n-                                               .valid = VALID,\n-                                               .valid_group = VALID,\n-                                               .depth = depth,\n-                                               .next_hop = next_hop,\n-                                       };\n+\n+                                       new_tbl_entry.ext_valid = VALID;\n\n                                        /*\n                                         * Setting tbl8 entry in one go to\navoid\n                                         * race conditions\n                                         */\n-                                       lpm->tbl8[j] = new_tbl8_entry;\n+                                       lpm->tbl8[j] = new_tbl_entry;\n\n                                        continue;\n                                }\n@@ -486,7 +494,7 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip,\nuint8_t depth,\n\n static inline int32_t\n add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n-               uint8_t next_hop)\n+               struct rte_lpm_res *res)\n {\n        uint32_t tbl24_index;\n        int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end,\ntbl8_index,\n@@ -512,7 +520,11 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,\nuint8_t depth,\n                /* Set tbl8 entry. 
*/\n                for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {\n                        lpm->tbl8[i].depth = depth;\n-                       lpm->tbl8[i].next_hop = next_hop;\n+                       lpm->tbl8[i].next_hop = res->next_hop;\n+                       lpm->tbl8[i].fwd_class = res->fwd_class;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                       lpm->tbl8[i].as_num = res->as_num;\n+#endif\n                        lpm->tbl8[i].valid = VALID;\n                }\n\n@@ -522,17 +534,17 @@ add_depth_big(struct rte_lpm *lpm, uint32_t\nip_masked, uint8_t depth,\n                 * so assign whole structure in one go\n                 */\n\n-               struct rte_lpm_tbl24_entry new_tbl24_entry = {\n-                       { .tbl8_gindex = (uint8_t)tbl8_group_index, },\n-                       .valid = VALID,\n-                       .ext_entry = 1,\n+               struct rte_lpm_tbl_entry new_tbl24_entry = {\n+                       .tbl8_gindex = (uint16_t)tbl8_group_index,\n                        .depth = 0,\n+                       .ext_valid = 1,\n+                       .valid = VALID,\n                };\n\n                lpm->tbl24[tbl24_index] = new_tbl24_entry;\n\n        }/* If valid entry but not extended calculate the index into\nTable8. */\n-       else if (lpm->tbl24[tbl24_index].ext_entry == 0) {\n+       else if (lpm->tbl24[tbl24_index].ext_valid == 0) {\n                /* Search for free tbl8 group. 
*/\n                tbl8_group_index = tbl8_alloc(lpm->tbl8);\n\n@@ -551,6 +563,11 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,\nuint8_t depth,\n                        lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;\n                        lpm->tbl8[i].next_hop =\n                                        lpm->tbl24[tbl24_index].next_hop;\n+                       lpm->tbl8[i].fwd_class =\n+                                       lpm->tbl24[tbl24_index].fwd_class;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                       lpm->tbl8[i].as_num =\nlpm->tbl24[tbl24_index].as_num;\n+#endif\n                }\n\n                tbl8_index = tbl8_group_start + (ip_masked & 0xFF);\n@@ -561,7 +578,11 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,\nuint8_t depth,\n                                        lpm->tbl8[i].depth <= depth) {\n                                lpm->tbl8[i].valid = VALID;\n                                lpm->tbl8[i].depth = depth;\n-                               lpm->tbl8[i].next_hop = next_hop;\n+                               lpm->tbl8[i].next_hop = res->next_hop;\n+                               lpm->tbl8[i].fwd_class = res->fwd_class;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                               lpm->tbl8[i].as_num = res->as_num;\n+#endif\n\n                                continue;\n                        }\n@@ -573,11 +594,11 @@ add_depth_big(struct rte_lpm *lpm, uint32_t\nip_masked, uint8_t depth,\n                 * so assign whole structure in one go.\n                 */\n\n-               struct rte_lpm_tbl24_entry new_tbl24_entry = {\n-                               { .tbl8_gindex = (uint8_t)tbl8_group_index,\n},\n-                               .valid = VALID,\n-                               .ext_entry = 1,\n+               struct rte_lpm_tbl_entry new_tbl24_entry = {\n+                               .tbl8_gindex = (uint16_t)tbl8_group_index,\n                                .depth = 0,\n+                          
     .ext_valid = 1,\n+                               .valid = VALID,\n                };\n\n                lpm->tbl24[tbl24_index] = new_tbl24_entry;\n@@ -595,11 +616,15 @@ add_depth_big(struct rte_lpm *lpm, uint32_t\nip_masked, uint8_t depth,\n\n                        if (!lpm->tbl8[i].valid ||\n                                        lpm->tbl8[i].depth <= depth) {\n-                               struct rte_lpm_tbl8_entry new_tbl8_entry = {\n-                                       .valid = VALID,\n+                               struct rte_lpm_tbl_entry new_tbl8_entry = {\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                                       .as_num = res->as_num,\n+#endif\n+                                       .next_hop = res->next_hop,\n+                                       .fwd_class = res->fwd_class,\n                                        .depth = depth,\n-                                       .next_hop = next_hop,\n-                                       .valid_group =\nlpm->tbl8[i].valid_group,\n+                                       .ext_valid = lpm->tbl8[i].ext_valid,\n+                                       .valid = VALID,\n                                };\n\n                                /*\n@@ -621,19 +646,19 @@ add_depth_big(struct rte_lpm *lpm, uint32_t\nip_masked, uint8_t depth,\n  */\n int\n rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n-               uint8_t next_hop)\n+               struct rte_lpm_res *res)\n {\n        int32_t rule_index, status = 0;\n        uint32_t ip_masked;\n\n        /* Check user arguments. */\n-       if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))\n+       if ((lpm == NULL) || (res == NULL) || (depth < 1) || (depth >\nRTE_LPM_MAX_DEPTH))\n                return -EINVAL;\n\n        ip_masked = ip & depth_to_mask(depth);\n\n        /* Add the rule to the rule table. 
*/\n-       rule_index = rule_add(lpm, ip_masked, depth, next_hop);\n+       rule_index = rule_add(lpm, ip_masked, depth, res);\n\n        /* If the is no space available for new rule return error. */\n        if (rule_index < 0) {\n@@ -641,10 +666,10 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t\ndepth,\n        }\n\n        if (depth <= MAX_DEPTH_TBL24) {\n-               status = add_depth_small(lpm, ip_masked, depth, next_hop);\n+               status = add_depth_small(lpm, ip_masked, depth, res);\n        }\n        else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */\n-               status = add_depth_big(lpm, ip_masked, depth, next_hop);\n+               status = add_depth_big(lpm, ip_masked, depth, res);\n\n                /*\n                 * If add fails due to exhaustion of tbl8 extensions delete\n@@ -665,14 +690,14 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t\ndepth,\n  */\n int\n rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n-uint8_t *next_hop)\n+                       struct rte_lpm_res *res)\n {\n        uint32_t ip_masked;\n        int32_t rule_index;\n\n        /* Check user arguments. 
*/\n        if ((lpm == NULL) ||\n-               (next_hop == NULL) ||\n+               (res == NULL) ||\n                (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))\n                return -EINVAL;\n\n@@ -681,7 +706,11 @@ uint8_t *next_hop)\n        rule_index = rule_find(lpm, ip_masked, depth);\n\n        if (rule_index >= 0) {\n-               *next_hop = lpm->rules_tbl[rule_index].next_hop;\n+               res->next_hop = lpm->rules_tbl[rule_index].next_hop;\n+               res->fwd_class = lpm->rules_tbl[rule_index].fwd_class;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+               res->as_num = lpm->rules_tbl[rule_index].as_num;\n+#endif\n                return 1;\n        }\n\n@@ -731,7 +760,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t\nip_masked,\n                 */\n                for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++)\n{\n\n-                       if (lpm->tbl24[i].ext_entry == 0 &&\n+                       if (lpm->tbl24[i].ext_valid == 0 &&\n                                        lpm->tbl24[i].depth <= depth ) {\n                                lpm->tbl24[i].valid = INVALID;\n                        }\n@@ -761,23 +790,30 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t\nip_masked,\n                 * associated with this rule.\n                 */\n\n-               struct rte_lpm_tbl24_entry new_tbl24_entry = {\n-                       {.next_hop =\nlpm->rules_tbl[sub_rule_index].next_hop,},\n-                       .valid = VALID,\n-                       .ext_entry = 0,\n+               struct rte_lpm_tbl_entry new_tbl24_entry = {\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                       .as_num = lpm->rules_tbl[sub_rule_index].as_num,\n+#endif\n+                       .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,\n+                       .fwd_class =\nlpm->rules_tbl[sub_rule_index].fwd_class,\n                        .depth = sub_rule_depth,\n+                       .ext_valid = 0,\n+                       .valid = 
VALID,\n                };\n\n-               struct rte_lpm_tbl8_entry new_tbl8_entry = {\n-                       .valid = VALID,\n+               struct rte_lpm_tbl_entry new_tbl8_entry = {\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                       .as_num = lpm->rules_tbl[sub_rule_index].as_num,\n+#endif\n+                       .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,\n+                       .fwd_class =\nlpm->rules_tbl[sub_rule_index].fwd_class,\n                        .depth = sub_rule_depth,\n-                       .next_hop = lpm->rules_tbl\n-                       [sub_rule_index].next_hop,\n+                       .valid = VALID,\n                };\n\n                for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++)\n{\n\n-                       if (lpm->tbl24[i].ext_entry == 0 &&\n+                       if (lpm->tbl24[i].ext_valid == 0 &&\n                                        lpm->tbl24[i].depth <= depth ) {\n                                lpm->tbl24[i] = new_tbl24_entry;\n                        }\n@@ -814,7 +850,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t\nip_masked,\n  * thus can be recycled\n  */\n static inline int32_t\n-tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t\ntbl8_group_start)\n+tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t\ntbl8_group_start)\n {\n        uint32_t tbl8_group_end, i;\n        tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n@@ -891,11 +927,15 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t\nip_masked,\n        }\n        else {\n                /* Set new tbl8 entry. 
*/\n-               struct rte_lpm_tbl8_entry new_tbl8_entry = {\n-                       .valid = VALID,\n-                       .depth = sub_rule_depth,\n-                       .valid_group =\nlpm->tbl8[tbl8_group_start].valid_group,\n+               struct rte_lpm_tbl_entry new_tbl8_entry = {\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                       .as_num = lpm->rules_tbl[sub_rule_index].as_num,\n+#endif\n+                       .fwd_class =\nlpm->rules_tbl[sub_rule_index].fwd_class,\n                        .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,\n+                       .depth = sub_rule_depth,\n+                       .ext_valid = lpm->tbl8[tbl8_group_start].ext_valid,\n+                       .valid = VALID,\n                };\n\n                /*\n@@ -923,11 +963,15 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t\nip_masked,\n        }\n        else if (tbl8_recycle_index > -1) {\n                /* Update tbl24 entry. */\n-               struct rte_lpm_tbl24_entry new_tbl24_entry = {\n-                       { .next_hop =\nlpm->tbl8[tbl8_recycle_index].next_hop, },\n-                       .valid = VALID,\n-                       .ext_entry = 0,\n+               struct rte_lpm_tbl_entry new_tbl24_entry = {\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+                       .as_num = lpm->tbl8[tbl8_recycle_index].as_num,\n+#endif\n+                       .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,\n+                       .fwd_class =\nlpm->tbl8[tbl8_recycle_index].fwd_class,\n                        .depth = lpm->tbl8[tbl8_recycle_index].depth,\n+                       .ext_valid = 0,\n+                       .valid = VALID,\n                };\n\n                /* Set tbl24 before freeing tbl8 to avoid race condition. 
*/\ndiff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h\nindex c299ce2..7c615bc 100644\n--- a/lib/librte_lpm/rte_lpm.h\n+++ b/lib/librte_lpm/rte_lpm.h\n@@ -31,8 +31,8 @@\n  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n  */\n\n-#ifndef _RTE_LPM_H_\n-#define _RTE_LPM_H_\n+#ifndef _RTE_LPM_EXT_H_\n+#define _RTE_LPM_EXT_H_\n\n /**\n  * @file\n@@ -81,57 +81,58 @@ extern \"C\" {\n #define RTE_LPM_RETURN_IF_TRUE(cond, retval)\n #endif\n\n-/** @internal bitmask with valid and ext_entry/valid_group fields set */\n-#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300\n+/** @internal bitmask with valid and ext_valid/ext_valid fields set */\n+#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03\n\n /** Bitmask used to indicate successful lookup */\n-#define RTE_LPM_LOOKUP_SUCCESS          0x0100\n+#define RTE_LPM_LOOKUP_SUCCESS          0x01\n+\n+struct rte_lpm_res {\n+       uint16_t        next_hop;\n+       uint8_t         fwd_class;\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       uint32_t        as_num;\n+#endif\n+};\n\n #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-/** @internal Tbl24 entry structure. */\n-struct rte_lpm_tbl24_entry {\n-       /* Stores Next hop or group index (i.e. gindex)into tbl8. */\n+struct rte_lpm_tbl_entry {\n+       uint8_t valid           :1;\n+       uint8_t ext_valid       :1;\n+       uint8_t depth           :6;\n+       uint8_t fwd_class;\n        union {\n-               uint8_t next_hop;\n-               uint8_t tbl8_gindex;\n+               uint16_t next_hop;\n+               uint16_t tbl8_gindex;\n        };\n-       /* Using single uint8_t to store 3 values. */\n-       uint8_t valid     :1; /**< Validation flag. */\n-       uint8_t ext_entry :1; /**< External entry. */\n-       uint8_t depth     :6; /**< Rule depth. */\n-};\n-\n-/** @internal Tbl8 entry structure. */\n-struct rte_lpm_tbl8_entry {\n-       uint8_t next_hop; /**< next hop. */\n-       /* Using single uint8_t to store 3 values. 
*/\n-       uint8_t valid       :1; /**< Validation flag. */\n-       uint8_t valid_group :1; /**< Group validation flag. */\n-       uint8_t depth       :6; /**< Rule depth. */\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       uint32_t as_num;\n+#endif\n };\n #else\n-struct rte_lpm_tbl24_entry {\n-       uint8_t depth       :6;\n-       uint8_t ext_entry   :1;\n-       uint8_t valid       :1;\n+struct rte_lpm_tbl_entry {\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       uint32_t as_num;\n+#endif\n        union {\n-               uint8_t tbl8_gindex;\n-               uint8_t next_hop;\n+               uint16_t tbl8_gindex;\n+               uint16_t next_hop;\n        };\n-};\n-\n-struct rte_lpm_tbl8_entry {\n-       uint8_t depth       :6;\n-       uint8_t valid_group :1;\n-       uint8_t valid       :1;\n-       uint8_t next_hop;\n+       uint8_t fwd_class;\n+       uint8_t depth           :6;\n+       uint8_t ext_valid       :1;\n+       uint8_t valid           :1;\n };\n #endif\n\n /** @internal Rule structure. */\n struct rte_lpm_rule {\n        uint32_t ip; /**< Rule IP address. */\n-       uint8_t  next_hop; /**< Rule next hop. */\n+#ifdef RTE_LIBRTE_LPM_ASNUM\n+       uint32_t as_num;\n+#endif\n+       uint16_t  next_hop; /**< Rule next hop. */\n+       uint8_t fwd_class;\n };\n\n /** @internal Contains metadata about the rules table. */\n@@ -148,9 +149,9 @@ struct rte_lpm {\n        struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule\ninfo table. */\n\n        /* LPM Tables. */\n-       struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \\\n+       struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \\\n                        __rte_cache_aligned; /**< LPM tbl24 table. */\n-       struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \\\n+       struct rte_lpm_tbl_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \\\n                        __rte_cache_aligned; /**< LPM tbl8 table. */\n        struct rte_lpm_rule rules_tbl[0] \\\n",
    "prefixes": [
        "dpdk-dev",
        "v1",
        "0/3"
    ]
}