get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (the supplied representation replaces the existing one).

GET /api/patches/63171/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 63171,
    "url": "http://patches.dpdk.org/api/patches/63171/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/0d4028269b34b303e538c02a7b0d62fdbb3b1dec.1574270323.git.anatoly.burakov@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<0d4028269b34b303e538c02a7b0d62fdbb3b1dec.1574270323.git.anatoly.burakov@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/0d4028269b34b303e538c02a7b0d62fdbb3b1dec.1574270323.git.anatoly.burakov@intel.com",
    "date": "2019-11-20T17:23:34",
    "name": "[v8,07/12] lpm: remove deprecated code",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "8b3577fb0fd33a57496f0c2dbcb924040059facb",
    "submitter": {
        "id": 4,
        "url": "http://patches.dpdk.org/api/people/4/?format=api",
        "name": "Anatoly Burakov",
        "email": "anatoly.burakov@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/0d4028269b34b303e538c02a7b0d62fdbb3b1dec.1574270323.git.anatoly.burakov@intel.com/mbox/",
    "series": [
        {
            "id": 7549,
            "url": "http://patches.dpdk.org/api/series/7549/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=7549",
            "date": "2019-11-20T17:23:27",
            "name": "Implement the new ABI policy and add helper scripts",
            "version": 8,
            "mbox": "http://patches.dpdk.org/series/7549/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/63171/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/63171/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 7377DA04C1;\n\tWed, 20 Nov 2019 18:24:49 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 76E0037B4;\n\tWed, 20 Nov 2019 18:24:29 +0100 (CET)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n by dpdk.org (Postfix) with ESMTP id 806D71BE0C\n for <dev@dpdk.org>; Wed, 20 Nov 2019 18:24:25 +0100 (CET)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n 20 Nov 2019 09:24:24 -0800",
            "from silpixa00399498.ir.intel.com (HELO\n silpixa00399498.ger.corp.intel.com) ([10.237.223.151])\n by orsmga008.jf.intel.com with ESMTP; 20 Nov 2019 09:24:21 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.69,222,1571727600\"; d=\"scan'208\";a=\"200794183\"",
        "From": "Anatoly Burakov <anatoly.burakov@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Marcin Baran <marcinx.baran@intel.com>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Vladimir Medvedkin <vladimir.medvedkin@intel.com>, john.mcnamara@intel.com,\n ray.kinsella@intel.com, thomas@monjalon.net, david.marchand@redhat.com",
        "Date": "Wed, 20 Nov 2019 17:23:34 +0000",
        "Message-Id": "\n <0d4028269b34b303e538c02a7b0d62fdbb3b1dec.1574270323.git.anatoly.burakov@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": [
            "<cover.1574270323.git.anatoly.burakov@intel.com>",
            "<cover.1574270323.git.anatoly.burakov@intel.com>"
        ],
        "References": [
            "<cover.1574270323.git.anatoly.burakov@intel.com>",
            "<cover.1573230233.git.anatoly.burakov@intel.com>\n <cover.1574270323.git.anatoly.burakov@intel.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH v8 07/12] lpm: remove deprecated code",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Marcin Baran <marcinx.baran@intel.com>\n\nRemove code for old ABI versions ahead of ABI version bump.\n\nSigned-off-by: Marcin Baran <marcinx.baran@intel.com>\nSigned-off-by: Anatoly Burakov <anatoly.burakov@intel.com>\nAcked-by: Bruce Richardson <bruce.richardson@intel.com>\n---\n\nNotes:\n    v2:\n    - Moved this to before ABI version bump to avoid compile breakage\n\n lib/librte_lpm/rte_lpm.c           | 1010 ++--------------------------\n lib/librte_lpm/rte_lpm.h           |   88 ---\n lib/librte_lpm/rte_lpm6.c          |  140 +---\n lib/librte_lpm/rte_lpm6.h          |   25 -\n lib/librte_lpm/rte_lpm_version.map |   11 -\n 5 files changed, 59 insertions(+), 1215 deletions(-)",
    "diff": "diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c\nindex 106916dc82..b78c487447 100644\n--- a/lib/librte_lpm/rte_lpm.c\n+++ b/lib/librte_lpm/rte_lpm.c\n@@ -90,34 +90,8 @@ depth_to_range(uint8_t depth)\n /*\n  * Find an existing lpm table and return a pointer to it.\n  */\n-struct rte_lpm_v20 * __vsym\n-rte_lpm_find_existing_v20(const char *name)\n-{\n-\tstruct rte_lpm_v20 *l = NULL;\n-\tstruct rte_tailq_entry *te;\n-\tstruct rte_lpm_list *lpm_list;\n-\n-\tlpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);\n-\n-\trte_mcfg_tailq_read_lock();\n-\tTAILQ_FOREACH(te, lpm_list, next) {\n-\t\tl = te->data;\n-\t\tif (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)\n-\t\t\tbreak;\n-\t}\n-\trte_mcfg_tailq_read_unlock();\n-\n-\tif (te == NULL) {\n-\t\trte_errno = ENOENT;\n-\t\treturn NULL;\n-\t}\n-\n-\treturn l;\n-}\n-VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);\n-\n-struct rte_lpm * __vsym\n-rte_lpm_find_existing_v1604(const char *name)\n+struct rte_lpm *\n+rte_lpm_find_existing(const char *name)\n {\n \tstruct rte_lpm *l = NULL;\n \tstruct rte_tailq_entry *te;\n@@ -140,88 +114,12 @@ rte_lpm_find_existing_v1604(const char *name)\n \n \treturn l;\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);\n-MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),\n-\t\trte_lpm_find_existing_v1604);\n \n /*\n  * Allocates memory for LPM object\n  */\n-struct rte_lpm_v20 * __vsym\n-rte_lpm_create_v20(const char *name, int socket_id, int max_rules,\n-\t\t__rte_unused int flags)\n-{\n-\tchar mem_name[RTE_LPM_NAMESIZE];\n-\tstruct rte_lpm_v20 *lpm = NULL;\n-\tstruct rte_tailq_entry *te;\n-\tuint32_t mem_size;\n-\tstruct rte_lpm_list *lpm_list;\n-\n-\tlpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);\n-\n-\tRTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);\n-\n-\t/* Check user arguments. 
*/\n-\tif ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {\n-\t\trte_errno = EINVAL;\n-\t\treturn NULL;\n-\t}\n-\n-\tsnprintf(mem_name, sizeof(mem_name), \"LPM_%s\", name);\n-\n-\t/* Determine the amount of memory to allocate. */\n-\tmem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);\n-\n-\trte_mcfg_tailq_write_lock();\n-\n-\t/* guarantee there's no existing */\n-\tTAILQ_FOREACH(te, lpm_list, next) {\n-\t\tlpm = te->data;\n-\t\tif (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)\n-\t\t\tbreak;\n-\t}\n-\n-\tif (te != NULL) {\n-\t\tlpm = NULL;\n-\t\trte_errno = EEXIST;\n-\t\tgoto exit;\n-\t}\n-\n-\t/* allocate tailq entry */\n-\tte = rte_zmalloc(\"LPM_TAILQ_ENTRY\", sizeof(*te), 0);\n-\tif (te == NULL) {\n-\t\tRTE_LOG(ERR, LPM, \"Failed to allocate tailq entry\\n\");\n-\t\trte_errno = ENOMEM;\n-\t\tgoto exit;\n-\t}\n-\n-\t/* Allocate memory to store the LPM data structures. */\n-\tlpm = rte_zmalloc_socket(mem_name, mem_size,\n-\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n-\tif (lpm == NULL) {\n-\t\tRTE_LOG(ERR, LPM, \"LPM memory allocation failed\\n\");\n-\t\trte_free(te);\n-\t\trte_errno = ENOMEM;\n-\t\tgoto exit;\n-\t}\n-\n-\t/* Save user arguments. 
*/\n-\tlpm->max_rules = max_rules;\n-\tstrlcpy(lpm->name, name, sizeof(lpm->name));\n-\n-\tte->data = lpm;\n-\n-\tTAILQ_INSERT_TAIL(lpm_list, te, next);\n-\n-exit:\n-\trte_mcfg_tailq_write_unlock();\n-\n-\treturn lpm;\n-}\n-VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);\n-\n-struct rte_lpm * __vsym\n-rte_lpm_create_v1604(const char *name, int socket_id,\n+struct rte_lpm *\n+rte_lpm_create(const char *name, int socket_id,\n \t\tconst struct rte_lpm_config *config)\n {\n \tchar mem_name[RTE_LPM_NAMESIZE];\n@@ -321,45 +219,12 @@ rte_lpm_create_v1604(const char *name, int socket_id,\n \n \treturn lpm;\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);\n-MAP_STATIC_SYMBOL(\n-\tstruct rte_lpm *rte_lpm_create(const char *name, int socket_id,\n-\t\t\tconst struct rte_lpm_config *config), rte_lpm_create_v1604);\n \n /*\n  * Deallocates memory for given LPM table.\n  */\n-void __vsym\n-rte_lpm_free_v20(struct rte_lpm_v20 *lpm)\n-{\n-\tstruct rte_lpm_list *lpm_list;\n-\tstruct rte_tailq_entry *te;\n-\n-\t/* Check user arguments. 
*/\n-\tif (lpm == NULL)\n-\t\treturn;\n-\n-\tlpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);\n-\n-\trte_mcfg_tailq_write_lock();\n-\n-\t/* find our tailq entry */\n-\tTAILQ_FOREACH(te, lpm_list, next) {\n-\t\tif (te->data == (void *) lpm)\n-\t\t\tbreak;\n-\t}\n-\tif (te != NULL)\n-\t\tTAILQ_REMOVE(lpm_list, te, next);\n-\n-\trte_mcfg_tailq_write_unlock();\n-\n-\trte_free(lpm);\n-\trte_free(te);\n-}\n-VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);\n-\n-void __vsym\n-rte_lpm_free_v1604(struct rte_lpm *lpm)\n+void\n+rte_lpm_free(struct rte_lpm *lpm)\n {\n \tstruct rte_lpm_list *lpm_list;\n \tstruct rte_tailq_entry *te;\n@@ -387,9 +252,6 @@ rte_lpm_free_v1604(struct rte_lpm *lpm)\n \trte_free(lpm);\n \trte_free(te);\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);\n-MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),\n-\t\trte_lpm_free_v1604);\n \n /*\n  * Adds a rule to the rule table.\n@@ -402,79 +264,7 @@ MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),\n  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.\n  */\n static int32_t\n-rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,\n-\tuint8_t next_hop)\n-{\n-\tuint32_t rule_gindex, rule_index, last_rule;\n-\tint i;\n-\n-\tVERIFY_DEPTH(depth);\n-\n-\t/* Scan through rule group to see if rule already exists. */\n-\tif (lpm->rule_info[depth - 1].used_rules > 0) {\n-\n-\t\t/* rule_gindex stands for rule group index. */\n-\t\trule_gindex = lpm->rule_info[depth - 1].first_rule;\n-\t\t/* Initialise rule_index to point to start of rule group. */\n-\t\trule_index = rule_gindex;\n-\t\t/* Last rule = Last used rule in this rule group. */\n-\t\tlast_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;\n-\n-\t\tfor (; rule_index < last_rule; rule_index++) {\n-\n-\t\t\t/* If rule already exists update its next_hop and return. 
*/\n-\t\t\tif (lpm->rules_tbl[rule_index].ip == ip_masked) {\n-\t\t\t\tlpm->rules_tbl[rule_index].next_hop = next_hop;\n-\n-\t\t\t\treturn rule_index;\n-\t\t\t}\n-\t\t}\n-\n-\t\tif (rule_index == lpm->max_rules)\n-\t\t\treturn -ENOSPC;\n-\t} else {\n-\t\t/* Calculate the position in which the rule will be stored. */\n-\t\trule_index = 0;\n-\n-\t\tfor (i = depth - 1; i > 0; i--) {\n-\t\t\tif (lpm->rule_info[i - 1].used_rules > 0) {\n-\t\t\t\trule_index = lpm->rule_info[i - 1].first_rule\n-\t\t\t\t\t\t+ lpm->rule_info[i - 1].used_rules;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\t\tif (rule_index == lpm->max_rules)\n-\t\t\treturn -ENOSPC;\n-\n-\t\tlpm->rule_info[depth - 1].first_rule = rule_index;\n-\t}\n-\n-\t/* Make room for the new rule in the array. */\n-\tfor (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {\n-\t\tif (lpm->rule_info[i - 1].first_rule\n-\t\t\t\t+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)\n-\t\t\treturn -ENOSPC;\n-\n-\t\tif (lpm->rule_info[i - 1].used_rules > 0) {\n-\t\t\tlpm->rules_tbl[lpm->rule_info[i - 1].first_rule\n-\t\t\t\t+ lpm->rule_info[i - 1].used_rules]\n-\t\t\t\t\t= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];\n-\t\t\tlpm->rule_info[i - 1].first_rule++;\n-\t\t}\n-\t}\n-\n-\t/* Add the new rule. */\n-\tlpm->rules_tbl[rule_index].ip = ip_masked;\n-\tlpm->rules_tbl[rule_index].next_hop = next_hop;\n-\n-\t/* Increment the used rules counter for this rule group. */\n-\tlpm->rule_info[depth - 1].used_rules++;\n-\n-\treturn rule_index;\n-}\n-\n-static int32_t\n-rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n+rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n \tuint32_t next_hop)\n {\n \tuint32_t rule_gindex, rule_index, last_rule;\n@@ -550,30 +340,7 @@ rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n  * NOTE: Valid range for depth parameter is 1 .. 
32 inclusive.\n  */\n static void\n-rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)\n-{\n-\tint i;\n-\n-\tVERIFY_DEPTH(depth);\n-\n-\tlpm->rules_tbl[rule_index] =\n-\t\t\tlpm->rules_tbl[lpm->rule_info[depth - 1].first_rule\n-\t\t\t\t+ lpm->rule_info[depth - 1].used_rules - 1];\n-\n-\tfor (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {\n-\t\tif (lpm->rule_info[i].used_rules > 0) {\n-\t\t\tlpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =\n-\t\t\t\tlpm->rules_tbl[lpm->rule_info[i].first_rule\n-\t\t\t\t\t+ lpm->rule_info[i].used_rules - 1];\n-\t\t\tlpm->rule_info[i].first_rule--;\n-\t\t}\n-\t}\n-\n-\tlpm->rule_info[depth - 1].used_rules--;\n-}\n-\n-static void\n-rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)\n+rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)\n {\n \tint i;\n \n@@ -600,28 +367,7 @@ rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)\n  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.\n  */\n static int32_t\n-rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)\n-{\n-\tuint32_t rule_gindex, last_rule, rule_index;\n-\n-\tVERIFY_DEPTH(depth);\n-\n-\trule_gindex = lpm->rule_info[depth - 1].first_rule;\n-\tlast_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;\n-\n-\t/* Scan used rules at given depth to find rule. */\n-\tfor (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {\n-\t\t/* If rule is found return the rule index. */\n-\t\tif (lpm->rules_tbl[rule_index].ip == ip_masked)\n-\t\t\treturn rule_index;\n-\t}\n-\n-\t/* If rule is not found return -EINVAL. 
*/\n-\treturn -EINVAL;\n-}\n-\n-static int32_t\n-rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)\n+rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)\n {\n \tuint32_t rule_gindex, last_rule, rule_index;\n \n@@ -645,42 +391,7 @@ rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)\n  * Find, clean and allocate a tbl8.\n  */\n static int32_t\n-tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)\n-{\n-\tuint32_t group_idx; /* tbl8 group index. */\n-\tstruct rte_lpm_tbl_entry_v20 *tbl8_entry;\n-\n-\t/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */\n-\tfor (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;\n-\t\t\tgroup_idx++) {\n-\t\ttbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];\n-\t\t/* If a free tbl8 group is found clean it and set as VALID. */\n-\t\tif (!tbl8_entry->valid_group) {\n-\t\t\tstruct rte_lpm_tbl_entry_v20 new_tbl8_entry = {\n-\t\t\t\t.valid = INVALID,\n-\t\t\t\t.depth = 0,\n-\t\t\t\t.valid_group = VALID,\n-\t\t\t};\n-\t\t\tnew_tbl8_entry.next_hop = 0;\n-\n-\t\t\tmemset(&tbl8_entry[0], 0,\n-\t\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES *\n-\t\t\t\t\tsizeof(tbl8_entry[0]));\n-\n-\t\t\t__atomic_store(tbl8_entry, &new_tbl8_entry,\n-\t\t\t\t\t__ATOMIC_RELAXED);\n-\n-\t\t\t/* Return group index for allocated tbl8 group. */\n-\t\t\treturn group_idx;\n-\t\t}\n-\t}\n-\n-\t/* If there are no tbl8 groups free then return error. */\n-\treturn -ENOSPC;\n-}\n-\n-static int32_t\n-tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)\n+tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)\n {\n \tuint32_t group_idx; /* tbl8 group index. 
*/\n \tstruct rte_lpm_tbl_entry *tbl8_entry;\n@@ -714,22 +425,7 @@ tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)\n }\n \n static void\n-tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)\n-{\n-\t/* Set tbl8 group invalid*/\n-\tstruct rte_lpm_tbl_entry_v20 zero_tbl8_entry = {\n-\t\t.valid = INVALID,\n-\t\t.depth = 0,\n-\t\t.valid_group = INVALID,\n-\t};\n-\tzero_tbl8_entry.next_hop = 0;\n-\n-\t__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,\n-\t\t\t__ATOMIC_RELAXED);\n-}\n-\n-static void\n-tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)\n+tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)\n {\n \t/* Set tbl8 group invalid*/\n \tstruct rte_lpm_tbl_entry zero_tbl8_entry = {0};\n@@ -739,78 +435,7 @@ tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)\n }\n \n static __rte_noinline int32_t\n-add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,\n-\t\tuint8_t next_hop)\n-{\n-\tuint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;\n-\n-\t/* Calculate the index into Table24. 
*/\n-\ttbl24_index = ip >> 8;\n-\ttbl24_range = depth_to_range(depth);\n-\n-\tfor (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {\n-\t\t/*\n-\t\t * For invalid OR valid and non-extended tbl 24 entries set\n-\t\t * entry.\n-\t\t */\n-\t\tif (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&\n-\t\t\t\tlpm->tbl24[i].depth <= depth)) {\n-\n-\t\t\tstruct rte_lpm_tbl_entry_v20 new_tbl24_entry = {\n-\t\t\t\t.valid = VALID,\n-\t\t\t\t.valid_group = 0,\n-\t\t\t\t.depth = depth,\n-\t\t\t};\n-\t\t\tnew_tbl24_entry.next_hop = next_hop;\n-\n-\t\t\t/* Setting tbl24 entry in one go to avoid race\n-\t\t\t * conditions\n-\t\t\t */\n-\t\t\t__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,\n-\t\t\t\t\t__ATOMIC_RELEASE);\n-\n-\t\t\tcontinue;\n-\t\t}\n-\n-\t\tif (lpm->tbl24[i].valid_group == 1) {\n-\t\t\t/* If tbl24 entry is valid and extended calculate the\n-\t\t\t *  index into tbl8.\n-\t\t\t */\n-\t\t\ttbl8_index = lpm->tbl24[i].group_idx *\n-\t\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n-\t\t\ttbl8_group_end = tbl8_index +\n-\t\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n-\n-\t\t\tfor (j = tbl8_index; j < tbl8_group_end; j++) {\n-\t\t\t\tif (!lpm->tbl8[j].valid ||\n-\t\t\t\t\t\tlpm->tbl8[j].depth <= depth) {\n-\t\t\t\t\tstruct rte_lpm_tbl_entry_v20\n-\t\t\t\t\t\tnew_tbl8_entry = {\n-\t\t\t\t\t\t.valid = VALID,\n-\t\t\t\t\t\t.valid_group = VALID,\n-\t\t\t\t\t\t.depth = depth,\n-\t\t\t\t\t};\n-\t\t\t\t\tnew_tbl8_entry.next_hop = next_hop;\n-\n-\t\t\t\t\t/*\n-\t\t\t\t\t * Setting tbl8 entry in one go to avoid\n-\t\t\t\t\t * race conditions\n-\t\t\t\t\t */\n-\t\t\t\t\t__atomic_store(&lpm->tbl8[j],\n-\t\t\t\t\t\t&new_tbl8_entry,\n-\t\t\t\t\t\t__ATOMIC_RELAXED);\n-\n-\t\t\t\t\tcontinue;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static __rte_noinline int32_t\n-add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n+add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n \t\tuint32_t next_hop)\n {\n #define group_idx next_hop\n@@ 
-882,150 +507,7 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n }\n \n static __rte_noinline int32_t\n-add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,\n-\t\tuint8_t next_hop)\n-{\n-\tuint32_t tbl24_index;\n-\tint32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,\n-\t\ttbl8_range, i;\n-\n-\ttbl24_index = (ip_masked >> 8);\n-\ttbl8_range = depth_to_range(depth);\n-\n-\tif (!lpm->tbl24[tbl24_index].valid) {\n-\t\t/* Search for a free tbl8 group. */\n-\t\ttbl8_group_index = tbl8_alloc_v20(lpm->tbl8);\n-\n-\t\t/* Check tbl8 allocation was successful. */\n-\t\tif (tbl8_group_index < 0) {\n-\t\t\treturn tbl8_group_index;\n-\t\t}\n-\n-\t\t/* Find index into tbl8 and range. */\n-\t\ttbl8_index = (tbl8_group_index *\n-\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES) +\n-\t\t\t\t(ip_masked & 0xFF);\n-\n-\t\t/* Set tbl8 entry. */\n-\t\tfor (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {\n-\t\t\tstruct rte_lpm_tbl_entry_v20 new_tbl8_entry = {\n-\t\t\t\t.valid = VALID,\n-\t\t\t\t.depth = depth,\n-\t\t\t\t.valid_group = lpm->tbl8[i].valid_group,\n-\t\t\t};\n-\t\t\tnew_tbl8_entry.next_hop = next_hop;\n-\t\t\t__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,\n-\t\t\t\t\t__ATOMIC_RELAXED);\n-\t\t}\n-\n-\t\t/*\n-\t\t * Update tbl24 entry to point to new tbl8 entry. Note: The\n-\t\t * ext_flag and tbl8_index need to be updated simultaneously,\n-\t\t * so assign whole structure in one go\n-\t\t */\n-\n-\t\tstruct rte_lpm_tbl_entry_v20 new_tbl24_entry = {\n-\t\t\t.group_idx = (uint8_t)tbl8_group_index,\n-\t\t\t.valid = VALID,\n-\t\t\t.valid_group = 1,\n-\t\t\t.depth = 0,\n-\t\t};\n-\n-\t\t__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,\n-\t\t\t\t__ATOMIC_RELEASE);\n-\n-\t} /* If valid entry but not extended calculate the index into Table8. */\n-\telse if (lpm->tbl24[tbl24_index].valid_group == 0) {\n-\t\t/* Search for free tbl8 group. 
*/\n-\t\ttbl8_group_index = tbl8_alloc_v20(lpm->tbl8);\n-\n-\t\tif (tbl8_group_index < 0) {\n-\t\t\treturn tbl8_group_index;\n-\t\t}\n-\n-\t\ttbl8_group_start = tbl8_group_index *\n-\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n-\t\ttbl8_group_end = tbl8_group_start +\n-\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n-\n-\t\t/* Populate new tbl8 with tbl24 value. */\n-\t\tfor (i = tbl8_group_start; i < tbl8_group_end; i++) {\n-\t\t\tstruct rte_lpm_tbl_entry_v20 new_tbl8_entry = {\n-\t\t\t\t.valid = VALID,\n-\t\t\t\t.depth = lpm->tbl24[tbl24_index].depth,\n-\t\t\t\t.valid_group = lpm->tbl8[i].valid_group,\n-\t\t\t};\n-\t\t\tnew_tbl8_entry.next_hop =\n-\t\t\t\tlpm->tbl24[tbl24_index].next_hop;\n-\t\t\t__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,\n-\t\t\t\t\t__ATOMIC_RELAXED);\n-\t\t}\n-\n-\t\ttbl8_index = tbl8_group_start + (ip_masked & 0xFF);\n-\n-\t\t/* Insert new rule into the tbl8 entry. */\n-\t\tfor (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {\n-\t\t\tstruct rte_lpm_tbl_entry_v20 new_tbl8_entry = {\n-\t\t\t\t.valid = VALID,\n-\t\t\t\t.depth = depth,\n-\t\t\t\t.valid_group = lpm->tbl8[i].valid_group,\n-\t\t\t};\n-\t\t\tnew_tbl8_entry.next_hop = next_hop;\n-\t\t\t__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,\n-\t\t\t\t\t__ATOMIC_RELAXED);\n-\t\t}\n-\n-\t\t/*\n-\t\t * Update tbl24 entry to point to new tbl8 entry. 
Note: The\n-\t\t * ext_flag and tbl8_index need to be updated simultaneously,\n-\t\t * so assign whole structure in one go.\n-\t\t */\n-\n-\t\tstruct rte_lpm_tbl_entry_v20 new_tbl24_entry = {\n-\t\t\t\t.group_idx = (uint8_t)tbl8_group_index,\n-\t\t\t\t.valid = VALID,\n-\t\t\t\t.valid_group = 1,\n-\t\t\t\t.depth = 0,\n-\t\t};\n-\n-\t\t__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,\n-\t\t\t\t__ATOMIC_RELEASE);\n-\n-\t} else { /*\n-\t\t* If it is valid, extended entry calculate the index into tbl8.\n-\t\t*/\n-\t\ttbl8_group_index = lpm->tbl24[tbl24_index].group_idx;\n-\t\ttbl8_group_start = tbl8_group_index *\n-\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n-\t\ttbl8_index = tbl8_group_start + (ip_masked & 0xFF);\n-\n-\t\tfor (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {\n-\n-\t\t\tif (!lpm->tbl8[i].valid ||\n-\t\t\t\t\tlpm->tbl8[i].depth <= depth) {\n-\t\t\t\tstruct rte_lpm_tbl_entry_v20 new_tbl8_entry = {\n-\t\t\t\t\t.valid = VALID,\n-\t\t\t\t\t.depth = depth,\n-\t\t\t\t\t.valid_group = lpm->tbl8[i].valid_group,\n-\t\t\t\t};\n-\t\t\t\tnew_tbl8_entry.next_hop = next_hop;\n-\t\t\t\t/*\n-\t\t\t\t * Setting tbl8 entry in one go to avoid race\n-\t\t\t\t * condition\n-\t\t\t\t */\n-\t\t\t\t__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,\n-\t\t\t\t\t\t__ATOMIC_RELAXED);\n-\n-\t\t\t\tcontinue;\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static __rte_noinline int32_t\n-add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n+add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n \t\tuint32_t next_hop)\n {\n #define group_idx next_hop\n@@ -1038,7 +520,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n \n \tif (!lpm->tbl24[tbl24_index].valid) {\n \t\t/* Search for a free tbl8 group. */\n-\t\ttbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);\n+\t\ttbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);\n \n \t\t/* Check tbl8 allocation was successful. 
*/\n \t\tif (tbl8_group_index < 0) {\n@@ -1084,7 +566,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n \t} /* If valid entry but not extended calculate the index into Table8. */\n \telse if (lpm->tbl24[tbl24_index].valid_group == 0) {\n \t\t/* Search for free tbl8 group. */\n-\t\ttbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);\n+\t\ttbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);\n \n \t\tif (tbl8_group_index < 0) {\n \t\t\treturn tbl8_group_index;\n@@ -1177,49 +659,8 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,\n /*\n  * Add a route\n  */\n-int __vsym\n-rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,\n-\t\tuint8_t next_hop)\n-{\n-\tint32_t rule_index, status = 0;\n-\tuint32_t ip_masked;\n-\n-\t/* Check user arguments. */\n-\tif ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))\n-\t\treturn -EINVAL;\n-\n-\tip_masked = ip & depth_to_mask(depth);\n-\n-\t/* Add the rule to the rule table. */\n-\trule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);\n-\n-\t/* If the is no space available for new rule return error. 
*/\n-\tif (rule_index < 0) {\n-\t\treturn rule_index;\n-\t}\n-\n-\tif (depth <= MAX_DEPTH_TBL24) {\n-\t\tstatus = add_depth_small_v20(lpm, ip_masked, depth, next_hop);\n-\t} else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */\n-\t\tstatus = add_depth_big_v20(lpm, ip_masked, depth, next_hop);\n-\n-\t\t/*\n-\t\t * If add fails due to exhaustion of tbl8 extensions delete\n-\t\t * rule that was added to rule table.\n-\t\t */\n-\t\tif (status < 0) {\n-\t\t\trule_delete_v20(lpm, rule_index, depth);\n-\n-\t\t\treturn status;\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);\n-\n-int __vsym\n-rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n+int\n+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n \t\tuint32_t next_hop)\n {\n \tint32_t rule_index, status = 0;\n@@ -1232,7 +673,7 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n \tip_masked = ip & depth_to_mask(depth);\n \n \t/* Add the rule to the rule table. */\n-\trule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);\n+\trule_index = rule_add(lpm, ip_masked, depth, next_hop);\n \n \t/* If the is no space available for new rule return error. 
*/\n \tif (rule_index < 0) {\n@@ -1240,16 +681,16 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n \t}\n \n \tif (depth <= MAX_DEPTH_TBL24) {\n-\t\tstatus = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);\n+\t\tstatus = add_depth_small(lpm, ip_masked, depth, next_hop);\n \t} else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */\n-\t\tstatus = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);\n+\t\tstatus = add_depth_big(lpm, ip_masked, depth, next_hop);\n \n \t\t/*\n \t\t * If add fails due to exhaustion of tbl8 extensions delete\n \t\t * rule that was added to rule table.\n \t\t */\n \t\tif (status < 0) {\n-\t\t\trule_delete_v1604(lpm, rule_index, depth);\n+\t\t\trule_delete(lpm, rule_index, depth);\n \n \t\t\treturn status;\n \t\t}\n@@ -1257,42 +698,12 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n \n \treturn 0;\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);\n-MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,\n-\t\tuint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);\n \n /*\n  * Look for a rule in the high-level rules table\n  */\n-int __vsym\n-rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,\n-uint8_t *next_hop)\n-{\n-\tuint32_t ip_masked;\n-\tint32_t rule_index;\n-\n-\t/* Check user arguments. */\n-\tif ((lpm == NULL) ||\n-\t\t(next_hop == NULL) ||\n-\t\t(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))\n-\t\treturn -EINVAL;\n-\n-\t/* Look for the rule using rule_find. */\n-\tip_masked = ip & depth_to_mask(depth);\n-\trule_index = rule_find_v20(lpm, ip_masked, depth);\n-\n-\tif (rule_index >= 0) {\n-\t\t*next_hop = lpm->rules_tbl[rule_index].next_hop;\n-\t\treturn 1;\n-\t}\n-\n-\t/* If rule is not found return 0. 
*/\n-\treturn 0;\n-}\n-VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);\n-\n-int __vsym\n-rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n+int\n+rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n uint32_t *next_hop)\n {\n \tuint32_t ip_masked;\n@@ -1306,7 +717,7 @@ uint32_t *next_hop)\n \n \t/* Look for the rule using rule_find. */\n \tip_masked = ip & depth_to_mask(depth);\n-\trule_index = rule_find_v1604(lpm, ip_masked, depth);\n+\trule_index = rule_find(lpm, ip_masked, depth);\n \n \tif (rule_index >= 0) {\n \t\t*next_hop = lpm->rules_tbl[rule_index].next_hop;\n@@ -1316,12 +727,9 @@ uint32_t *next_hop)\n \t/* If rule is not found return 0. */\n \treturn 0;\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);\n-MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,\n-\t\tuint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);\n \n static int32_t\n-find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,\n+find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n \t\tuint8_t *sub_rule_depth)\n {\n \tint32_t rule_index;\n@@ -1331,7 +739,7 @@ find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,\n \tfor (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {\n \t\tip_masked = ip & depth_to_mask(prev_depth);\n \n-\t\trule_index = rule_find_v20(lpm, ip_masked, prev_depth);\n+\t\trule_index = rule_find(lpm, ip_masked, prev_depth);\n \n \t\tif (rule_index >= 0) {\n \t\t\t*sub_rule_depth = prev_depth;\n@@ -1343,133 +751,7 @@ find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,\n }\n \n static int32_t\n-find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n-\t\tuint8_t *sub_rule_depth)\n-{\n-\tint32_t rule_index;\n-\tuint32_t ip_masked;\n-\tuint8_t prev_depth;\n-\n-\tfor (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {\n-\t\tip_masked = 
ip & depth_to_mask(prev_depth);\n-\n-\t\trule_index = rule_find_v1604(lpm, ip_masked, prev_depth);\n-\n-\t\tif (rule_index >= 0) {\n-\t\t\t*sub_rule_depth = prev_depth;\n-\t\t\treturn rule_index;\n-\t\t}\n-\t}\n-\n-\treturn -1;\n-}\n-\n-static int32_t\n-delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,\n-\tuint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)\n-{\n-\tuint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;\n-\n-\t/* Calculate the range and index into Table24. */\n-\ttbl24_range = depth_to_range(depth);\n-\ttbl24_index = (ip_masked >> 8);\n-\n-\t/*\n-\t * Firstly check the sub_rule_index. A -1 indicates no replacement rule\n-\t * and a positive number indicates a sub_rule_index.\n-\t */\n-\tif (sub_rule_index < 0) {\n-\t\t/*\n-\t\t * If no replacement rule exists then invalidate entries\n-\t\t * associated with this rule.\n-\t\t */\n-\t\tfor (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {\n-\n-\t\t\tif (lpm->tbl24[i].valid_group == 0 &&\n-\t\t\t\t\tlpm->tbl24[i].depth <= depth) {\n-\t\t\t\tstruct rte_lpm_tbl_entry_v20\n-\t\t\t\t\tzero_tbl24_entry = {\n-\t\t\t\t\t\t.valid = INVALID,\n-\t\t\t\t\t\t.depth = 0,\n-\t\t\t\t\t\t.valid_group = 0,\n-\t\t\t\t\t};\n-\t\t\t\t\tzero_tbl24_entry.next_hop = 0;\n-\t\t\t\t__atomic_store(&lpm->tbl24[i],\n-\t\t\t\t\t&zero_tbl24_entry, __ATOMIC_RELEASE);\n-\t\t\t} else if (lpm->tbl24[i].valid_group == 1) {\n-\t\t\t\t/*\n-\t\t\t\t * If TBL24 entry is extended, then there has\n-\t\t\t\t * to be a rule with depth >= 25 in the\n-\t\t\t\t * associated TBL8 group.\n-\t\t\t\t */\n-\n-\t\t\t\ttbl8_group_index = lpm->tbl24[i].group_idx;\n-\t\t\t\ttbl8_index = tbl8_group_index *\n-\t\t\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n-\n-\t\t\t\tfor (j = tbl8_index; j < (tbl8_index +\n-\t\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {\n-\n-\t\t\t\t\tif (lpm->tbl8[j].depth <= depth)\n-\t\t\t\t\t\tlpm->tbl8[j].valid = INVALID;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\t} else {\n-\t\t/*\n-\t\t * If 
a replacement rule exists then modify entries\n-\t\t * associated with this rule.\n-\t\t */\n-\n-\t\tstruct rte_lpm_tbl_entry_v20 new_tbl24_entry = {\n-\t\t\t.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,\n-\t\t\t.valid = VALID,\n-\t\t\t.valid_group = 0,\n-\t\t\t.depth = sub_rule_depth,\n-\t\t};\n-\n-\t\tstruct rte_lpm_tbl_entry_v20 new_tbl8_entry = {\n-\t\t\t.valid = VALID,\n-\t\t\t.valid_group = VALID,\n-\t\t\t.depth = sub_rule_depth,\n-\t\t};\n-\t\tnew_tbl8_entry.next_hop =\n-\t\t\t\tlpm->rules_tbl[sub_rule_index].next_hop;\n-\n-\t\tfor (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {\n-\n-\t\t\tif (lpm->tbl24[i].valid_group == 0 &&\n-\t\t\t\t\tlpm->tbl24[i].depth <= depth) {\n-\t\t\t\t__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,\n-\t\t\t\t\t\t__ATOMIC_RELEASE);\n-\t\t\t} else  if (lpm->tbl24[i].valid_group == 1) {\n-\t\t\t\t/*\n-\t\t\t\t * If TBL24 entry is extended, then there has\n-\t\t\t\t * to be a rule with depth >= 25 in the\n-\t\t\t\t * associated TBL8 group.\n-\t\t\t\t */\n-\n-\t\t\t\ttbl8_group_index = lpm->tbl24[i].group_idx;\n-\t\t\t\ttbl8_index = tbl8_group_index *\n-\t\t\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n-\n-\t\t\t\tfor (j = tbl8_index; j < (tbl8_index +\n-\t\t\t\t\tRTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {\n-\n-\t\t\t\t\tif (lpm->tbl8[j].depth <= depth)\n-\t\t\t\t\t\t__atomic_store(&lpm->tbl8[j],\n-\t\t\t\t\t\t\t&new_tbl8_entry,\n-\t\t\t\t\t\t\t__ATOMIC_RELAXED);\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int32_t\n-delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,\n+delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,\n \tuint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)\n {\n #define group_idx next_hop\n@@ -1576,7 +858,7 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,\n  * thus can be recycled\n  */\n static int32_t\n-tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,\n+tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,\n \t\tuint32_t 
tbl8_group_start)\n {\n \tuint32_t tbl8_group_end, i;\n@@ -1623,140 +905,7 @@ tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,\n }\n \n static int32_t\n-tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,\n-\t\tuint32_t tbl8_group_start)\n-{\n-\tuint32_t tbl8_group_end, i;\n-\ttbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n-\n-\t/*\n-\t * Check the first entry of the given tbl8. If it is invalid we know\n-\t * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH\n-\t *  (As they would affect all entries in a tbl8) and thus this table\n-\t *  can not be recycled.\n-\t */\n-\tif (tbl8[tbl8_group_start].valid) {\n-\t\t/*\n-\t\t * If first entry is valid check if the depth is less than 24\n-\t\t * and if so check the rest of the entries to verify that they\n-\t\t * are all of this depth.\n-\t\t */\n-\t\tif (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {\n-\t\t\tfor (i = (tbl8_group_start + 1); i < tbl8_group_end;\n-\t\t\t\t\ti++) {\n-\n-\t\t\t\tif (tbl8[i].depth !=\n-\t\t\t\t\t\ttbl8[tbl8_group_start].depth) {\n-\n-\t\t\t\t\treturn -EEXIST;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\t/* If all entries are the same return the tb8 index */\n-\t\t\treturn tbl8_group_start;\n-\t\t}\n-\n-\t\treturn -EEXIST;\n-\t}\n-\t/*\n-\t * If the first entry is invalid check if the rest of the entries in\n-\t * the tbl8 are invalid.\n-\t */\n-\tfor (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {\n-\t\tif (tbl8[i].valid)\n-\t\t\treturn -EEXIST;\n-\t}\n-\t/* If no valid entries are found then return -EINVAL. */\n-\treturn -EINVAL;\n-}\n-\n-static int32_t\n-delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,\n-\tuint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)\n-{\n-\tuint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,\n-\t\t\ttbl8_range, i;\n-\tint32_t tbl8_recycle_index;\n-\n-\t/*\n-\t * Calculate the index into tbl24 and range. 
Note: All depths larger\n-\t * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.\n-\t */\n-\ttbl24_index = ip_masked >> 8;\n-\n-\t/* Calculate the index into tbl8 and range. */\n-\ttbl8_group_index = lpm->tbl24[tbl24_index].group_idx;\n-\ttbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;\n-\ttbl8_index = tbl8_group_start + (ip_masked & 0xFF);\n-\ttbl8_range = depth_to_range(depth);\n-\n-\tif (sub_rule_index < 0) {\n-\t\t/*\n-\t\t * Loop through the range of entries on tbl8 for which the\n-\t\t * rule_to_delete must be removed or modified.\n-\t\t */\n-\t\tfor (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {\n-\t\t\tif (lpm->tbl8[i].depth <= depth)\n-\t\t\t\tlpm->tbl8[i].valid = INVALID;\n-\t\t}\n-\t} else {\n-\t\t/* Set new tbl8 entry. */\n-\t\tstruct rte_lpm_tbl_entry_v20 new_tbl8_entry = {\n-\t\t\t.valid = VALID,\n-\t\t\t.depth = sub_rule_depth,\n-\t\t\t.valid_group = lpm->tbl8[tbl8_group_start].valid_group,\n-\t\t};\n-\n-\t\tnew_tbl8_entry.next_hop =\n-\t\t\t\tlpm->rules_tbl[sub_rule_index].next_hop;\n-\t\t/*\n-\t\t * Loop through the range of entries on tbl8 for which the\n-\t\t * rule_to_delete must be modified.\n-\t\t */\n-\t\tfor (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {\n-\t\t\tif (lpm->tbl8[i].depth <= depth)\n-\t\t\t\t__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,\n-\t\t\t\t\t\t__ATOMIC_RELAXED);\n-\t\t}\n-\t}\n-\n-\t/*\n-\t * Check if there are any valid entries in this tbl8 group. 
If all\n-\t * tbl8 entries are invalid we can free the tbl8 and invalidate the\n-\t * associated tbl24 entry.\n-\t */\n-\n-\ttbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);\n-\n-\tif (tbl8_recycle_index == -EINVAL) {\n-\t\t/* Set tbl24 before freeing tbl8 to avoid race condition.\n-\t\t * Prevent the free of the tbl8 group from hoisting.\n-\t\t */\n-\t\tlpm->tbl24[tbl24_index].valid = 0;\n-\t\t__atomic_thread_fence(__ATOMIC_RELEASE);\n-\t\ttbl8_free_v20(lpm->tbl8, tbl8_group_start);\n-\t} else if (tbl8_recycle_index > -1) {\n-\t\t/* Update tbl24 entry. */\n-\t\tstruct rte_lpm_tbl_entry_v20 new_tbl24_entry = {\n-\t\t\t.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,\n-\t\t\t.valid = VALID,\n-\t\t\t.valid_group = 0,\n-\t\t\t.depth = lpm->tbl8[tbl8_recycle_index].depth,\n-\t\t};\n-\n-\t\t/* Set tbl24 before freeing tbl8 to avoid race condition.\n-\t\t * Prevent the free of the tbl8 group from hoisting.\n-\t\t */\n-\t\t__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,\n-\t\t\t\t__ATOMIC_RELAXED);\n-\t\t__atomic_thread_fence(__ATOMIC_RELEASE);\n-\t\ttbl8_free_v20(lpm->tbl8, tbl8_group_start);\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int32_t\n-delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,\n+delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,\n \tuint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)\n {\n #define group_idx next_hop\n@@ -1811,7 +960,7 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,\n \t * associated tbl24 entry.\n \t */\n \n-\ttbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);\n+\ttbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);\n \n \tif (tbl8_recycle_index == -EINVAL) {\n \t\t/* Set tbl24 before freeing tbl8 to avoid race condition.\n@@ -1819,7 +968,7 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,\n \t\t */\n \t\tlpm->tbl24[tbl24_index].valid = 0;\n 
\t\t__atomic_thread_fence(__ATOMIC_RELEASE);\n-\t\ttbl8_free_v1604(lpm->tbl8, tbl8_group_start);\n+\t\ttbl8_free(lpm->tbl8, tbl8_group_start);\n \t} else if (tbl8_recycle_index > -1) {\n \t\t/* Update tbl24 entry. */\n \t\tstruct rte_lpm_tbl_entry new_tbl24_entry = {\n@@ -1835,7 +984,7 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,\n \t\t__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,\n \t\t\t\t__ATOMIC_RELAXED);\n \t\t__atomic_thread_fence(__ATOMIC_RELEASE);\n-\t\ttbl8_free_v1604(lpm->tbl8, tbl8_group_start);\n+\t\ttbl8_free(lpm->tbl8, tbl8_group_start);\n \t}\n #undef group_idx\n \treturn 0;\n@@ -1844,8 +993,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,\n /*\n  * Deletes a rule\n  */\n-int __vsym\n-rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)\n+int\n+rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)\n {\n \tint32_t rule_to_delete_index, sub_rule_index;\n \tuint32_t ip_masked;\n@@ -1864,7 +1013,7 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)\n \t * Find the index of the input rule, that needs to be deleted, in the\n \t * rule table.\n \t */\n-\trule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);\n+\trule_to_delete_index = rule_find(lpm, ip_masked, depth);\n \n \t/*\n \t * Check if rule_to_delete_index was found. If no rule was found the\n@@ -1874,7 +1023,7 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)\n \t\treturn -EINVAL;\n \n \t/* Delete the rule from the rule table. */\n-\trule_delete_v20(lpm, rule_to_delete_index, depth);\n+\trule_delete(lpm, rule_to_delete_index, depth);\n \n \t/*\n \t * Find rule to replace the rule_to_delete. 
If there is no rule to\n@@ -1882,100 +1031,26 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)\n \t * entries associated with this rule.\n \t */\n \tsub_rule_depth = 0;\n-\tsub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);\n+\tsub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);\n \n \t/*\n \t * If the input depth value is less than 25 use function\n \t * delete_depth_small otherwise use delete_depth_big.\n \t */\n \tif (depth <= MAX_DEPTH_TBL24) {\n-\t\treturn delete_depth_small_v20(lpm, ip_masked, depth,\n+\t\treturn delete_depth_small(lpm, ip_masked, depth,\n \t\t\t\tsub_rule_index, sub_rule_depth);\n \t} else { /* If depth > MAX_DEPTH_TBL24 */\n-\t\treturn delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,\n+\t\treturn delete_depth_big(lpm, ip_masked, depth, sub_rule_index,\n \t\t\t\tsub_rule_depth);\n \t}\n }\n-VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);\n-\n-int __vsym\n-rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)\n-{\n-\tint32_t rule_to_delete_index, sub_rule_index;\n-\tuint32_t ip_masked;\n-\tuint8_t sub_rule_depth;\n-\t/*\n-\t * Check input arguments. Note: IP must be a positive integer of 32\n-\t * bits in length therefore it need not be checked.\n-\t */\n-\tif ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tip_masked = ip & depth_to_mask(depth);\n-\n-\t/*\n-\t * Find the index of the input rule, that needs to be deleted, in the\n-\t * rule table.\n-\t */\n-\trule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);\n-\n-\t/*\n-\t * Check if rule_to_delete_index was found. If no rule was found the\n-\t * function rule_find returns -EINVAL.\n-\t */\n-\tif (rule_to_delete_index < 0)\n-\t\treturn -EINVAL;\n-\n-\t/* Delete the rule from the rule table. */\n-\trule_delete_v1604(lpm, rule_to_delete_index, depth);\n-\n-\t/*\n-\t * Find rule to replace the rule_to_delete. 
If there is no rule to\n-\t * replace the rule_to_delete we return -1 and invalidate the table\n-\t * entries associated with this rule.\n-\t */\n-\tsub_rule_depth = 0;\n-\tsub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);\n-\n-\t/*\n-\t * If the input depth value is less than 25 use function\n-\t * delete_depth_small otherwise use delete_depth_big.\n-\t */\n-\tif (depth <= MAX_DEPTH_TBL24) {\n-\t\treturn delete_depth_small_v1604(lpm, ip_masked, depth,\n-\t\t\t\tsub_rule_index, sub_rule_depth);\n-\t} else { /* If depth > MAX_DEPTH_TBL24 */\n-\t\treturn delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,\n-\t\t\t\tsub_rule_depth);\n-\t}\n-}\n-BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);\n-MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,\n-\t\tuint8_t depth), rte_lpm_delete_v1604);\n \n /*\n  * Delete all rules from the LPM table.\n  */\n-void __vsym\n-rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)\n-{\n-\t/* Zero rule information. */\n-\tmemset(lpm->rule_info, 0, sizeof(lpm->rule_info));\n-\n-\t/* Zero tbl24. */\n-\tmemset(lpm->tbl24, 0, sizeof(lpm->tbl24));\n-\n-\t/* Zero tbl8. */\n-\tmemset(lpm->tbl8, 0, sizeof(lpm->tbl8));\n-\n-\t/* Delete all rules form the rules table. */\n-\tmemset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);\n-}\n-VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);\n-\n-void __vsym\n-rte_lpm_delete_all_v1604(struct rte_lpm *lpm)\n+void\n+rte_lpm_delete_all(struct rte_lpm *lpm)\n {\n \t/* Zero rule information. */\n \tmemset(lpm->rule_info, 0, sizeof(lpm->rule_info));\n@@ -1990,6 +1065,3 @@ rte_lpm_delete_all_v1604(struct rte_lpm *lpm)\n \t/* Delete all rules form the rules table. 
*/\n \tmemset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);\n-MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),\n-\t\trte_lpm_delete_all_v1604);\ndiff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h\nindex 26303e6288..b9d49ac879 100644\n--- a/lib/librte_lpm/rte_lpm.h\n+++ b/lib/librte_lpm/rte_lpm.h\n@@ -64,31 +64,6 @@ extern \"C\" {\n \n #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n /** @internal Tbl24 entry structure. */\n-__extension__\n-struct rte_lpm_tbl_entry_v20 {\n-\t/**\n-\t * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or\n-\t * a group index pointing to a tbl8 structure (tbl24 only, when\n-\t * valid_group is set)\n-\t */\n-\tRTE_STD_C11\n-\tunion {\n-\t\tuint8_t next_hop;\n-\t\tuint8_t group_idx;\n-\t};\n-\t/* Using single uint8_t to store 3 values. */\n-\tuint8_t valid     :1;   /**< Validation flag. */\n-\t/**\n-\t * For tbl24:\n-\t *  - valid_group == 0: entry stores a next hop\n-\t *  - valid_group == 1: entry stores a group_index pointing to a tbl8\n-\t * For tbl8:\n-\t *  - valid_group indicates whether the current tbl8 is in use or not\n-\t */\n-\tuint8_t valid_group :1;\n-\tuint8_t depth       :6; /**< Rule depth. */\n-} __rte_aligned(sizeof(uint16_t));\n-\n __extension__\n struct rte_lpm_tbl_entry {\n \t/**\n@@ -111,16 +86,6 @@ struct rte_lpm_tbl_entry {\n };\n \n #else\n-__extension__\n-struct rte_lpm_tbl_entry_v20 {\n-\tuint8_t depth       :6;\n-\tuint8_t valid_group :1;\n-\tuint8_t valid       :1;\n-\tunion {\n-\t\tuint8_t group_idx;\n-\t\tuint8_t next_hop;\n-\t};\n-} __rte_aligned(sizeof(uint16_t));\n \n __extension__\n struct rte_lpm_tbl_entry {\n@@ -141,11 +106,6 @@ struct rte_lpm_config {\n };\n \n /** @internal Rule structure. */\n-struct rte_lpm_rule_v20 {\n-\tuint32_t ip; /**< Rule IP address. */\n-\tuint8_t  next_hop; /**< Rule next hop. */\n-};\n-\n struct rte_lpm_rule {\n \tuint32_t ip; /**< Rule IP address. 
*/\n \tuint32_t next_hop; /**< Rule next hop. */\n@@ -158,21 +118,6 @@ struct rte_lpm_rule_info {\n };\n \n /** @internal LPM structure. */\n-struct rte_lpm_v20 {\n-\t/* LPM metadata. */\n-\tchar name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */\n-\tuint32_t max_rules; /**< Max. balanced rules per lpm. */\n-\tstruct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */\n-\n-\t/* LPM Tables. */\n-\tstruct rte_lpm_tbl_entry_v20 tbl24[RTE_LPM_TBL24_NUM_ENTRIES]\n-\t\t\t__rte_cache_aligned; /**< LPM tbl24 table. */\n-\tstruct rte_lpm_tbl_entry_v20 tbl8[RTE_LPM_TBL8_NUM_ENTRIES]\n-\t\t\t__rte_cache_aligned; /**< LPM tbl8 table. */\n-\tstruct rte_lpm_rule_v20 rules_tbl[]\n-\t\t\t__rte_cache_aligned; /**< LPM rules. */\n-};\n-\n struct rte_lpm {\n \t/* LPM metadata. */\n \tchar name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */\n@@ -209,11 +154,6 @@ struct rte_lpm {\n struct rte_lpm *\n rte_lpm_create(const char *name, int socket_id,\n \t\tconst struct rte_lpm_config *config);\n-struct rte_lpm_v20 *\n-rte_lpm_create_v20(const char *name, int socket_id, int max_rules, int flags);\n-struct rte_lpm *\n-rte_lpm_create_v1604(const char *name, int socket_id,\n-\t\tconst struct rte_lpm_config *config);\n \n /**\n  * Find an existing LPM object and return a pointer to it.\n@@ -227,10 +167,6 @@ rte_lpm_create_v1604(const char *name, int socket_id,\n  */\n struct rte_lpm *\n rte_lpm_find_existing(const char *name);\n-struct rte_lpm_v20 *\n-rte_lpm_find_existing_v20(const char *name);\n-struct rte_lpm *\n-rte_lpm_find_existing_v1604(const char *name);\n \n /**\n  * Free an LPM object.\n@@ -242,10 +178,6 @@ rte_lpm_find_existing_v1604(const char *name);\n  */\n void\n rte_lpm_free(struct rte_lpm *lpm);\n-void\n-rte_lpm_free_v20(struct rte_lpm_v20 *lpm);\n-void\n-rte_lpm_free_v1604(struct rte_lpm *lpm);\n \n /**\n  * Add a rule to the LPM table.\n@@ -263,12 +195,6 @@ rte_lpm_free_v1604(struct rte_lpm *lpm);\n  */\n int\n rte_lpm_add(struct rte_lpm *lpm, 
uint32_t ip, uint8_t depth, uint32_t next_hop);\n-int\n-rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,\n-\t\tuint8_t next_hop);\n-int\n-rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n-\t\tuint32_t next_hop);\n \n /**\n  * Check if a rule is present in the LPM table,\n@@ -288,12 +214,6 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n int\n rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n uint32_t *next_hop);\n-int\n-rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,\n-uint8_t *next_hop);\n-int\n-rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,\n-uint32_t *next_hop);\n \n /**\n  * Delete a rule from the LPM table.\n@@ -309,10 +229,6 @@ uint32_t *next_hop);\n  */\n int\n rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);\n-int\n-rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth);\n-int\n-rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);\n \n /**\n  * Delete all rules from the LPM table.\n@@ -322,10 +238,6 @@ rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);\n  */\n void\n rte_lpm_delete_all(struct rte_lpm *lpm);\n-void\n-rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm);\n-void\n-rte_lpm_delete_all_v1604(struct rte_lpm *lpm);\n \n /**\n  * Lookup an IP into the LPM table.\ndiff --git a/lib/librte_lpm/rte_lpm6.c b/lib/librte_lpm/rte_lpm6.c\nindex 0d161dc327..c46e557e23 100644\n--- a/lib/librte_lpm/rte_lpm6.c\n+++ b/lib/librte_lpm/rte_lpm6.c\n@@ -809,18 +809,6 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,\n \treturn 1;\n }\n \n-/*\n- * Add a route\n- */\n-int __vsym\n-rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n-\t\tuint8_t next_hop)\n-{\n-\treturn rte_lpm6_add_v1705(lpm, ip, depth, next_hop);\n-}\n-VERSION_SYMBOL(rte_lpm6_add, _v20, 2.0);\n-\n-\n /*\n  * Simulate adding a route to LPM\n  *\n@@ -842,7 
+830,7 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)\n \n \t/* Inspect the first three bytes through tbl24 on the first step. */\n \tret = simulate_add_step(lpm, lpm->tbl24, &tbl_next, masked_ip,\n-\t\t\tADD_FIRST_BYTE, 1, depth, &need_tbl_nb);\n+\t\tADD_FIRST_BYTE, 1, depth, &need_tbl_nb);\n \ttotal_need_tbl_nb = need_tbl_nb;\n \t/*\n \t * Inspect one by one the rest of the bytes until\n@@ -851,7 +839,7 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)\n \tfor (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && ret == 1; i++) {\n \t\ttbl = tbl_next;\n \t\tret = simulate_add_step(lpm, tbl, &tbl_next, masked_ip, 1,\n-\t\t\t\t(uint8_t)(i+1), depth, &need_tbl_nb);\n+\t\t\t(uint8_t)(i + 1), depth, &need_tbl_nb);\n \t\ttotal_need_tbl_nb += need_tbl_nb;\n \t}\n \n@@ -862,9 +850,12 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)\n \treturn 0;\n }\n \n-int __vsym\n-rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n-\t\tuint32_t next_hop)\n+/*\n+ * Add a route\n+ */\n+int\n+rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n+\tuint32_t next_hop)\n {\n \tstruct rte_lpm6_tbl_entry *tbl;\n \tstruct rte_lpm6_tbl_entry *tbl_next = NULL;\n@@ -896,8 +887,8 @@ rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n \t/* Inspect the first three bytes through tbl24 on the first step. 
*/\n \ttbl = lpm->tbl24;\n \tstatus = add_step(lpm, tbl, TBL24_IND, &tbl_next, &tbl_next_num,\n-\t\t\tmasked_ip, ADD_FIRST_BYTE, 1, depth, next_hop,\n-\t\t\tis_new_rule);\n+\t\tmasked_ip, ADD_FIRST_BYTE, 1, depth, next_hop,\n+\t\tis_new_rule);\n \tassert(status >= 0);\n \n \t/*\n@@ -907,17 +898,13 @@ rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n \tfor (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {\n \t\ttbl = tbl_next;\n \t\tstatus = add_step(lpm, tbl, tbl_next_num, &tbl_next,\n-\t\t\t\t&tbl_next_num, masked_ip, 1, (uint8_t)(i+1),\n-\t\t\t\tdepth, next_hop, is_new_rule);\n+\t\t\t&tbl_next_num, masked_ip, 1, (uint8_t)(i + 1),\n+\t\t\tdepth, next_hop, is_new_rule);\n \t\tassert(status >= 0);\n \t}\n \n \treturn status;\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm6_add, _v1705, 17.05);\n-MAP_STATIC_SYMBOL(int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip,\n-\t\t\t\tuint8_t depth, uint32_t next_hop),\n-\t\trte_lpm6_add_v1705);\n \n /*\n  * Takes a pointer to a table entry and inspect one level.\n@@ -955,26 +942,8 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,\n /*\n  * Looks up an IP\n  */\n-int __vsym\n-rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)\n-{\n-\tuint32_t next_hop32 = 0;\n-\tint32_t status;\n-\n-\t/* DEBUG: Check user input arguments. 
*/\n-\tif (next_hop == NULL)\n-\t\treturn -EINVAL;\n-\n-\tstatus = rte_lpm6_lookup_v1705(lpm, ip, &next_hop32);\n-\tif (status == 0)\n-\t\t*next_hop = (uint8_t)next_hop32;\n-\n-\treturn status;\n-}\n-VERSION_SYMBOL(rte_lpm6_lookup, _v20, 2.0);\n-\n-int __vsym\n-rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,\n+int\n+rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip,\n \t\tuint32_t *next_hop)\n {\n \tconst struct rte_lpm6_tbl_entry *tbl;\n@@ -1001,56 +970,12 @@ rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,\n \n \treturn status;\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm6_lookup, _v1705, 17.05);\n-MAP_STATIC_SYMBOL(int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip,\n-\t\t\t\tuint32_t *next_hop), rte_lpm6_lookup_v1705);\n \n /*\n  * Looks up a group of IP addresses\n  */\n-int __vsym\n-rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm,\n-\t\tuint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],\n-\t\tint16_t * next_hops, unsigned n)\n-{\n-\tunsigned i;\n-\tconst struct rte_lpm6_tbl_entry *tbl;\n-\tconst struct rte_lpm6_tbl_entry *tbl_next = NULL;\n-\tuint32_t tbl24_index, next_hop;\n-\tuint8_t first_byte;\n-\tint status;\n-\n-\t/* DEBUG: Check user input arguments. 
*/\n-\tif ((lpm == NULL) || (ips == NULL) || (next_hops == NULL))\n-\t\treturn -EINVAL;\n-\n-\tfor (i = 0; i < n; i++) {\n-\t\tfirst_byte = LOOKUP_FIRST_BYTE;\n-\t\ttbl24_index = (ips[i][0] << BYTES2_SIZE) |\n-\t\t\t\t(ips[i][1] << BYTE_SIZE) | ips[i][2];\n-\n-\t\t/* Calculate pointer to the first entry to be inspected */\n-\t\ttbl = &lpm->tbl24[tbl24_index];\n-\n-\t\tdo {\n-\t\t\t/* Continue inspecting following levels until success or failure */\n-\t\t\tstatus = lookup_step(lpm, tbl, &tbl_next, ips[i], first_byte++,\n-\t\t\t\t\t&next_hop);\n-\t\t\ttbl = tbl_next;\n-\t\t} while (status == 1);\n-\n-\t\tif (status < 0)\n-\t\t\tnext_hops[i] = -1;\n-\t\telse\n-\t\t\tnext_hops[i] = (int16_t)next_hop;\n-\t}\n-\n-\treturn 0;\n-}\n-VERSION_SYMBOL(rte_lpm6_lookup_bulk_func, _v20, 2.0);\n-\n-int __vsym\n-rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,\n+int\n+rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,\n \t\tuint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],\n \t\tint32_t *next_hops, unsigned int n)\n {\n@@ -1090,37 +1015,12 @@ rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,\n \n \treturn 0;\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm6_lookup_bulk_func, _v1705, 17.05);\n-MAP_STATIC_SYMBOL(int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,\n-\t\t\t\tuint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],\n-\t\t\t\tint32_t *next_hops, unsigned int n),\n-\t\trte_lpm6_lookup_bulk_func_v1705);\n \n /*\n  * Look for a rule in the high-level rules table\n  */\n-int __vsym\n-rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n-\t\tuint8_t *next_hop)\n-{\n-\tuint32_t next_hop32 = 0;\n-\tint32_t status;\n-\n-\t/* DEBUG: Check user input arguments. 
*/\n-\tif (next_hop == NULL)\n-\t\treturn -EINVAL;\n-\n-\tstatus = rte_lpm6_is_rule_present_v1705(lpm, ip, depth, &next_hop32);\n-\tif (status > 0)\n-\t\t*next_hop = (uint8_t)next_hop32;\n-\n-\treturn status;\n-\n-}\n-VERSION_SYMBOL(rte_lpm6_is_rule_present, _v20, 2.0);\n-\n-int __vsym\n-rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n+int\n+rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n \t\tuint32_t *next_hop)\n {\n \tuint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];\n@@ -1136,10 +1036,6 @@ rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n \n \treturn rule_find(lpm, masked_ip, depth, next_hop);\n }\n-BIND_DEFAULT_SYMBOL(rte_lpm6_is_rule_present, _v1705, 17.05);\n-MAP_STATIC_SYMBOL(int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm,\n-\t\t\t\tuint8_t *ip, uint8_t depth, uint32_t *next_hop),\n-\t\trte_lpm6_is_rule_present_v1705);\n \n /*\n  * Delete a rule from the rule table.\ndiff --git a/lib/librte_lpm/rte_lpm6.h b/lib/librte_lpm/rte_lpm6.h\nindex 5d59ccb1fe..37dfb20249 100644\n--- a/lib/librte_lpm/rte_lpm6.h\n+++ b/lib/librte_lpm/rte_lpm6.h\n@@ -96,12 +96,6 @@ rte_lpm6_free(struct rte_lpm6 *lpm);\n int\n rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n \t\tuint32_t next_hop);\n-int\n-rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n-\t\tuint8_t next_hop);\n-int\n-rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n-\t\tuint32_t next_hop);\n \n /**\n  * Check if a rule is present in the LPM table,\n@@ -121,12 +115,6 @@ rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n int\n rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n \t\tuint32_t *next_hop);\n-int\n-rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n-\t\tuint8_t *next_hop);\n-int\n-rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,\n-\t\tuint32_t *next_hop);\n 
\n /**\n  * Delete a rule from the LPM table.\n@@ -184,11 +172,6 @@ rte_lpm6_delete_all(struct rte_lpm6 *lpm);\n  */\n int\n rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint32_t *next_hop);\n-int\n-rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop);\n-int\n-rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,\n-\t\tuint32_t *next_hop);\n \n /**\n  * Lookup multiple IP addresses in an LPM table.\n@@ -210,14 +193,6 @@ int\n rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,\n \t\tuint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],\n \t\tint32_t *next_hops, unsigned int n);\n-int\n-rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm,\n-\t\tuint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],\n-\t\tint16_t *next_hops, unsigned int n);\n-int\n-rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,\n-\t\tuint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],\n-\t\tint32_t *next_hops, unsigned int n);\n \n #ifdef __cplusplus\n }\ndiff --git a/lib/librte_lpm/rte_lpm_version.map b/lib/librte_lpm/rte_lpm_version.map\nindex 90beac853d..604ed416d1 100644\n--- a/lib/librte_lpm/rte_lpm_version.map\n+++ b/lib/librte_lpm/rte_lpm_version.map\n@@ -1,23 +1,12 @@\n DPDK_2.0 {\n \tglobal:\n \n-\trte_lpm_add;\n-\trte_lpm_create;\n-\trte_lpm_delete;\n-\trte_lpm_delete_all;\n-\trte_lpm_find_existing;\n-\trte_lpm_free;\n-\trte_lpm_is_rule_present;\n-\trte_lpm6_add;\n \trte_lpm6_create;\n \trte_lpm6_delete;\n \trte_lpm6_delete_all;\n \trte_lpm6_delete_bulk_func;\n \trte_lpm6_find_existing;\n \trte_lpm6_free;\n-\trte_lpm6_is_rule_present;\n-\trte_lpm6_lookup;\n-\trte_lpm6_lookup_bulk_func;\n \n \tlocal: *;\n };\n",
    "prefixes": [
        "v8",
        "07/12"
    ]
}