get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update: all writable fields are replaced).
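
A minimal sketch of driving this endpoint from Python with the requests library, using the patch ID from the example response below. Reading a patch works anonymously; the commented-out PATCH call is an assumption that a Patchwork API token with maintainer rights on the project is available — the token value and the chosen "state" are placeholders, not part of the example.

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 68499

# get: show a patch (read access needs no authentication).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])
print("mbox:", patch["mbox"])

# patch: update a patch. Writable fields such as "state", "archived" or
# "delegate" require authentication; the token below is a placeholder.
# headers = {"Authorization": "Token <your-api-token>"}
# update = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
#                         headers=headers,
#                         json={"state": "accepted"})
# update.raise_for_status()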

GET /api/patches/68499/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 68499,
    "url": "https://patches.dpdk.org/api/patches/68499/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1586938751-32808-13-git-send-email-venkatkumar.duvvuru@broadcom.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1586938751-32808-13-git-send-email-venkatkumar.duvvuru@broadcom.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1586938751-32808-13-git-send-email-venkatkumar.duvvuru@broadcom.com",
    "date": "2020-04-15T08:18:49",
    "name": "[v4,12/34] net/bnxt: add EM/EEM functionality",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "0add295ae150a5292f03a89dccc9416f3732bf12",
    "submitter": {
        "id": 1635,
        "url": "https://patches.dpdk.org/api/people/1635/?format=api",
        "name": "Venkat Duvvuru",
        "email": "venkatkumar.duvvuru@broadcom.com"
    },
    "delegate": {
        "id": 1766,
        "url": "https://patches.dpdk.org/api/users/1766/?format=api",
        "username": "ajitkhaparde",
        "first_name": "Ajit",
        "last_name": "Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1586938751-32808-13-git-send-email-venkatkumar.duvvuru@broadcom.com/mbox/",
    "series": [
        {
            "id": 9386,
            "url": "https://patches.dpdk.org/api/series/9386/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=9386",
            "date": "2020-04-15T08:18:37",
            "name": "add support for host based flow table management",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/9386/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/68499/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/68499/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B1C61A0563;\n\tWed, 15 Apr 2020 10:22:42 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 621141D59B;\n\tWed, 15 Apr 2020 10:20:01 +0200 (CEST)",
            "from mail-pj1-f68.google.com (mail-pj1-f68.google.com\n [209.85.216.68]) by dpdk.org (Postfix) with ESMTP id 736491D559\n for <dev@dpdk.org>; Wed, 15 Apr 2020 10:19:58 +0200 (CEST)",
            "by mail-pj1-f68.google.com with SMTP id e16so6163258pjp.1\n for <dev@dpdk.org>; Wed, 15 Apr 2020 01:19:58 -0700 (PDT)",
            "from S60.dhcp.broadcom.net ([192.19.234.250])\n by smtp.gmail.com with ESMTPSA id fy21sm3819019pjb.25.2020.04.15.01.19.52\n (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128);\n Wed, 15 Apr 2020 01:19:54 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=broadcom.com;\n s=google;\n h=from:to:cc:subject:date:message-id:in-reply-to:references;\n bh=dBcnJ4dUua4wXwYUPQFepQzp+Ne9znlQoEZP3kXPvf0=;\n b=BLcSx/ZgHVMszECMr0dJKtghpJ7UO3ryUD62WytWKlodlNdjWUpbh5ILhKvUT0ZExI\n J++OkhnUFrt95uAISLiWp/L71rTUA2BwyRxJoo9JlnNglbjYwJX27NV1/6Q9e7rcyXTs\n cqtRzpmIoXi9NQNAckef38J6tA7rJafwCgCdM=",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references;\n bh=dBcnJ4dUua4wXwYUPQFepQzp+Ne9znlQoEZP3kXPvf0=;\n b=lxxRgg7sdrA+/6aLcZKuqJUQCzKeNtG1whKEf6pTHn84s70yBUPjwMJehtBhi2uG+w\n DFI2mlP7Qh+mqcaG0Pgls0qFFi2wQ7dHfpo3lUkVg/91yiJ7NnE8dU7YFzZZ6V0cJ9mL\n n/5dETjHo9r2ZBuaC96KeMVikcOQe56nzFzkAXcETAu3ep3iYlT/6VC3GvNA6Mw61mxg\n 3O1vm5jf9NRNyQngdScLAyZ3VYAR4P3ZPG6cqbWsMRFL32TqJ1lIwWrfEj0wOSExBHG1\n /ldQAjHTkvNlqzx1U+wXaFDF4gKd3J1kmpAHKM+U6NmJyGwjCOahKY8vOhPJZJprpTkv\n JUbA==",
        "X-Gm-Message-State": "AGi0PuYdDAsBtKRr/BZB7Bq6xpHG1dsEXQzJVBRMr94n7GCNKk+c2iZD\n Ny9OKzKuFS7IJ34xYrg8uN2FTzziLUQHmOc/jq45YgquYXKfFBX9mXnRGkrGAf2DF9nnFXijKEj\n MsIZhLWhgMxGvNMtAm9Wc9ESY12EoAv53MJm3tC+/eHBcEThl/atJtSttKzdLAWGlp5h0",
        "X-Google-Smtp-Source": "\n APiQypLLOtN5485S4W/H/l4ijxFfCZ9LYajyceZrS1y+QjuERZIfIg5VWP7vY5TIYD6WI5FsQP4Zkw==",
        "X-Received": "by 2002:a17:90a:3086:: with SMTP id\n h6mr5050131pjb.49.1586938795040;\n Wed, 15 Apr 2020 01:19:55 -0700 (PDT)",
        "From": "Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>",
        "To": "dev@dpdk.org",
        "Cc": "Pete Spreadborough <peter.spreadborough@broadcom.com>",
        "Date": "Wed, 15 Apr 2020 13:48:49 +0530",
        "Message-Id": "\n <1586938751-32808-13-git-send-email-venkatkumar.duvvuru@broadcom.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "\n <1586938751-32808-1-git-send-email-venkatkumar.duvvuru@broadcom.com>",
        "References": "\n <1586852011-37536-1-git-send-email-venkatkumar.duvvuru@broadcom.com>\n <1586938751-32808-1-git-send-email-venkatkumar.duvvuru@broadcom.com>",
        "Subject": "[dpdk-dev] [PATCH v4 12/34] net/bnxt: add EM/EEM functionality",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Pete Spreadborough <peter.spreadborough@broadcom.com>\n\n- Add TruFlow flow memory support\n- Exact Match (EM) adds the capability to manage and manipulate\n  data flows using on chip memory.\n- Extended Exact Match (EEM) behaves similarly to EM, but at a\n  vastly increased scale by using host DDR, with performance\n  tradeoff due to the need to access off-chip memory.\n\nSigned-off-by: Pete Spreadborough <peter.spreadborough@broadcom.com>\nReviewed-by: Randy Schacher <stuart.schacher@broadcom.com>\nReviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>\n---\n drivers/net/bnxt/Makefile                     |    2 +\n drivers/net/bnxt/tf_core/lookup3.h            |  162 +++\n drivers/net/bnxt/tf_core/stack.c              |  107 ++\n drivers/net/bnxt/tf_core/stack.h              |  107 ++\n drivers/net/bnxt/tf_core/tf_core.c            |   50 +\n drivers/net/bnxt/tf_core/tf_core.h            |  480 ++++++-\n drivers/net/bnxt/tf_core/tf_em.c              |  515 +++++++\n drivers/net/bnxt/tf_core/tf_em.h              |  117 ++\n drivers/net/bnxt/tf_core/tf_ext_flow_handle.h |  166 +++\n drivers/net/bnxt/tf_core/tf_msg.c             |  171 +++\n drivers/net/bnxt/tf_core/tf_msg.h             |   40 +\n drivers/net/bnxt/tf_core/tf_tbl.c             | 1795 ++++++++++++++++++++++++-\n drivers/net/bnxt/tf_core/tf_tbl.h             |   83 ++\n 13 files changed, 3788 insertions(+), 7 deletions(-)\n create mode 100644 drivers/net/bnxt/tf_core/lookup3.h\n create mode 100644 drivers/net/bnxt/tf_core/stack.c\n create mode 100644 drivers/net/bnxt/tf_core/stack.h\n create mode 100644 drivers/net/bnxt/tf_core/tf_em.c\n create mode 100644 drivers/net/bnxt/tf_core/tf_em.h\n create mode 100644 drivers/net/bnxt/tf_core/tf_ext_flow_handle.h",
    "diff": "diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile\nindex 6714a6a..4c95847 100644\n--- a/drivers/net/bnxt/Makefile\n+++ b/drivers/net/bnxt/Makefile\n@@ -51,6 +51,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_core.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/bitalloc.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_msg.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/rand.c\n+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/stack.c\n+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_em.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_rm.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_tbl.c\n SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tfp.c\ndiff --git a/drivers/net/bnxt/tf_core/lookup3.h b/drivers/net/bnxt/tf_core/lookup3.h\nnew file mode 100644\nindex 0000000..e5abcc2\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/lookup3.h\n@@ -0,0 +1,162 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ *\n+ * Based on lookup3.c, by Bob Jenkins, May 2006, Public Domain.\n+ * http://www.burtleburtle.net/bob/c/lookup3.c\n+ *\n+ * These functions for producing 32-bit hashes for has table lookup.\n+ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()\n+ * are externally useful functions. Routines to test the hash are included\n+ * if SELF_TEST is defined. You can use this free for any purpose. It is in\n+ * the public domain. It has no warranty.\n+ */\n+\n+#ifndef _LOOKUP3_H_\n+#define _LOOKUP3_H_\n+\n+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))\n+\n+/** -------------------------------------------------------------------------\n+ * This is reversible, so any information in (a,b,c) before mix() is\n+ * still in (a,b,c) after mix().\n+ *\n+ * If four pairs of (a,b,c) inputs are run through mix(), or through\n+ * mix() in reverse, there are at least 32 bits of the output that\n+ * are sometimes the same for one pair and different for another pair.\n+ * This was tested for:\n+ *   pairs that differed by one bit, by two bits, in any combination\n+ *   of top bits of (a,b,c), or in any combination of bottom bits of\n+ *   (a,b,c).\n+ *   \"differ\" is defined as +, -, ^, or ~^.  For + and -, I transformed\n+ *   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as\n+ *   is commonly produced by subtraction) look like a single 1-bit\n+ *   difference.\n+ *   the base values were pseudorandom, all zero but one bit set, or\n+ *   all zero plus a counter that starts at zero.\n+ *\n+ * Some k values for my \"a-=c; a^=rot(c,k); c+=b;\" arrangement that\n+ * satisfy this are\n+ *     4  6  8 16 19  4\n+ *     9 15  3 18 27 15\n+ *    14  9  3  7 17  3\n+ * Well, \"9 15 3 18 27 15\" didn't quite get 32 bits diffing\n+ * for \"differ\" defined as + with a one-bit base and a two-bit delta.  I\n+ * used http://burtleburtle.net/bob/hash/avalanche.html to choose\n+ * the operations, constants, and arrangements of the variables.\n+ *\n+ * This does not achieve avalanche.  There are input bits of (a,b,c)\n+ * that fail to affect some output bits of (a,b,c), especially of a.  The\n+ * most thoroughly mixed value is c, but it doesn't really even achieve\n+ * avalanche in c.\n+ *\n+ * This allows some parallelism.  Read-after-writes are good at doubling\n+ * the number of bits affected, so the goal of mixing pulls in the opposite\n+ * direction as the goal of parallelism.  I did what I could.  
Rotates\n+ * seem to cost as much as shifts on every machine I could lay my hands\n+ * on, and rotates are much kinder to the top and bottom bits, so I used\n+ * rotates.\n+ * --------------------------------------------------------------------------\n+ */\n+#define mix(a, b, c) \\\n+{ \\\n+\t(a) -= (c); (a) ^= rot((c), 4);  (c) += b; \\\n+\t(b) -= (a); (b) ^= rot((a), 6);  (a) += c; \\\n+\t(c) -= (b); (c) ^= rot((b), 8);  (b) += a; \\\n+\t(a) -= (c); (a) ^= rot((c), 16); (c) += b; \\\n+\t(b) -= (a); (b) ^= rot((a), 19); (a) += c; \\\n+\t(c) -= (b); (c) ^= rot((b), 4);  (b) += a; \\\n+}\n+\n+/** --------------------------------------------------------------------------\n+ * final -- final mixing of 3 32-bit values (a,b,c) into c\n+ *\n+ * Pairs of (a,b,c) values differing in only a few bits will usually\n+ * produce values of c that look totally different.  This was tested for\n+ *  pairs that differed by one bit, by two bits, in any combination\n+ *   of top bits of (a,b,c), or in any combination of bottom bits of\n+ *   (a,b,c).\n+ *   \"differ\" is defined as +, -, ^, or ~^.  For + and -, I transformed\n+ *   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as\n+ *   is commonly produced by subtraction) look like a single 1-bit\n+ *   difference.\n+ *   the base values were pseudorandom, all zero but one bit set, or\n+ *   all zero plus a counter that starts at zero.\n+ *\n+ * These constants passed:\n+ *  14 11 25 16 4 14 24\n+ *  12 14 25 16 4 14 24\n+ * and these came close:\n+ *   4  8 15 26 3 22 24\n+ *  10  8 15 26 3 22 24\n+ *  11  8 15 26 3 22 24\n+ * --------------------------------------------------------------------------\n+ */\n+#define final(a, b, c) \\\n+{ \\\n+\t(c) ^= (b); (c) -= rot((b), 14); \\\n+\t(a) ^= (c); (a) -= rot((c), 11); \\\n+\t(b) ^= (a); (b) -= rot((a), 25); \\\n+\t(c) ^= (b); (c) -= rot((b), 16); \\\n+\t(a) ^= (c); (a) -= rot((c), 4);  \\\n+\t(b) ^= (a); (b) -= rot((a), 14); \\\n+\t(c) ^= (b); (c) -= rot((b), 24); \\\n+}\n+\n+/** --------------------------------------------------------------------\n+ *  This works on all machines.  To be useful, it requires\n+ *  -- that the key be an array of uint32_t's, and\n+ *  -- that the length be the number of uint32_t's in the key\n+\n+ *  The function hashword() is identical to hashlittle() on little-endian\n+ *  machines, and identical to hashbig() on big-endian machines,\n+ *  except that the length has to be measured in uint32_ts rather than in\n+ *  bytes. 
hashlittle() is more complicated than hashword() only because\n+ *  hashlittle() has to dance around fitting the key bytes into registers.\n+ *\n+ *  Input Parameters:\n+ *\t key: an array of uint32_t values\n+ *\t length: the length of the key, in uint32_ts\n+ *\t initval: the previous hash, or an arbitrary value\n+ * --------------------------------------------------------------------\n+ */\n+static inline uint32_t hashword(const uint32_t *k,\n+\t\t\t\tsize_t length,\n+\t\t\t\tuint32_t initval) {\n+\tuint32_t a, b, c;\n+\tint index = 12;\n+\n+\t/* Set up the internal state */\n+\ta = 0xdeadbeef + (((uint32_t)length) << 2) + initval;\n+\tb = a;\n+\tc = a;\n+\n+\t/*-------------------------------------------- handle most of the key */\n+\twhile (length > 3) {\n+\t\ta += k[index];\n+\t\tb += k[index - 1];\n+\t\tc += k[index - 2];\n+\t\tmix(a, b, c);\n+\t\tlength -= 3;\n+\t\tindex -= 3;\n+\t}\n+\n+\t/*-------------------------------------- handle the last 3 uint32_t's */\n+\tswitch (length) {\t      /* all the case statements fall through */\n+\tcase 3:\n+\t\tc += k[index - 2];\n+\t\t/* Falls through. */\n+\tcase 2:\n+\t\tb += k[index - 1];\n+\t\t/* Falls through. */\n+\tcase 1:\n+\t\ta += k[index];\n+\t\tfinal(a, b, c);\n+\t\t/* Falls through. */\n+\tcase 0:\t    /* case 0: nothing left to add */\n+\t\t/* FALLTHROUGH */\n+\t\tbreak;\n+\t}\n+\t/*------------------------------------------------- report the result */\n+\treturn c;\n+}\n+\n+#endif /* _LOOKUP3_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/stack.c b/drivers/net/bnxt/tf_core/stack.c\nnew file mode 100644\nindex 0000000..3337073\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/stack.c\n@@ -0,0 +1,107 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <stdbool.h>\n+#include <stdint.h>\n+#include <errno.h>\n+#include \"stack.h\"\n+\n+#define STACK_EMPTY -1\n+\n+/* Initialize stack\n+ */\n+int\n+stack_init(int num_entries, uint32_t *items, struct stack *st)\n+{\n+\tif (items == NULL || st == NULL)\n+\t\treturn -EINVAL;\n+\n+\tst->max = num_entries;\n+\tst->top = STACK_EMPTY;\n+\tst->items = items;\n+\n+\treturn 0;\n+}\n+\n+/* Return the size of the stack\n+ */\n+int32_t\n+stack_size(struct stack *st)\n+{\n+\treturn st->top + 1;\n+}\n+\n+/* Check if the stack is empty\n+ */\n+bool\n+stack_is_empty(struct stack *st)\n+{\n+\treturn st->top == STACK_EMPTY;\n+}\n+\n+/* Check if the stack is full\n+ */\n+bool\n+stack_is_full(struct stack *st)\n+{\n+\treturn st->top == st->max - 1;\n+}\n+\n+/* Add  element x to  the stack\n+ */\n+int\n+stack_push(struct stack *st, uint32_t x)\n+{\n+\tif (stack_is_full(st))\n+\t\treturn -EOVERFLOW;\n+\n+\t/* add an element and increments the top index\n+\t */\n+\tst->items[++st->top] = x;\n+\n+\treturn 0;\n+}\n+\n+/* Pop top element x from the stack and return\n+ * in user provided location.\n+ */\n+int\n+stack_pop(struct stack *st, uint32_t *x)\n+{\n+\tif (stack_is_empty(st))\n+\t\treturn -ENODATA;\n+\n+\t*x = st->items[st->top];\n+\tst->top--;\n+\n+\treturn 0;\n+}\n+\n+/* Dump the stack\n+ */\n+void stack_dump(struct stack *st)\n+{\n+\tint i, j;\n+\n+\tprintf(\"top=%d\\n\", st->top);\n+\tprintf(\"max=%d\\n\", st->max);\n+\n+\tif (st->top == -1) {\n+\t\tprintf(\"stack is empty\\n\");\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; i < st->max + 7 / 8; i++) {\n+\t\tprintf(\"item[%d] 0x%08x\", i, st->items[i]);\n+\n+\t\tfor (j = 0; j < 7; j++) {\n+\t\t\tif (i++ < st->max - 1)\n+\t\t\t\tprintf(\" 
0x%08x\", st->items[i]);\n+\t\t}\n+\t\tprintf(\"\\n\");\n+\t}\n+}\ndiff --git a/drivers/net/bnxt/tf_core/stack.h b/drivers/net/bnxt/tf_core/stack.h\nnew file mode 100644\nindex 0000000..6fe8829\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/stack.h\n@@ -0,0 +1,107 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Broadcom\n+ * All rights reserved.\n+ */\n+#ifndef _STACK_H_\n+#define _STACK_H_\n+\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <stdbool.h>\n+#include <stdint.h>\n+\n+/** Stack data structure\n+ */\n+struct stack {\n+\tint max;         /**< Maximum number of entries */\n+\tint top;         /**< maximum value in stack */\n+\tuint32_t *items; /**< items in the stack */\n+};\n+\n+/** Initialize stack of uint32_t elements\n+ *\n+ *  [in] num_entries\n+ *    maximum number of elemnts in the stack\n+ *\n+ *  [in] items\n+ *    pointer to items (must be sized to (uint32_t * num_entries)\n+ *\n+ *  s[in] st\n+ *    pointer to the stack structure\n+ *\n+ *  return\n+ *    0 for success\n+ */\n+int stack_init(int num_entries,\n+\t       uint32_t *items,\n+\t       struct stack *st);\n+\n+/** Return the size of the stack\n+ *\n+ *  [in] st\n+ *    pointer to the stack\n+ *\n+ *  return\n+ *    number of elements\n+ */\n+int32_t stack_size(struct stack *st);\n+\n+/** Check if the stack is empty\n+ *\n+ * [in] st\n+ *   pointer to the stack\n+ *\n+ * return\n+ *   true or false\n+ */\n+bool stack_is_empty(struct stack *st);\n+\n+/** Check if the stack is full\n+ *\n+ * [in] st\n+ *   pointer to the stack\n+ *\n+ * return\n+ *   true or false\n+ */\n+bool stack_is_full(struct stack *st);\n+\n+/** Add  element x to  the stack\n+ *\n+ * [in] st\n+ *   pointer to the stack\n+ *\n+ * [in] x\n+ *   value to push on the stack\n+ * return\n+ *  0 for success\n+ */\n+int stack_push(struct stack *st, uint32_t x);\n+\n+/** Pop top element x from the stack and return\n+ * in user provided location.\n+ *\n+ * [in] st\n+ *   pointer to the stack\n+ *\n+ * [in, out] x\n+ *  pointer to where the value popped will be written\n+ *\n+ * return\n+ *  0 for success\n+ */\n+int stack_pop(struct stack *st, uint32_t *x);\n+\n+/** Dump stack information\n+ *\n+ * Warning: Don't use for large stacks due to prints\n+ *\n+ * [in] st\n+ *   pointer to the stack\n+ *\n+ * return\n+ *    none\n+ */\n+void stack_dump(struct stack *st);\n+\n+#endif /* _STACK_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c\nindex f04a9b1..fc7d638 100644\n--- a/drivers/net/bnxt/tf_core/tf_core.c\n+++ b/drivers/net/bnxt/tf_core/tf_core.c\n@@ -8,6 +8,7 @@\n #include \"tf_core.h\"\n #include \"tf_session.h\"\n #include \"tf_tbl.h\"\n+#include \"tf_em.h\"\n #include \"tf_rm.h\"\n #include \"tf_msg.h\"\n #include \"tfp.h\"\n@@ -289,6 +290,55 @@ tf_close_session(struct tf *tfp)\n \treturn rc_close;\n }\n \n+/** insert EM hash entry API\n+ *\n+ *    returns:\n+ *    0       - Success\n+ *    -EINVAL - Error\n+ */\n+int tf_insert_em_entry(struct tf *tfp,\n+\t\t       struct tf_insert_em_entry_parms *parms)\n+{\n+\tstruct tf_tbl_scope_cb     *tbl_scope_cb;\n+\n+\tif (tfp == NULL || parms == NULL)\n+\t\treturn -EINVAL;\n+\n+\ttbl_scope_cb =\n+\t\ttbl_scope_cb_find((struct tf_session *)tfp->session->core_data,\n+\t\t\t\t  parms->tbl_scope_id);\n+\tif (tbl_scope_cb == NULL)\n+\t\treturn -EINVAL;\n+\n+\t/* Process the EM entry per Table Scope type */\n+\treturn tf_insert_eem_entry((struct tf_session *)tfp->session->core_data,\n+\t\t\t\t   tbl_scope_cb,\n+\t\t\t\t   
parms);\n+}\n+\n+/** Delete EM hash entry API\n+ *\n+ *    returns:\n+ *    0       - Success\n+ *    -EINVAL - Error\n+ */\n+int tf_delete_em_entry(struct tf *tfp,\n+\t\t       struct tf_delete_em_entry_parms *parms)\n+{\n+\tstruct tf_tbl_scope_cb     *tbl_scope_cb;\n+\n+\tif (tfp == NULL || parms == NULL)\n+\t\treturn -EINVAL;\n+\n+\ttbl_scope_cb =\n+\t\ttbl_scope_cb_find((struct tf_session *)tfp->session->core_data,\n+\t\t\t\t  parms->tbl_scope_id);\n+\tif (tbl_scope_cb == NULL)\n+\t\treturn -EINVAL;\n+\n+\treturn tf_delete_eem_entry(tfp, parms);\n+}\n+\n /** allocate identifier resource\n  *\n  * Returns success or failure code.\ndiff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h\nindex 4c90677..34e643c 100644\n--- a/drivers/net/bnxt/tf_core/tf_core.h\n+++ b/drivers/net/bnxt/tf_core/tf_core.h\n@@ -21,6 +21,10 @@\n \n /********** BEGIN Truflow Core DEFINITIONS **********/\n \n+\n+#define TF_KILOBYTE  1024\n+#define TF_MEGABYTE  (1024 * 1024)\n+\n /**\n  * direction\n  */\n@@ -31,6 +35,27 @@ enum tf_dir {\n };\n \n /**\n+ * memory choice\n+ */\n+enum tf_mem {\n+\tTF_MEM_INTERNAL, /**< Internal */\n+\tTF_MEM_EXTERNAL, /**< External */\n+\tTF_MEM_MAX\n+};\n+\n+/**\n+ * The size of the external action record (Wh+/Brd2)\n+ *\n+ * Currently set to 512.\n+ *\n+ * AR (16B) + encap (256B) + stats_ptrs (8) + resvd (8)\n+ * + stats (16) = 304 aligned on a 16B boundary\n+ *\n+ * Theoretically, the size should be smaller. ~304B\n+ */\n+#define TF_ACTION_RECORD_SZ 512\n+\n+/**\n  * External pool size\n  *\n  * Defines a single pool of external action records of\n@@ -56,6 +81,23 @@ enum tf_dir {\n #define TF_EXT_POOL_0      0 /**< matches TF_TBL_TYPE_EXT   */\n #define TF_EXT_POOL_1      1 /**< matches TF_TBL_TYPE_EXT_0 */\n \n+/** EEM record AR helper\n+ *\n+ * Helpers to handle the Action Record Pointer in the EEM Record Entry.\n+ *\n+ * Convert absolute offset to action record pointer in EEM record entry\n+ * Convert action record pointer in EEM record entry to absolute offset\n+ */\n+#define TF_ACT_REC_OFFSET_2_PTR(offset) ((offset) >> 4)\n+#define TF_ACT_REC_PTR_2_OFFSET(offset) ((offset) << 4)\n+\n+#define TF_ACT_REC_INDEX_2_OFFSET(idx) ((idx) << 9)\n+\n+/*\n+ * Helper Macros\n+ */\n+#define TF_BITS_2_BYTES(num_bits) (((num_bits) + 7) / 8)\n+\n /********** BEGIN API FUNCTION PROTOTYPES/PARAMETERS **********/\n \n /**\n@@ -495,7 +537,7 @@ struct tf_alloc_tbl_scope_parms {\n \t */\n \tuint32_t rx_num_flows_in_k;\n \t/**\n-\t * [in] SR2 only receive table access interface id\n+\t * [in] Brd4 only receive table access interface id\n \t */\n \tuint32_t rx_tbl_if_id;\n \t/**\n@@ -517,7 +559,7 @@ struct tf_alloc_tbl_scope_parms {\n \t */\n \tuint32_t tx_num_flows_in_k;\n \t/**\n-\t * [in] SR2 only receive table access interface id\n+\t * [in] Brd4 only receive table access interface id\n \t */\n \tuint32_t tx_tbl_if_id;\n \t/**\n@@ -536,7 +578,7 @@ struct tf_free_tbl_scope_parms {\n /**\n  * allocate a table scope\n  *\n- * On SR2 Firmware will allocate a scope ID.  On other devices, the scope\n+ * On Brd4 Firmware will allocate a scope ID.  On other devices, the scope\n  * is a software construct to identify an EEM table.  
This function will\n  * divide the hash memory/buckets and records according to the device\n  * device constraints based upon calculations using either the number of flows\n@@ -546,7 +588,7 @@ struct tf_free_tbl_scope_parms {\n  *\n  * This API will allocate the table region in\n  * DRAM, program the PTU page table entries, and program the number of static\n- * buckets (if SR2) in the RX and TX CFAs.  Buckets are assumed to start at\n+ * buckets (if Brd4) in the RX and TX CFAs.  Buckets are assumed to start at\n  * 0 in the EM memory for the scope.  Upon successful completion of this API,\n  * hash tables are fully initialized and ready for entries to be inserted.\n  *\n@@ -563,7 +605,7 @@ struct tf_free_tbl_scope_parms {\n  * memory allocated based on the rx_em_hash_mb/tx_em_hash_mb parameters.  The\n  * hash table buckets are stored at the beginning of that memory.\n  *\n- * NOTES:  No EM internal setup is done here. On chip EM records are managed\n+ * NOTE:  No EM internal setup is done here. On chip EM records are managed\n  * internally by TruFlow core.\n  *\n  * Returns success or failure code.\n@@ -577,7 +619,7 @@ int tf_alloc_tbl_scope(struct tf *tfp,\n  *\n  * Firmware checks that the table scope ID is owned by the TruFlow\n  * session, verifies that no references to this table scope remains\n- * (SR2 ILT) or Profile TCAM entries for either CFA (RX/TX) direction,\n+ * (Brd4 ILT) or Profile TCAM entries for either CFA (RX/TX) direction,\n  * then frees the table scope ID.\n  *\n  * Returns success or failure code.\n@@ -905,4 +947,430 @@ enum tf_tbl_type {\n \tTF_TBL_TYPE_EXT_0,\n \tTF_TBL_TYPE_MAX\n };\n+\n+/** tf_alloc_tbl_entry parameter definition\n+ */\n+struct tf_alloc_tbl_entry_parms {\n+\t/**\n+\t * [in] Receive or transmit direction\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] Type of the allocation\n+\t */\n+\tenum tf_tbl_type type;\n+\t/**\n+\t * [in] Enable search for matching entry. If the table type is\n+\t * internal the shadow copy will be searched before\n+\t * alloc. Session must be configured with shadow copy enabled.\n+\t */\n+\tuint8_t search_enable;\n+\t/**\n+\t * [in] Result data to search for (if search_enable)\n+\t */\n+\tuint8_t *result;\n+\t/**\n+\t * [in] Result data size in bytes (if search_enable)\n+\t */\n+\tuint16_t result_sz_in_bytes;\n+\t/**\n+\t * [out] If search_enable, set if matching entry found\n+\t */\n+\tuint8_t hit;\n+\t/**\n+\t * [out] Current ref count after allocation (if search_enable)\n+\t */\n+\tuint16_t ref_cnt;\n+\t/**\n+\t * [out] Idx of allocated entry or found entry (if search_enable)\n+\t */\n+\tuint32_t idx;\n+};\n+\n+/** allocate index table entries\n+ *\n+ * Internal types:\n+ *\n+ * Allocate an on chip index table entry or search for a matching\n+ * entry of the indicated type for this TruFlow session.\n+ *\n+ * Allocates an index table record. This function will attempt to\n+ * allocate an entry or search an index table for a matching entry if\n+ * search is enabled (only the shadow copy of the table is accessed).\n+ *\n+ * If search is not enabled, the first available free entry is\n+ * returned. 
If search is enabled and a matching entry to entry_data\n+ * is found hit is set to TRUE and success is returned.\n+ *\n+ * External types:\n+ *\n+ * These are used to allocate inlined action record memory.\n+ *\n+ * Allocates an external index table action record.\n+ *\n+ * NOTE:\n+ * Implementation of the internals of this function will be a stack with push\n+ * and pop.\n+ *\n+ * Returns success or failure code.\n+ */\n+int tf_alloc_tbl_entry(struct tf *tfp,\n+\t\t       struct tf_alloc_tbl_entry_parms *parms);\n+\n+/** tf_free_tbl_entry parameter definition\n+ */\n+struct tf_free_tbl_entry_parms {\n+\t/**\n+\t * [in] Receive or transmit direction\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] Type of the allocation type\n+\t */\n+\tenum tf_tbl_type type;\n+\t/**\n+\t * [in] Index to free\n+\t */\n+\tuint32_t idx;\n+\t/**\n+\t * [out] Reference count after free, only valid if session has been\n+\t * created with shadow_copy.\n+\t */\n+\tuint16_t ref_cnt;\n+};\n+\n+/** free index table entry\n+ *\n+ * Used to free a previously allocated table entry.\n+ *\n+ * Internal types:\n+ *\n+ * If session has shadow_copy enabled the shadow DB is searched and if\n+ * found the element ref_cnt is decremented. If ref_cnt goes to\n+ * zero then the element is returned to the session pool.\n+ *\n+ * If the session does not have a shadow DB the element is free'ed and\n+ * given back to the session pool.\n+ *\n+ * External types:\n+ *\n+ * Free's an external index table action record.\n+ *\n+ * NOTE:\n+ * Implementation of the internals of this function will be a stack with push\n+ * and pop.\n+ *\n+ * Returns success or failure code.\n+ */\n+int tf_free_tbl_entry(struct tf *tfp,\n+\t\t      struct tf_free_tbl_entry_parms *parms);\n+\n+/** tf_set_tbl_entry parameter definition\n+ */\n+struct tf_set_tbl_entry_parms {\n+\t/**\n+\t * [in] Table scope identifier\n+\t *\n+\t */\n+\tuint32_t tbl_scope_id;\n+\t/**\n+\t * [in] Receive or transmit direction\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] Type of object to set\n+\t */\n+\tenum tf_tbl_type type;\n+\t/**\n+\t * [in] Entry data\n+\t */\n+\tuint8_t *data;\n+\t/**\n+\t * [in] Entry size\n+\t */\n+\tuint16_t data_sz_in_bytes;\n+\t/**\n+\t * [in] Entry index to write to\n+\t */\n+\tuint32_t idx;\n+};\n+\n+/** set index table entry\n+ *\n+ * Used to insert an application programmed index table entry into a\n+ * previous allocated table location.  A shadow copy of the table\n+ * is maintained (if enabled) (only for internal objects)\n+ *\n+ * Returns success or failure code.\n+ */\n+int tf_set_tbl_entry(struct tf *tfp,\n+\t\t     struct tf_set_tbl_entry_parms *parms);\n+\n+/** tf_get_tbl_entry parameter definition\n+ */\n+struct tf_get_tbl_entry_parms {\n+\t/**\n+\t * [in] Receive or transmit direction\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] Type of object to get\n+\t */\n+\tenum tf_tbl_type type;\n+\t/**\n+\t * [out] Entry data\n+\t */\n+\tuint8_t *data;\n+\t/**\n+\t * [out] Entry size\n+\t */\n+\tuint16_t data_sz_in_bytes;\n+\t/**\n+\t * [in] Entry index to read\n+\t */\n+\tuint32_t idx;\n+};\n+\n+/** get index table entry\n+ *\n+ * Used to retrieve a previous set index table entry.\n+ *\n+ * Reads and compares with the shadow table copy (if enabled) (only\n+ * for internal objects).\n+ *\n+ * Returns success or failure code. 
Failure will be returned if the\n+ * provided data buffer is too small for the data type requested.\n+ */\n+int tf_get_tbl_entry(struct tf *tfp,\n+\t\t     struct tf_get_tbl_entry_parms *parms);\n+\n+/**\n+ * @page exact_match Exact Match Table\n+ *\n+ * @ref tf_insert_em_entry\n+ *\n+ * @ref tf_delete_em_entry\n+ *\n+ * @ref tf_search_em_entry\n+ *\n+ */\n+/** tf_insert_em_entry parameter definition\n+ */\n+struct tf_insert_em_entry_parms {\n+\t/**\n+\t * [in] receive or transmit direction\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] internal or external\n+\t */\n+\tenum tf_mem mem;\n+\t/**\n+\t * [in] ID of table scope to use (external only)\n+\t */\n+\tuint32_t tbl_scope_id;\n+\t/**\n+\t * [in] ID of table interface to use (Brd4 only)\n+\t */\n+\tuint32_t tbl_if_id;\n+\t/**\n+\t * [in] ptr to structure containing key fields\n+\t */\n+\tuint8_t *key;\n+\t/**\n+\t * [in] key bit length\n+\t */\n+\tuint16_t key_sz_in_bits;\n+\t/**\n+\t * [in] ptr to structure containing result field\n+\t */\n+\tuint8_t *em_record;\n+\t/**\n+\t * [out] result size in bits\n+\t */\n+\tuint16_t em_record_sz_in_bits;\n+\t/**\n+\t * [in] duplicate check flag\n+\t */\n+\tuint8_t\tdup_check;\n+\t/**\n+\t * [out] Flow handle value for the inserted entry.  This is encoded\n+\t * as the entries[4]:bucket[2]:hashId[1]:hash[14]\n+\t */\n+\tuint64_t flow_handle;\n+\t/**\n+\t * [out] Flow id is returned as null (internal)\n+\t * Flow id is the GFID value for the inserted entry (external)\n+\t * This is the value written to the BD and useful information for mark.\n+\t */\n+\tuint64_t flow_id;\n+};\n+/**\n+ * tf_delete_em_entry parameter definition\n+ */\n+struct tf_delete_em_entry_parms {\n+\t/**\n+\t * [in] receive or transmit direction\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] internal or external\n+\t */\n+\tenum tf_mem mem;\n+\t/**\n+\t * [in] ID of table scope to use (external only)\n+\t */\n+\tuint32_t tbl_scope_id;\n+\t/**\n+\t * [in] ID of table interface to use (Brd4 only)\n+\t */\n+\tuint32_t tbl_if_id;\n+\t/**\n+\t * [in] epoch group IDs of entry to delete\n+\t * 2 element array with 2 ids. (Brd4 only)\n+\t */\n+\tuint16_t *epochs;\n+\t/**\n+\t * [in] structure containing flow delete handle information\n+\t */\n+\tuint64_t flow_handle;\n+};\n+/**\n+ * tf_search_em_entry parameter definition\n+ */\n+struct tf_search_em_entry_parms {\n+\t/**\n+\t * [in] receive or transmit direction\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] internal or external\n+\t */\n+\tenum tf_mem mem;\n+\t/**\n+\t * [in] ID of table scope to use (external only)\n+\t */\n+\tuint32_t tbl_scope_id;\n+\t/**\n+\t * [in] ID of table interface to use (Brd4 only)\n+\t */\n+\tuint32_t tbl_if_id;\n+\t/**\n+\t * [in] ptr to structure containing key fields\n+\t */\n+\tuint8_t *key;\n+\t/**\n+\t * [in] key bit length\n+\t */\n+\tuint16_t key_sz_in_bits;\n+\t/**\n+\t * [in/out] ptr to structure containing EM record fields\n+\t */\n+\tuint8_t *em_record;\n+\t/**\n+\t * [out] result size in bits\n+\t */\n+\tuint16_t em_record_sz_in_bits;\n+\t/**\n+\t * [in] epoch group IDs of entry to lookup\n+\t * 2 element array with 2 ids. 
(Brd4 only)\n+\t */\n+\tuint16_t *epochs;\n+\t/**\n+\t * [in] ptr to structure containing flow delete handle\n+\t */\n+\tuint64_t flow_handle;\n+};\n+\n+/** insert em hash entry in internal table memory\n+ *\n+ * Internal:\n+ *\n+ * This API inserts an exact match entry into internal EM table memory\n+ * of the specified direction.\n+ *\n+ * Note: The EM record is managed within the TruFlow core and not the\n+ * application.\n+ *\n+ * Shadow copy of internal record table an association with hash and 1,2, or 4\n+ * associated buckets\n+ *\n+ * External:\n+ * This API inserts an exact match entry into DRAM EM table memory of the\n+ * specified direction and table scope.\n+ *\n+ * When inserting an entry into an exact match table, the TruFlow library may\n+ * need to allocate a dynamic bucket for the entry (Brd4 only).\n+ *\n+ * The insertion of duplicate entries in an EM table is not permitted.\tIf a\n+ * TruFlow application can guarantee that it will never insert duplicates, it\n+ * can disable duplicate checking by passing a zero value in the  dup_check\n+ * parameter to this API.  This will optimize performance. Otherwise, the\n+ * TruFlow library will enforce protection against inserting duplicate entries.\n+ *\n+ * Flow handle is defined in this document:\n+ *\n+ * https://docs.google.com\n+ * /document/d/1NESu7RpTN3jwxbokaPfYORQyChYRmJgs40wMIRe8_-Q/edit\n+ *\n+ * Returns success or busy code.\n+ *\n+ */\n+int tf_insert_em_entry(struct tf *tfp,\n+\t\t       struct tf_insert_em_entry_parms *parms);\n+\n+/** delete em hash entry table memory\n+ *\n+ * Internal:\n+ *\n+ * This API deletes an exact match entry from internal EM table memory of the\n+ * specified direction. If a valid flow ptr is passed in then that takes\n+ * precedence over the pointer to the complete key passed in.\n+ *\n+ *\n+ * External:\n+ *\n+ * This API deletes an exact match entry from EM table memory of the specified\n+ * direction and table scope. If a valid flow handle is passed in then that\n+ * takes precedence over the pointer to the complete key passed in.\n+ *\n+ * The TruFlow library may release a dynamic bucket when an entry is deleted.\n+ *\n+ *\n+ * Returns success or not found code\n+ *\n+ *\n+ */\n+int tf_delete_em_entry(struct tf *tfp,\n+\t\t       struct tf_delete_em_entry_parms *parms);\n+\n+/** search em hash entry table memory\n+ *\n+ * Internal:\n+\n+ * This API looks up an EM entry in table memory with the specified EM\n+ * key or flow (flow takes precedence) and direction.\n+ *\n+ * The status will be one of: success or entry not found.  If the lookup\n+ * succeeds, a pointer to the matching entry and the result record associated\n+ * with the matching entry will be provided.\n+ *\n+ * If flow_handle is set, search shadow copy.\n+ *\n+ * Otherwise, query the fw with key to get result.\n+ *\n+ * External:\n+ *\n+ * This API looks up an EM entry in table memory with the specified EM\n+ * key or flow_handle (flow takes precedence), direction and table scope.\n+ *\n+ * The status will be one of: success or entry not found.  
If the lookup\n+ * succeeds, a pointer to the matching entry and the result record associated\n+ * with the matching entry will be provided.\n+ *\n+ * Returns success or not found code\n+ *\n+ */\n+int tf_search_em_entry(struct tf *tfp,\n+\t\t       struct tf_search_em_entry_parms *parms);\n #endif /* _TF_CORE_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/tf_em.c b/drivers/net/bnxt/tf_core/tf_em.c\nnew file mode 100644\nindex 0000000..bd8e2ba\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/tf_em.c\n@@ -0,0 +1,515 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#include <string.h>\n+#include <rte_common.h>\n+#include <rte_errno.h>\n+#include <rte_log.h>\n+\n+#include \"tf_core.h\"\n+#include \"tf_em.h\"\n+#include \"tf_msg.h\"\n+#include \"tfp.h\"\n+#include \"lookup3.h\"\n+#include \"tf_ext_flow_handle.h\"\n+\n+#include \"bnxt.h\"\n+\n+/* Enable EEM table dump\n+ */\n+#define TF_EEM_DUMP\n+\n+static struct tf_eem_64b_entry zero_key_entry;\n+\n+static uint32_t tf_em_get_key_mask(int num_entries)\n+{\n+\tuint32_t mask = num_entries - 1;\n+\n+\tif (num_entries & 0x7FFF)\n+\t\treturn 0;\n+\n+\tif (num_entries > (128 * 1024 * 1024))\n+\t\treturn 0;\n+\n+\treturn mask;\n+}\n+\n+/* CRC32i support for Key0 hash */\n+#define ucrc32(ch, crc) (crc32tbl[((crc) ^ (ch)) & 0xff] ^ ((crc) >> 8))\n+#define crc32(x, y) crc32i(~0, x, y)\n+\n+static const uint32_t crc32tbl[] = {\t/* CRC polynomial 0xedb88320 */\n+0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,\n+0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,\n+0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,\n+0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,\n+0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,\n+0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,\n+0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,\n+0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,\n+0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,\n+0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,\n+0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,\n+0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,\n+0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,\n+0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,\n+0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,\n+0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,\n+0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,\n+0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,\n+0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,\n+0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,\n+0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,\n+0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,\n+0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,\n+0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,\n+0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,\n+0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,\n+0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,\n+0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,\n+0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,\n+0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,\n+0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,\n+0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,\n+0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,\n+0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,\n+0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,\n+0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,\n+0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,\n+0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,\n+0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,\n+0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,\n+0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,\n+0xd1bb67f1, 
0xa6bc5767, 0x3fb506dd, 0x48b2364b,\n+0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,\n+0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,\n+0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,\n+0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,\n+0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,\n+0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,\n+0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,\n+0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,\n+0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,\n+0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,\n+0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,\n+0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,\n+0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,\n+0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,\n+0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,\n+0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,\n+0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,\n+0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,\n+0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,\n+0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,\n+0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,\n+0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d\n+};\n+\n+static uint32_t crc32i(uint32_t crc, const uint8_t *buf, size_t len)\n+{\n+\tint l;\n+\n+\tfor (l = (len - 1); l >= 0; l--)\n+\t\tcrc = ucrc32(buf[l], crc);\n+\n+\treturn ~crc;\n+}\n+\n+static uint32_t tf_em_lkup_get_crc32_hash(struct tf_session *session,\n+\t\t\t\t\t  uint8_t *key,\n+\t\t\t\t\t  enum tf_dir dir)\n+{\n+\tint i;\n+\tuint32_t index;\n+\tuint32_t val1, val2;\n+\tuint8_t temp[4];\n+\tuint8_t *kptr = key;\n+\n+\t/* Do byte-wise XOR of the 52-byte HASH key first. */\n+\tindex = *key;\n+\tkptr--;\n+\n+\tfor (i = TF_HW_EM_KEY_MAX_SIZE - 2; i >= 0; i--) {\n+\t\tindex = index ^ *kptr;\n+\t\tkptr--;\n+\t}\n+\n+\t/* Get seeds */\n+\tval1 = session->lkup_em_seed_mem[dir][index * 2];\n+\tval2 = session->lkup_em_seed_mem[dir][index * 2 + 1];\n+\n+\ttemp[3] = (uint8_t)(val1 >> 24);\n+\ttemp[2] = (uint8_t)(val1 >> 16);\n+\ttemp[1] = (uint8_t)(val1 >> 8);\n+\ttemp[0] = (uint8_t)(val1 & 0xff);\n+\tval1 = 0;\n+\n+\t/* Start with seed */\n+\tif (!(val2 & 0x1))\n+\t\tval1 = crc32i(~val1, temp, 4);\n+\n+\tval1 = crc32i(~val1,\n+\t\t      (key - (TF_HW_EM_KEY_MAX_SIZE - 1)),\n+\t\t      TF_HW_EM_KEY_MAX_SIZE);\n+\n+\t/* End with seed */\n+\tif (val2 & 0x1)\n+\t\tval1 = crc32i(~val1, temp, 4);\n+\n+\treturn val1;\n+}\n+\n+static uint32_t tf_em_lkup_get_lookup3_hash(uint32_t lookup3_init_value,\n+\t\t\t\t\t    uint8_t *in_key)\n+{\n+\tuint32_t val1;\n+\n+\tval1 = hashword(((uint32_t *)in_key) + 1,\n+\t\t\t TF_HW_EM_KEY_MAX_SIZE / (sizeof(uint32_t)),\n+\t\t\t lookup3_init_value);\n+\n+\treturn val1;\n+}\n+\n+void *tf_em_get_table_page(struct tf_tbl_scope_cb *tbl_scope_cb,\n+\t\t\t   enum tf_dir dir,\n+\t\t\t   uint32_t offset,\n+\t\t\t   enum tf_em_table_type table_type)\n+{\n+\tint level = 0;\n+\tint page = offset / TF_EM_PAGE_SIZE;\n+\tvoid *addr = NULL;\n+\tstruct tf_em_ctx_mem_info *ctx = &tbl_scope_cb->em_ctx_info[dir];\n+\n+\tif (ctx == NULL)\n+\t\treturn NULL;\n+\n+\tif (dir != TF_DIR_RX && dir != TF_DIR_TX)\n+\t\treturn NULL;\n+\n+\tif (table_type < KEY0_TABLE || table_type > EFC_TABLE)\n+\t\treturn NULL;\n+\n+\t/*\n+\t * Use the level according to the num_level of page table\n+\t */\n+\tlevel = ctx->em_tables[table_type].num_lvl - 1;\n+\n+\taddr = (void *)ctx->em_tables[table_type].pg_tbl[level].pg_va_tbl[page];\n+\n+\treturn addr;\n+}\n+\n+/** Read Key table entry\n+ *\n+ * Entry is read in to entry\n+ */\n+static int tf_em_read_entry(struct tf_tbl_scope_cb 
*tbl_scope_cb,\n+\t\t\t\t struct tf_eem_64b_entry *entry,\n+\t\t\t\t uint32_t entry_size,\n+\t\t\t\t uint32_t index,\n+\t\t\t\t enum tf_em_table_type table_type,\n+\t\t\t\t enum tf_dir dir)\n+{\n+\tvoid *page;\n+\tuint32_t entry_offset = (index * entry_size) % TF_EM_PAGE_SIZE;\n+\n+\tpage = tf_em_get_table_page(tbl_scope_cb,\n+\t\t\t\t    dir,\n+\t\t\t\t    (index * entry_size),\n+\t\t\t\t    table_type);\n+\n+\tif (page == NULL)\n+\t\treturn -EINVAL;\n+\n+\tmemcpy((uint8_t *)entry, (uint8_t *)page + entry_offset, entry_size);\n+\treturn 0;\n+}\n+\n+static int tf_em_write_entry(struct tf_tbl_scope_cb *tbl_scope_cb,\n+\t\t\t\t struct tf_eem_64b_entry *entry,\n+\t\t\t\t uint32_t entry_size,\n+\t\t\t\t uint32_t index,\n+\t\t\t\t enum tf_em_table_type table_type,\n+\t\t\t\t enum tf_dir dir)\n+{\n+\tvoid *page;\n+\tuint32_t entry_offset = (index * entry_size) % TF_EM_PAGE_SIZE;\n+\n+\tpage = tf_em_get_table_page(tbl_scope_cb,\n+\t\t\t\t    dir,\n+\t\t\t\t    (index * entry_size),\n+\t\t\t\t    table_type);\n+\n+\tif (page == NULL)\n+\t\treturn -EINVAL;\n+\n+\tmemcpy((uint8_t *)page + entry_offset, entry, entry_size);\n+\n+\treturn 0;\n+}\n+\n+static int tf_em_entry_exists(struct tf_tbl_scope_cb *tbl_scope_cb,\n+\t\t\t       struct tf_eem_64b_entry *entry,\n+\t\t\t       uint32_t index,\n+\t\t\t       enum tf_em_table_type table_type,\n+\t\t\t       enum tf_dir dir)\n+{\n+\tint rc;\n+\tstruct tf_eem_64b_entry table_entry;\n+\n+\trc = tf_em_read_entry(tbl_scope_cb,\n+\t\t\t      &table_entry,\n+\t\t\t      TF_EM_KEY_RECORD_SIZE,\n+\t\t\t      index,\n+\t\t\t      table_type,\n+\t\t\t      dir);\n+\n+\tif (rc != 0)\n+\t\treturn -EINVAL;\n+\n+\tif (table_entry.hdr.word1 & (1 << TF_LKUP_RECORD_VALID_SHIFT)) {\n+\t\tif (entry != NULL) {\n+\t\t\tif (memcmp(&table_entry,\n+\t\t\t\t   entry,\n+\t\t\t\t   TF_EM_KEY_RECORD_SIZE) == 0)\n+\t\t\t\treturn -EEXIST;\n+\t\t} else {\n+\t\t\treturn -EEXIST;\n+\t\t}\n+\n+\t\treturn -EBUSY;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void tf_em_create_key_entry(struct tf_eem_entry_hdr *result,\n+\t\t\t\t    uint8_t\t       *in_key,\n+\t\t\t\t    struct tf_eem_64b_entry *key_entry)\n+{\n+\tkey_entry->hdr.word1 = result->word1;\n+\n+\tif (result->word1 & TF_LKUP_RECORD_ACT_REC_INT_MASK)\n+\t\tkey_entry->hdr.pointer = result->pointer;\n+\telse\n+\t\tkey_entry->hdr.pointer = result->pointer;\n+\n+\tmemcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4);\n+}\n+\n+/* tf_em_select_inject_table\n+ *\n+ * Returns:\n+ * 0 - Key does not exist in either table and can be inserted\n+ *\t\tat \"index\" in table \"table\".\n+ * EEXIST  - Key does exist in table at \"index\" in table \"table\".\n+ * TF_ERR     - Something went horribly wrong.\n+ */\n+static int tf_em_select_inject_table(struct tf_tbl_scope_cb\t*tbl_scope_cb,\n+\t\t\t\t\t  enum tf_dir dir,\n+\t\t\t\t\t  struct tf_eem_64b_entry *entry,\n+\t\t\t\t\t  uint32_t key0_hash,\n+\t\t\t\t\t  uint32_t key1_hash,\n+\t\t\t\t\t  uint32_t *index,\n+\t\t\t\t\t  enum tf_em_table_type *table)\n+{\n+\tint key0_entry;\n+\tint key1_entry;\n+\n+\t/*\n+\t * Check KEY0 table.\n+\t */\n+\tkey0_entry = tf_em_entry_exists(tbl_scope_cb,\n+\t\t\t\t\t entry,\n+\t\t\t\t\t key0_hash,\n+\t\t\t\t\t KEY0_TABLE,\n+\t\t\t\t\t dir);\n+\n+\t/*\n+\t * Check KEY1 table.\n+\t */\n+\tkey1_entry = tf_em_entry_exists(tbl_scope_cb,\n+\t\t\t\t\t entry,\n+\t\t\t\t\t key1_hash,\n+\t\t\t\t\t KEY1_TABLE,\n+\t\t\t\t\t dir);\n+\n+\tif (key0_entry == -EEXIST) {\n+\t\t*table = KEY0_TABLE;\n+\t\t*index = key0_hash;\n+\t\treturn -EEXIST;\n+\t} else if (key1_entry == 
-EEXIST) {\n+\t\t*table = KEY1_TABLE;\n+\t\t*index = key1_hash;\n+\t\treturn -EEXIST;\n+\t} else if (key0_entry == 0) {\n+\t\t*table = KEY0_TABLE;\n+\t\t*index = key0_hash;\n+\t\treturn 0;\n+\t} else if (key1_entry == 0) {\n+\t\t*table = KEY1_TABLE;\n+\t\t*index = key1_hash;\n+\t\treturn 0;\n+\t}\n+\n+\treturn -EINVAL;\n+}\n+\n+/** insert EEM entry API\n+ *\n+ * returns:\n+ *  0\n+ *  TF_ERR\t    - unable to get lock\n+ *\n+ * insert callback returns:\n+ *   0\n+ *   TF_ERR_EM_DUP  - key is already in table\n+ */\n+int tf_insert_eem_entry(struct tf_session\t   *session,\n+\t\t\tstruct tf_tbl_scope_cb\t   *tbl_scope_cb,\n+\t\t\tstruct tf_insert_em_entry_parms *parms)\n+{\n+\tuint32_t\t   mask;\n+\tuint32_t\t   key0_hash;\n+\tuint32_t\t   key1_hash;\n+\tuint32_t\t   key0_index;\n+\tuint32_t\t   key1_index;\n+\tstruct tf_eem_64b_entry key_entry;\n+\tuint32_t\t   index;\n+\tenum tf_em_table_type table_type;\n+\tuint32_t\t   gfid;\n+\tint\t\t   num_of_entry;\n+\n+\t/* Get mask to use on hash */\n+\tmask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[KEY0_TABLE].num_entries);\n+\n+\tif (!mask)\n+\t\treturn -EINVAL;\n+\n+\tnum_of_entry = TF_HW_EM_KEY_MAX_SIZE + 4;\n+\n+\tkey0_hash = tf_em_lkup_get_crc32_hash(session,\n+\t\t\t\t      &parms->key[num_of_entry] - 1,\n+\t\t\t\t      parms->dir);\n+\tkey0_index = key0_hash & mask;\n+\n+\tkey1_hash =\n+\t   tf_em_lkup_get_lookup3_hash(session->lkup_lkup3_init_cfg[parms->dir],\n+\t\t\t\t\tparms->key);\n+\tkey1_index = key1_hash & mask;\n+\n+\t/*\n+\t * Use the \"result\" arg to populate all of the key entry then\n+\t * store the byte swapped \"raw\" entry in a local copy ready\n+\t * for insertion in to the table.\n+\t */\n+\ttf_em_create_key_entry((struct tf_eem_entry_hdr *)parms->em_record,\n+\t\t\t\t((uint8_t *)parms->key),\n+\t\t\t\t&key_entry);\n+\n+\t/*\n+\t * Find which table to use\n+\t */\n+\tif (tf_em_select_inject_table(tbl_scope_cb,\n+\t\t\t\t      parms->dir,\n+\t\t\t\t      &key_entry,\n+\t\t\t\t      key0_index,\n+\t\t\t\t      key1_index,\n+\t\t\t\t      &index,\n+\t\t\t\t      &table_type) == 0) {\n+\t\tif (table_type == KEY0_TABLE) {\n+\t\t\tTF_SET_GFID(gfid,\n+\t\t\t\t    key0_index,\n+\t\t\t\t    KEY0_TABLE);\n+\t\t} else {\n+\t\t\tTF_SET_GFID(gfid,\n+\t\t\t\t    key1_index,\n+\t\t\t\t    KEY1_TABLE);\n+\t\t}\n+\n+\t\t/*\n+\t\t * Inject\n+\t\t */\n+\t\tif (tf_em_write_entry(tbl_scope_cb,\n+\t\t\t\t      &key_entry,\n+\t\t\t\t      TF_EM_KEY_RECORD_SIZE,\n+\t\t\t\t      index,\n+\t\t\t\t      table_type,\n+\t\t\t\t      parms->dir) == 0) {\n+\t\t\tTF_SET_FLOW_ID(parms->flow_id,\n+\t\t\t\t       gfid,\n+\t\t\t\t       TF_GFID_TABLE_EXTERNAL,\n+\t\t\t\t       parms->dir);\n+\t\t\tTF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,\n+\t\t\t\t\t\t     0,\n+\t\t\t\t\t\t     0,\n+\t\t\t\t\t\t     0,\n+\t\t\t\t\t\t     index,\n+\t\t\t\t\t\t     0,\n+\t\t\t\t\t\t     table_type);\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\n+\treturn -EINVAL;\n+}\n+\n+/** delete EEM hash entry API\n+ *\n+ * returns:\n+ *   0\n+ *   -EINVAL\t  - parameter error\n+ *   TF_NO_SESSION    - bad session ID\n+ *   TF_ERR_TBL_SCOPE - invalid table scope\n+ *   TF_ERR_TBL_IF    - invalid table interface\n+ *\n+ * insert callback returns\n+ *   0\n+ *   TF_NO_EM_MATCH - entry not found\n+ */\n+int tf_delete_eem_entry(struct tf *tfp,\n+\t\t\tstruct tf_delete_em_entry_parms *parms)\n+{\n+\tstruct tf_session\t   *session;\n+\tstruct tf_tbl_scope_cb\t   *tbl_scope_cb;\n+\tenum tf_em_table_type hash_type;\n+\tuint32_t index;\n+\n+\tif (parms == NULL)\n+\t\treturn 
-EINVAL;\n+\n+\tsession = (struct tf_session *)tfp->session->core_data;\n+\tif (session == NULL)\n+\t\treturn -EINVAL;\n+\n+\ttbl_scope_cb = tbl_scope_cb_find(session,\n+\t\t\t\t\t parms->tbl_scope_id);\n+\tif (tbl_scope_cb == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (parms->flow_handle == 0)\n+\t\treturn -EINVAL;\n+\n+\tTF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);\n+\tTF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);\n+\n+\tif (tf_em_entry_exists(tbl_scope_cb,\n+\t\t\t       NULL,\n+\t\t\t       index,\n+\t\t\t       hash_type,\n+\t\t\t       parms->dir) == -EEXIST) {\n+\t\ttf_em_write_entry(tbl_scope_cb,\n+\t\t\t\t  &zero_key_entry,\n+\t\t\t\t  TF_EM_KEY_RECORD_SIZE,\n+\t\t\t\t  index,\n+\t\t\t\t  hash_type,\n+\t\t\t\t  parms->dir);\n+\n+\t\treturn 0;\n+\t}\n+\n+\treturn -EINVAL;\n+}\ndiff --git a/drivers/net/bnxt/tf_core/tf_em.h b/drivers/net/bnxt/tf_core/tf_em.h\nnew file mode 100644\nindex 0000000..8a3584f\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/tf_em.h\n@@ -0,0 +1,117 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#ifndef _TF_EM_H_\n+#define _TF_EM_H_\n+\n+#include \"tf_core.h\"\n+#include \"tf_session.h\"\n+\n+#define TF_HW_EM_KEY_MAX_SIZE 52\n+#define TF_EM_KEY_RECORD_SIZE 64\n+\n+/** EEM Entry header\n+ *\n+ */\n+struct tf_eem_entry_hdr {\n+\tuint32_t pointer;\n+\tuint32_t word1;  /*\n+\t\t\t  * The header is made up of two words,\n+\t\t\t  * this is the first word. This field has multiple\n+\t\t\t  * subfields, there is no suitable single name for\n+\t\t\t  * it so just going with word1.\n+\t\t\t  */\n+#define TF_LKUP_RECORD_VALID_SHIFT 31\n+#define TF_LKUP_RECORD_VALID_MASK 0x80000000\n+#define TF_LKUP_RECORD_L1_CACHEABLE_SHIFT 30\n+#define TF_LKUP_RECORD_L1_CACHEABLE_MASK 0x40000000\n+#define TF_LKUP_RECORD_STRENGTH_SHIFT 28\n+#define TF_LKUP_RECORD_STRENGTH_MASK 0x30000000\n+#define TF_LKUP_RECORD_RESERVED_SHIFT 17\n+#define TF_LKUP_RECORD_RESERVED_MASK 0x0FFE0000\n+#define TF_LKUP_RECORD_KEY_SIZE_SHIFT 8\n+#define TF_LKUP_RECORD_KEY_SIZE_MASK 0x0001FF00\n+#define TF_LKUP_RECORD_ACT_REC_SIZE_SHIFT 3\n+#define TF_LKUP_RECORD_ACT_REC_SIZE_MASK 0x000000F8\n+#define TF_LKUP_RECORD_ACT_REC_INT_SHIFT 2\n+#define TF_LKUP_RECORD_ACT_REC_INT_MASK 0x00000004\n+#define TF_LKUP_RECORD_EXT_FLOW_CTR_SHIFT 1\n+#define TF_LKUP_RECORD_EXT_FLOW_CTR_MASK 0x00000002\n+#define TF_LKUP_RECORD_ACT_PTR_MSB_SHIFT 0\n+#define TF_LKUP_RECORD_ACT_PTR_MSB_MASK 0x00000001\n+};\n+\n+/** EEM Entry\n+ *  Each EEM entry is 512-bit (64-bytes)\n+ */\n+struct tf_eem_64b_entry {\n+\t/** Key is 448 bits - 56 bytes */\n+\tuint8_t key[TF_EM_KEY_RECORD_SIZE - sizeof(struct tf_eem_entry_hdr)];\n+\t/** Header is 8 bytes long */\n+\tstruct tf_eem_entry_hdr hdr;\n+};\n+\n+/**\n+ * Allocates EEM Table scope\n+ *\n+ * [in] tfp\n+ *   Pointer to TruFlow handle\n+ *\n+ * [in] parms\n+ *   Pointer to input parameters\n+ *\n+ * Returns:\n+ *   0       - Success\n+ *   -EINVAL - Parameter error\n+ *   -ENOMEM - Out of memory\n+ */\n+int tf_alloc_eem_tbl_scope(struct tf *tfp,\n+\t\t\t   struct tf_alloc_tbl_scope_parms *parms);\n+\n+/**\n+ * Free's EEM Table scope control block\n+ *\n+ * [in] tfp\n+ *   Pointer to TruFlow handle\n+ *\n+ * [in] parms\n+ *   Pointer to input parameters\n+ *\n+ * Returns:\n+ *   0       - Success\n+ *   -EINVAL - Parameter error\n+ */\n+int tf_free_eem_tbl_scope_cb(struct tf *tfp,\n+\t\t\t     struct tf_free_tbl_scope_parms *parms);\n+\n+/**\n+ * Function to search for table scope control block 
structure\n+ * with specified table scope ID.\n+ *\n+ * [in] session\n+ *   Session to use for the search of the table scope control block\n+ * [in] tbl_scope_id\n+ *   Table scope ID to search for\n+ *\n+ * Returns:\n+ *  Pointer to the found table scope control block struct or NULL if\n+ *  table scope control block struct not found\n+ */\n+struct tf_tbl_scope_cb *tbl_scope_cb_find(struct tf_session *session,\n+\t\t\t\t\t  uint32_t tbl_scope_id);\n+\n+int tf_insert_eem_entry(struct tf_session *session,\n+\t\t\tstruct tf_tbl_scope_cb *tbl_scope_cb,\n+\t\t\tstruct tf_insert_em_entry_parms *parms);\n+\n+int tf_delete_eem_entry(struct tf *tfp,\n+\t\t\tstruct tf_delete_em_entry_parms *parms);\n+\n+void *tf_em_get_table_page(struct tf_tbl_scope_cb *tbl_scope_cb,\n+\t\t\t   enum tf_dir dir,\n+\t\t\t   uint32_t offset,\n+\t\t\t   enum tf_em_table_type table_type);\n+\n+#endif /* _TF_EM_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/tf_ext_flow_handle.h b/drivers/net/bnxt/tf_core/tf_ext_flow_handle.h\nnew file mode 100644\nindex 0000000..417a99c\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/tf_ext_flow_handle.h\n@@ -0,0 +1,166 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#ifndef _TF_EXT_FLOW_HANDLE_H_\n+#define _TF_EXT_FLOW_HANDLE_H_\n+\n+#define TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK\t0x00000000F0000000ULL\n+#define TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT\t28\n+#define TF_FLOW_TYPE_FLOW_HANDLE_MASK\t\t0x00000000000000F0ULL\n+#define TF_FLOW_TYPE_FLOW_HANDLE_SFT\t\t4\n+#define TF_FLAGS_FLOW_HANDLE_MASK\t\t0x000000000000000FULL\n+#define TF_FLAGS_FLOW_HANDLE_SFT\t\t0\n+#define TF_INDEX_FLOW_HANDLE_MASK\t\t0xFFFFFFF000000000ULL\n+#define TF_INDEX_FLOW_HANDLE_SFT\t\t36\n+#define TF_ENTRY_NUM_FLOW_HANDLE_MASK\t\t0x0000000E00000000ULL\n+#define TF_ENTRY_NUM_FLOW_HANDLE_SFT\t\t33\n+#define TF_HASH_TYPE_FLOW_HANDLE_MASK\t\t0x0000000100000000ULL\n+#define TF_HASH_TYPE_FLOW_HANDLE_SFT\t\t32\n+\n+#define TF_FLOW_HANDLE_MASK (TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK |\t\\\n+\t\t\t\tTF_FLOW_TYPE_FLOW_HANDLE_MASK |\t\t\\\n+\t\t\t\tTF_FLAGS_FLOW_HANDLE_MASK |\t\t\\\n+\t\t\t\tTF_INDEX_FLOW_HANDLE_MASK |\t\t\\\n+\t\t\t\tTF_ENTRY_NUM_FLOW_HANDLE_MASK |\t\t\\\n+\t\t\t\tTF_HASH_TYPE_FLOW_HANDLE_MASK)\n+\n+#define TF_GET_FIELDS_FROM_FLOW_HANDLE(flow_handle,\t\t\t\\\n+\t\t\t\t       num_key_entries,\t\t\t\\\n+\t\t\t\t       flow_type,\t\t\t\\\n+\t\t\t\t       flags,\t\t\t\t\\\n+\t\t\t\t       index,\t\t\t\t\\\n+\t\t\t\t       entry_num,\t\t\t\\\n+\t\t\t\t       hash_type)\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\t(num_key_entries) = \\\n+\t\t(((flow_handle) & TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK) >> \\\n+\t\t TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT);\t\t\t\\\n+\t(flow_type) = (((flow_handle) & TF_FLOW_TYPE_FLOW_HANDLE_MASK) >> \\\n+\t\t     TF_FLOW_TYPE_FLOW_HANDLE_SFT);\t\t\t\\\n+\t(flags) = (((flow_handle) & TF_FLAGS_FLOW_HANDLE_MASK) >>\t\\\n+\t\t     TF_FLAGS_FLOW_HANDLE_SFT);\t\t\t\t\\\n+\t(index) = (((flow_handle) & TF_INDEX_FLOW_HANDLE_MASK) >>\t\\\n+\t\t     TF_INDEX_FLOW_HANDLE_SFT);\t\t\t\t\\\n+\t(entry_num) = (((flow_handle) & TF_ENTRY_NUM_FLOW_HANDLE_MASK) >> \\\n+\t\t     TF_ENTRY_NUM_FLOW_HANDLE_SFT);\t\t\t\\\n+\t(hash_type) = (((flow_handle) & TF_HASH_TYPE_FLOW_HANDLE_MASK) >> \\\n+\t\t     TF_HASH_TYPE_FLOW_HANDLE_SFT);\t\t\t\\\n+} while (0)\n+\n+#define TF_SET_FIELDS_IN_FLOW_HANDLE(flow_handle,\t\t\t\\\n+\t\t\t\t     num_key_entries,\t\t\t\\\n+\t\t\t\t     flow_type,\t\t\t\t\\\n+\t\t\t\t     flags,\t\t\t\t\\\n+\t\t\t\t     
index,\t\t\t\t\\\n+\t\t\t\t     entry_num,\t\t\t\t\\\n+\t\t\t\t     hash_type)\t\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\t(flow_handle) &= ~TF_FLOW_HANDLE_MASK;\t\t\t\t\\\n+\t(flow_handle) |= \\\n+\t\t(((num_key_entries) << TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT) & \\\n+\t\t TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK);\t\t\t\\\n+\t(flow_handle) |= (((flow_type) << TF_FLOW_TYPE_FLOW_HANDLE_SFT) & \\\n+\t\t\tTF_FLOW_TYPE_FLOW_HANDLE_MASK);\t\t\t\\\n+\t(flow_handle) |= (((flags) << TF_FLAGS_FLOW_HANDLE_SFT) &\t\\\n+\t\t\tTF_FLAGS_FLOW_HANDLE_MASK);\t\t\t\\\n+\t(flow_handle) |= ((((uint64_t)index) << TF_INDEX_FLOW_HANDLE_SFT) & \\\n+\t\t\tTF_INDEX_FLOW_HANDLE_MASK);\t\t\t\\\n+\t(flow_handle) |=\t\t\t\t\t\t\\\n+\t\t((((uint64_t)entry_num) << TF_ENTRY_NUM_FLOW_HANDLE_SFT) & \\\n+\t\t TF_ENTRY_NUM_FLOW_HANDLE_MASK);\t\t\t\\\n+\t(flow_handle) |=\t\t\t\t\t\t\\\n+\t\t((((uint64_t)hash_type) << TF_HASH_TYPE_FLOW_HANDLE_SFT) & \\\n+\t\t TF_HASH_TYPE_FLOW_HANDLE_MASK);\t\t\t\\\n+} while (0)\n+#define TF_SET_FIELDS_IN_WH_FLOW_HANDLE TF_SET_FIELDS_IN_FLOW_HANDLE\n+\n+#define TF_GET_INDEX_FROM_FLOW_HANDLE(flow_handle,\t\t\t\\\n+\t\t\t\t      index)\t\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\tindex = (((flow_handle) & TF_INDEX_FLOW_HANDLE_MASK) >>\t\t\\\n+\t\t     TF_INDEX_FLOW_HANDLE_SFT);\t\t\t\t\\\n+} while (0)\n+\n+#define TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(flow_handle,\t\t\t\\\n+\t\t\t\t\t  hash_type)\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\thash_type = (((flow_handle) & TF_HASH_TYPE_FLOW_HANDLE_MASK) >>\t\\\n+\t\t     TF_HASH_TYPE_FLOW_HANDLE_SFT);\t\t\t\\\n+} while (0)\n+\n+/*\n+ * 32 bit Flow ID handlers\n+ */\n+#define TF_GFID_FLOW_ID_MASK\t\t0xFFFFFFF0UL\n+#define TF_GFID_FLOW_ID_SFT\t\t4\n+#define TF_FLAG_FLOW_ID_MASK\t\t0x00000002UL\n+#define TF_FLAG_FLOW_ID_SFT\t\t1\n+#define TF_DIR_FLOW_ID_MASK\t\t0x00000001UL\n+#define TF_DIR_FLOW_ID_SFT\t\t0\n+\n+#define TF_SET_FLOW_ID(flow_id, gfid, flag, dir)\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\t(flow_id) &= ~(TF_GFID_FLOW_ID_MASK |\t\t\t\t\\\n+\t\t     TF_FLAG_FLOW_ID_MASK |\t\t\t\t\\\n+\t\t     TF_DIR_FLOW_ID_MASK);\t\t\t\t\\\n+\t(flow_id) |= (((gfid) << TF_GFID_FLOW_ID_SFT) &\t\t\t\\\n+\t\t    TF_GFID_FLOW_ID_MASK) |\t\t\t\t\\\n+\t\t(((flag) << TF_FLAG_FLOW_ID_SFT) &\t\t\t\\\n+\t\t TF_FLAG_FLOW_ID_MASK) |\t\t\t\t\\\n+\t\t(((dir) << TF_DIR_FLOW_ID_SFT) &\t\t\t\\\n+\t\t TF_DIR_FLOW_ID_MASK);\t\t\t\t\t\\\n+} while (0)\n+\n+#define TF_GET_GFID_FROM_FLOW_ID(flow_id, gfid)\t\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\tgfid = (((flow_id) & TF_GFID_FLOW_ID_MASK) >>\t\t\t\\\n+\t\tTF_GFID_FLOW_ID_SFT);\t\t\t\t\t\\\n+} while (0)\n+\n+#define TF_GET_DIR_FROM_FLOW_ID(flow_id, dir)\t\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\tdir = (((flow_id) & TF_DIR_FLOW_ID_MASK) >>\t\t\t\\\n+\t\tTF_DIR_FLOW_ID_SFT);\t\t\t\t\t\\\n+} while (0)\n+\n+#define TF_GET_FLAG_FROM_FLOW_ID(flow_id, flag)\t\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\tflag = (((flow_id) & TF_FLAG_FLOW_ID_MASK) >>\t\t\t\\\n+\t\tTF_FLAG_FLOW_ID_SFT);\t\t\t\t\t\\\n+} while (0)\n+\n+/*\n+ * 32 bit GFID handlers\n+ */\n+#define TF_HASH_INDEX_GFID_MASK\t0x07FFFFFFUL\n+#define TF_HASH_INDEX_GFID_SFT\t0\n+#define TF_HASH_TYPE_GFID_MASK\t0x08000000UL\n+#define TF_HASH_TYPE_GFID_SFT\t27\n+\n+#define TF_GFID_TABLE_INTERNAL 0\n+#define TF_GFID_TABLE_EXTERNAL 1\n+\n+#define TF_SET_GFID(gfid, index, type)\t\t\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\tgfid = (((index) << TF_HASH_INDEX_GFID_SFT) &\t\t\t\\\n+\t\tTF_HASH_INDEX_GFID_MASK) |\t\t\t\t\\\n+\t\t(((type) << TF_HASH_TYPE_GFID_SFT) &\t\t\t\\\n+\t\t TF_HASH_TYPE_GFID_MASK);\t\t\t\t\\\n+} while 
(0)\n+\n+#define TF_GET_HASH_INDEX_FROM_GFID(gfid, index)\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\tindex = (((gfid) & TF_HASH_INDEX_GFID_MASK) >>\t\t\t\\\n+\t\tTF_HASH_INDEX_GFID_SFT);\t\t\t\t\\\n+} while (0)\n+\n+#define TF_GET_HASH_TYPE_FROM_GFID(gfid, type)\t\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\ttype = (((gfid) & TF_HASH_TYPE_GFID_MASK) >>\t\t\t\\\n+\t\tTF_HASH_TYPE_GFID_SFT);\t\t\t\t\t\\\n+} while (0)\n+\n+\n+#endif /* _TF_EXT_FLOW_HANDLE_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/tf_msg.c b/drivers/net/bnxt/tf_core/tf_msg.c\nindex 3f3001c..bdf8f15 100644\n--- a/drivers/net/bnxt/tf_core/tf_msg.c\n+++ b/drivers/net/bnxt/tf_core/tf_msg.c\n@@ -869,6 +869,177 @@ tf_msg_session_sram_resc_flush(struct tf *tfp,\n \treturn tfp_le_to_cpu_32(parms.tf_resp_code);\n }\n \n+/**\n+ * Sends EM mem register request to Firmware\n+ */\n+int tf_msg_em_mem_rgtr(struct tf *tfp,\n+\t\t       int           page_lvl,\n+\t\t       int           page_size,\n+\t\t       uint64_t      dma_addr,\n+\t\t       uint16_t     *ctx_id)\n+{\n+\tint rc;\n+\tstruct hwrm_tf_ctxt_mem_rgtr_input req = { 0 };\n+\tstruct hwrm_tf_ctxt_mem_rgtr_output resp = { 0 };\n+\tstruct tfp_send_msg_parms parms = { 0 };\n+\n+\treq.page_level = page_lvl;\n+\treq.page_size = page_size;\n+\treq.page_dir = tfp_cpu_to_le_64(dma_addr);\n+\n+\tparms.tf_type = HWRM_TF_CTXT_MEM_RGTR;\n+\tparms.req_data = (uint32_t *)&req;\n+\tparms.req_size = sizeof(req);\n+\tparms.resp_data = (uint32_t *)&resp;\n+\tparms.resp_size = sizeof(resp);\n+\tparms.mailbox = TF_KONG_MB;\n+\n+\trc = tfp_send_msg_direct(tfp,\n+\t\t\t\t &parms);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\t*ctx_id = tfp_le_to_cpu_16(resp.ctx_id);\n+\n+\treturn rc;\n+}\n+\n+/**\n+ * Sends EM mem unregister request to Firmware\n+ */\n+int tf_msg_em_mem_unrgtr(struct tf *tfp,\n+\t\t\t uint16_t  *ctx_id)\n+{\n+\tint rc;\n+\tstruct hwrm_tf_ctxt_mem_unrgtr_input req = {0};\n+\tstruct hwrm_tf_ctxt_mem_unrgtr_output resp = {0};\n+\tstruct tfp_send_msg_parms parms = { 0 };\n+\n+\treq.ctx_id = tfp_cpu_to_le_32(*ctx_id);\n+\n+\tparms.tf_type = HWRM_TF_CTXT_MEM_UNRGTR;\n+\tparms.req_data = (uint32_t *)&req;\n+\tparms.req_size = sizeof(req);\n+\tparms.resp_data = (uint32_t *)&resp;\n+\tparms.resp_size = sizeof(resp);\n+\tparms.mailbox = TF_KONG_MB;\n+\n+\trc = tfp_send_msg_direct(tfp,\n+\t\t\t\t &parms);\n+\treturn rc;\n+}\n+\n+/**\n+ * Sends EM qcaps request to Firmware\n+ */\n+int tf_msg_em_qcaps(struct tf *tfp,\n+\t\t    int dir,\n+\t\t    struct tf_em_caps *em_caps)\n+{\n+\tint rc;\n+\tstruct hwrm_tf_ext_em_qcaps_input  req = {0};\n+\tstruct hwrm_tf_ext_em_qcaps_output resp = { 0 };\n+\tuint32_t             flags;\n+\tstruct tfp_send_msg_parms parms = { 0 };\n+\n+\tflags = (dir == TF_DIR_TX ? 
HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX :\n+\t\t HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX);\n+\treq.flags = tfp_cpu_to_le_32(flags);\n+\n+\tparms.tf_type = HWRM_TF_EXT_EM_QCAPS;\n+\tparms.req_data = (uint32_t *)&req;\n+\tparms.req_size = sizeof(req);\n+\tparms.resp_data = (uint32_t *)&resp;\n+\tparms.resp_size = sizeof(resp);\n+\tparms.mailbox = TF_KONG_MB;\n+\n+\trc = tfp_send_msg_direct(tfp,\n+\t\t\t\t &parms);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tem_caps->supported = tfp_le_to_cpu_32(resp.supported);\n+\tem_caps->max_entries_supported =\n+\t\ttfp_le_to_cpu_32(resp.max_entries_supported);\n+\tem_caps->key_entry_size = tfp_le_to_cpu_16(resp.key_entry_size);\n+\tem_caps->record_entry_size =\n+\t\ttfp_le_to_cpu_16(resp.record_entry_size);\n+\tem_caps->efc_entry_size = tfp_le_to_cpu_16(resp.efc_entry_size);\n+\n+\treturn rc;\n+}\n+\n+/**\n+ * Sends EM config request to Firmware\n+ */\n+int tf_msg_em_cfg(struct tf *tfp,\n+\t\t  uint32_t   num_entries,\n+\t\t  uint16_t   key0_ctx_id,\n+\t\t  uint16_t   key1_ctx_id,\n+\t\t  uint16_t   record_ctx_id,\n+\t\t  uint16_t   efc_ctx_id,\n+\t\t  int        dir)\n+{\n+\tint rc;\n+\tstruct hwrm_tf_ext_em_cfg_input  req = {0};\n+\tstruct hwrm_tf_ext_em_cfg_output resp = {0};\n+\tuint32_t flags;\n+\tstruct tfp_send_msg_parms parms = { 0 };\n+\n+\tflags = (dir == TF_DIR_TX ? HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :\n+\t\t HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);\n+\tflags |= HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD;\n+\n+\treq.flags = tfp_cpu_to_le_32(flags);\n+\treq.num_entries = tfp_cpu_to_le_32(num_entries);\n+\n+\treq.key0_ctx_id = tfp_cpu_to_le_16(key0_ctx_id);\n+\treq.key1_ctx_id = tfp_cpu_to_le_16(key1_ctx_id);\n+\treq.record_ctx_id = tfp_cpu_to_le_16(record_ctx_id);\n+\treq.efc_ctx_id = tfp_cpu_to_le_16(efc_ctx_id);\n+\n+\tparms.tf_type = HWRM_TF_EXT_EM_CFG;\n+\tparms.req_data = (uint32_t *)&req;\n+\tparms.req_size = sizeof(req);\n+\tparms.resp_data = (uint32_t *)&resp;\n+\tparms.resp_size = sizeof(resp);\n+\tparms.mailbox = TF_KONG_MB;\n+\n+\trc = tfp_send_msg_direct(tfp,\n+\t\t\t\t &parms);\n+\treturn rc;\n+}\n+\n+/**\n+ * Sends EM operation request to Firmware\n+ */\n+int tf_msg_em_op(struct tf *tfp,\n+\t\t int        dir,\n+\t\t uint16_t   op)\n+{\n+\tint rc;\n+\tstruct hwrm_tf_ext_em_op_input  req = {0};\n+\tstruct hwrm_tf_ext_em_op_output resp = {0};\n+\tuint32_t flags;\n+\tstruct tfp_send_msg_parms parms = { 0 };\n+\n+\tflags = (dir == TF_DIR_TX ? 
HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX :\n+\t\t HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX);\n+\treq.flags = tfp_cpu_to_le_32(flags);\n+\treq.op = tfp_cpu_to_le_16(op);\n+\n+\tparms.tf_type = HWRM_TF_EXT_EM_OP;\n+\tparms.req_data = (uint32_t *)&req;\n+\tparms.req_size = sizeof(req);\n+\tparms.resp_data = (uint32_t *)&resp;\n+\tparms.resp_size = sizeof(resp);\n+\tparms.mailbox = TF_KONG_MB;\n+\n+\trc = tfp_send_msg_direct(tfp,\n+\t\t\t\t &parms);\n+\treturn rc;\n+}\n+\n int\n tf_msg_set_tbl_entry(struct tf *tfp,\n \t\t     enum tf_dir dir,\ndiff --git a/drivers/net/bnxt/tf_core/tf_msg.h b/drivers/net/bnxt/tf_core/tf_msg.h\nindex 9055b16..b8d8c1e 100644\n--- a/drivers/net/bnxt/tf_core/tf_msg.h\n+++ b/drivers/net/bnxt/tf_core/tf_msg.h\n@@ -122,6 +122,46 @@ int tf_msg_session_sram_resc_flush(struct tf *tfp,\n \t\t\t\t   struct tf_rm_entry *sram_entry);\n \n /**\n+ * Sends EM mem register request to Firmware\n+ */\n+int tf_msg_em_mem_rgtr(struct tf *tfp,\n+\t\t       int           page_lvl,\n+\t\t       int           page_size,\n+\t\t       uint64_t      dma_addr,\n+\t\t       uint16_t     *ctx_id);\n+\n+/**\n+ * Sends EM mem unregister request to Firmware\n+ */\n+int tf_msg_em_mem_unrgtr(struct tf *tfp,\n+\t\t\t uint16_t     *ctx_id);\n+\n+/**\n+ * Sends EM qcaps request to Firmware\n+ */\n+int tf_msg_em_qcaps(struct tf *tfp,\n+\t\t    int dir,\n+\t\t    struct tf_em_caps *em_caps);\n+\n+/**\n+ * Sends EM config request to Firmware\n+ */\n+int tf_msg_em_cfg(struct tf *tfp,\n+\t\t  uint32_t      num_entries,\n+\t\t  uint16_t      key0_ctx_id,\n+\t\t  uint16_t      key1_ctx_id,\n+\t\t  uint16_t      record_ctx_id,\n+\t\t  uint16_t      efc_ctx_id,\n+\t\t  int           dir);\n+\n+/**\n+ * Sends EM operation request to Firmware\n+ */\n+int tf_msg_em_op(struct tf *tfp,\n+\t\t int        dir,\n+\t\t uint16_t   op);\n+\n+/**\n  * Sends tcam entry 'set' to the Firmware.\n  *\n  * [in] tfp\ndiff --git a/drivers/net/bnxt/tf_core/tf_tbl.c b/drivers/net/bnxt/tf_core/tf_tbl.c\nindex 14bf4ef..cc27b9c 100644\n--- a/drivers/net/bnxt/tf_core/tf_tbl.c\n+++ b/drivers/net/bnxt/tf_core/tf_tbl.c\n@@ -15,7 +15,7 @@\n #include \"hsi_struct_def_dpdk.h\"\n \n #include \"tf_core.h\"\n-#include \"tf_session.h\"\n+#include \"tf_em.h\"\n #include \"tf_msg.h\"\n #include \"tfp.h\"\n #include \"hwrm_tf.h\"\n@@ -30,6 +30,1366 @@\n /* Number of pointers per page_size */\n #define\tMAX_PAGE_PTRS(page_size)  ((page_size) / sizeof(void *))\n \n+#define TF_EM_PG_SZ_4K        (1 << 12)\n+#define TF_EM_PG_SZ_8K        (1 << 13)\n+#define TF_EM_PG_SZ_64K       (1 << 16)\n+#define TF_EM_PG_SZ_256K      (1 << 18)\n+#define TF_EM_PG_SZ_1M        (1 << 20)\n+#define TF_EM_PG_SZ_2M        (1 << 21)\n+#define TF_EM_PG_SZ_4M        (1 << 22)\n+#define TF_EM_PG_SZ_1G        (1 << 30)\n+\n+#define\tTF_EM_CTX_ID_INVALID   0xFFFF\n+\n+#define\tTF_EM_MIN_ENTRIES     (1 << 15) /* 32K */\n+#define\tTF_EM_MAX_ENTRIES     (1 << 27) /* 128M */\n+\n+/**\n+ * Function to free a page table\n+ *\n+ * [in] tp\n+ *   Pointer to the page table to free\n+ */\n+static void\n+tf_em_free_pg_tbl(struct tf_em_page_tbl *tp)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < tp->pg_count; i++) {\n+\t\tif (!tp->pg_va_tbl[i]) {\n+\t\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t\t    \"No map for page %d table %016\" PRIu64 \"\\n\",\n+\t\t\t\t    i,\n+\t\t\t\t    (uint64_t)(uintptr_t)tp);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\ttfp_free(tp->pg_va_tbl[i]);\n+\t\ttp->pg_va_tbl[i] = NULL;\n+\t}\n+\n+\ttp->pg_count = 0;\n+\ttfp_free(tp->pg_va_tbl);\n+\ttp->pg_va_tbl = 
NULL;\n+\ttfp_free(tp->pg_pa_tbl);\n+\ttp->pg_pa_tbl = NULL;\n+}\n+\n+/**\n+ * Function to free an EM table\n+ *\n+ * [in] tbl\n+ *   Pointer to the EM table to free\n+ */\n+static void\n+tf_em_free_page_table(struct tf_em_table *tbl)\n+{\n+\tstruct tf_em_page_tbl *tp;\n+\tint i;\n+\n+\tfor (i = 0; i < tbl->num_lvl; i++) {\n+\t\ttp = &tbl->pg_tbl[i];\n+\n+\t\tPMD_DRV_LOG(INFO,\n+\t\t\t   \"EEM: Freeing page table: size %u lvl %d cnt %u\\n\",\n+\t\t\t   TF_EM_PAGE_SIZE,\n+\t\t\t    i,\n+\t\t\t    tp->pg_count);\n+\n+\t\ttf_em_free_pg_tbl(tp);\n+\t}\n+\n+\ttbl->l0_addr = NULL;\n+\ttbl->l0_dma_addr = 0;\n+\ttbl->num_lvl = 0;\n+\ttbl->num_data_pages = 0;\n+}\n+\n+/**\n+ * Allocation of page tables\n+ *\n+ * [in] tfp\n+ *   Pointer to a TruFlow handle\n+ *\n+ * [in] pg_count\n+ *   Page count to allocate\n+ *\n+ * [in] pg_size\n+ *   Size of each page\n+ *\n+ * Returns:\n+ *   0       - Success\n+ *   -ENOMEM - Out of memmory\n+ */\n+static int\n+tf_em_alloc_pg_tbl(struct tf_em_page_tbl *tp,\n+\t\t   uint32_t pg_count,\n+\t\t   uint32_t pg_size)\n+{\n+\tuint32_t i;\n+\tstruct tfp_calloc_parms parms;\n+\n+\tparms.nitems = pg_count;\n+\tparms.size = sizeof(void *);\n+\tparms.alignment = 0;\n+\n+\tif (tfp_calloc(&parms) != 0)\n+\t\treturn -ENOMEM;\n+\n+\ttp->pg_va_tbl = parms.mem_va;\n+\n+\tif (tfp_calloc(&parms) != 0) {\n+\t\ttfp_free(tp->pg_va_tbl);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttp->pg_pa_tbl = parms.mem_va;\n+\n+\ttp->pg_count = 0;\n+\ttp->pg_size = pg_size;\n+\n+\tfor (i = 0; i < pg_count; i++) {\n+\t\tparms.nitems = 1;\n+\t\tparms.size = pg_size;\n+\t\tparms.alignment = TF_EM_PAGE_ALIGNMENT;\n+\n+\t\tif (tfp_calloc(&parms) != 0)\n+\t\t\tgoto cleanup;\n+\n+\t\ttp->pg_pa_tbl[i] = (uint64_t)(uintptr_t)parms.mem_pa;\n+\t\ttp->pg_va_tbl[i] = parms.mem_va;\n+\n+\t\tmemset(tp->pg_va_tbl[i], 0, pg_size);\n+\t\ttp->pg_count++;\n+\t}\n+\n+\treturn 0;\n+\n+cleanup:\n+\ttf_em_free_pg_tbl(tp);\n+\treturn -ENOMEM;\n+}\n+\n+/**\n+ * Allocates EM page tables\n+ *\n+ * [in] tbl\n+ *   Table to allocate pages for\n+ *\n+ * Returns:\n+ *   0       - Success\n+ *   -ENOMEM - Out of memmory\n+ */\n+static int\n+tf_em_alloc_page_table(struct tf_em_table *tbl)\n+{\n+\tstruct tf_em_page_tbl *tp;\n+\tint rc = 0;\n+\tint i;\n+\tuint32_t j;\n+\n+\tfor (i = 0; i < tbl->num_lvl; i++) {\n+\t\ttp = &tbl->pg_tbl[i];\n+\n+\t\trc = tf_em_alloc_pg_tbl(tp,\n+\t\t\t\t\ttbl->page_cnt[i],\n+\t\t\t\t\tTF_EM_PAGE_SIZE);\n+\t\tif (rc) {\n+\t\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t\t\"Failed to allocate page table: lvl: %d\\n\",\n+\t\t\t\ti);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tfor (j = 0; j < tp->pg_count; j++) {\n+\t\t\tPMD_DRV_LOG(INFO,\n+\t\t\t\t\"EEM: Allocated page table: size %u lvl %d cnt\"\n+\t\t\t\t\" %u VA:%p PA:%p\\n\",\n+\t\t\t\tTF_EM_PAGE_SIZE,\n+\t\t\t\ti,\n+\t\t\t\ttp->pg_count,\n+\t\t\t\t(uint32_t *)tp->pg_va_tbl[j],\n+\t\t\t\t(uint32_t *)(uintptr_t)tp->pg_pa_tbl[j]);\n+\t\t}\n+\t}\n+\treturn rc;\n+\n+cleanup:\n+\ttf_em_free_page_table(tbl);\n+\treturn rc;\n+}\n+\n+/**\n+ * Links EM page tables\n+ *\n+ * [in] tp\n+ *   Pointer to page table\n+ *\n+ * [in] tp_next\n+ *   Pointer to the next page table\n+ *\n+ * [in] set_pte_last\n+ *   Flag controlling if the page table is last\n+ */\n+static void\n+tf_em_link_page_table(struct tf_em_page_tbl *tp,\n+\t\t      struct tf_em_page_tbl *tp_next,\n+\t\t      bool set_pte_last)\n+{\n+\tuint64_t *pg_pa = tp_next->pg_pa_tbl;\n+\tuint64_t *pg_va;\n+\tuint64_t valid;\n+\tuint32_t k = 0;\n+\tuint32_t i;\n+\tuint32_t j;\n+\n+\tfor (i = 0; i < tp->pg_count; i++) {\n+\t\tpg_va = 
tp->pg_va_tbl[i];\n+\n+\t\tfor (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {\n+\t\t\tif (k == tp_next->pg_count - 2 && set_pte_last)\n+\t\t\t\tvalid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;\n+\t\t\telse if (k == tp_next->pg_count - 1 && set_pte_last)\n+\t\t\t\tvalid = PTU_PTE_LAST | PTU_PTE_VALID;\n+\t\t\telse\n+\t\t\t\tvalid = PTU_PTE_VALID;\n+\n+\t\t\tpg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);\n+\t\t\tif (++k >= tp_next->pg_count)\n+\t\t\t\treturn;\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Setup a EM page table\n+ *\n+ * [in] tbl\n+ *   Pointer to EM page table\n+ */\n+static void\n+tf_em_setup_page_table(struct tf_em_table *tbl)\n+{\n+\tstruct tf_em_page_tbl *tp_next;\n+\tstruct tf_em_page_tbl *tp;\n+\tbool set_pte_last = 0;\n+\tint i;\n+\n+\tfor (i = 0; i < tbl->num_lvl - 1; i++) {\n+\t\ttp = &tbl->pg_tbl[i];\n+\t\ttp_next = &tbl->pg_tbl[i + 1];\n+\t\tif (i == tbl->num_lvl - 2)\n+\t\t\tset_pte_last = 1;\n+\t\ttf_em_link_page_table(tp, tp_next, set_pte_last);\n+\t}\n+\n+\ttbl->l0_addr = tbl->pg_tbl[PT_LVL_0].pg_va_tbl[0];\n+\ttbl->l0_dma_addr = tbl->pg_tbl[PT_LVL_0].pg_pa_tbl[0];\n+}\n+\n+/**\n+ * Given the page size, size of each data item (entry size),\n+ * and the total number of entries needed, determine the number\n+ * of page table levels and the number of data pages required.\n+ *\n+ * [in] page_size\n+ *   Page size\n+ *\n+ * [in] entry_size\n+ *   Entry size\n+ *\n+ * [in] num_entries\n+ *   Number of entries needed\n+ *\n+ * [out] num_data_pages\n+ *   Number of pages required\n+ *\n+ * Returns:\n+ *   Success  - Number of EM page levels required\n+ *   -ENOMEM  - Out of memory\n+ */\n+static int\n+tf_em_size_page_tbl_lvl(uint32_t page_size,\n+\t\t\tuint32_t entry_size,\n+\t\t\tuint32_t num_entries,\n+\t\t\tuint64_t *num_data_pages)\n+{\n+\tuint64_t lvl_data_size = page_size;\n+\tint lvl = PT_LVL_0;\n+\tuint64_t data_size;\n+\n+\t*num_data_pages = 0;\n+\tdata_size = (uint64_t)num_entries * entry_size;\n+\n+\twhile (lvl_data_size < data_size) {\n+\t\tlvl++;\n+\n+\t\tif (lvl == PT_LVL_1)\n+\t\t\tlvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *\n+\t\t\t\tpage_size;\n+\t\telse if (lvl == PT_LVL_2)\n+\t\t\tlvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *\n+\t\t\t\tMAX_PAGE_PTRS(page_size) * page_size;\n+\t\telse\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\t*num_data_pages = roundup(data_size, page_size) / page_size;\n+\n+\treturn lvl;\n+}\n+\n+/**\n+ * Return the number of page table pages needed to\n+ * reference the given number of next level pages.\n+ *\n+ * [in] num_pages\n+ *   Number of EM pages\n+ *\n+ * [in] page_size\n+ *   Size of each EM page\n+ *\n+ * Returns:\n+ *   Number of EM page table pages\n+ */\n+static uint32_t\n+tf_em_page_tbl_pgcnt(uint32_t num_pages,\n+\t\t     uint32_t page_size)\n+{\n+\treturn roundup(num_pages, MAX_PAGE_PTRS(page_size)) /\n+\t\t       MAX_PAGE_PTRS(page_size);\n+\treturn 0;\n+}\n+\n+/**\n+ * Given the number of data pages, page_size and the maximum\n+ * number of page table levels (already determined), size\n+ * the number of page table pages required at each level.\n+ *\n+ * [in] max_lvl\n+ *   Max number of levels\n+ *\n+ * [in] num_data_pages\n+ *   Number of EM data pages\n+ *\n+ * [in] page_size\n+ *   Size of an EM page\n+ *\n+ * [out] *page_cnt\n+ *   EM page count\n+ */\n+static void\n+tf_em_size_page_tbls(int max_lvl,\n+\t\t     uint64_t num_data_pages,\n+\t\t     uint32_t page_size,\n+\t\t     uint32_t *page_cnt)\n+{\n+\tif (max_lvl == PT_LVL_0) {\n+\t\tpage_cnt[PT_LVL_0] = num_data_pages;\n+\t} else if (max_lvl == PT_LVL_1) 
{\n+\t\tpage_cnt[PT_LVL_1] = num_data_pages;\n+\t\tpage_cnt[PT_LVL_0] =\n+\t\ttf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);\n+\t} else if (max_lvl == PT_LVL_2) {\n+\t\tpage_cnt[PT_LVL_2] = num_data_pages;\n+\t\tpage_cnt[PT_LVL_1] =\n+\t\ttf_em_page_tbl_pgcnt(page_cnt[PT_LVL_2], page_size);\n+\t\tpage_cnt[PT_LVL_0] =\n+\t\ttf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);\n+\t} else {\n+\t\treturn;\n+\t}\n+}\n+\n+/**\n+ * Size the EM table based on capabilities\n+ *\n+ * [in] tbl\n+ *   EM table to size\n+ *\n+ * Returns:\n+ *   0        - Success\n+ *   - EINVAL - Parameter error\n+ *   - ENOMEM - Out of memory\n+ */\n+static int\n+tf_em_size_table(struct tf_em_table *tbl)\n+{\n+\tuint64_t num_data_pages;\n+\tuint32_t *page_cnt;\n+\tint max_lvl;\n+\tuint32_t num_entries;\n+\tuint32_t cnt = TF_EM_MIN_ENTRIES;\n+\n+\t/* Ignore entry if both size and number are zero */\n+\tif (!tbl->entry_size && !tbl->num_entries)\n+\t\treturn 0;\n+\n+\t/* If only one is set then error */\n+\tif (!tbl->entry_size || !tbl->num_entries)\n+\t\treturn -EINVAL;\n+\n+\t/* Determine number of page table levels and the number\n+\t * of data pages needed to process the given eem table.\n+\t */\n+\tif (tbl->type == RECORD_TABLE) {\n+\t\t/*\n+\t\t * For action records just a memory size is provided. Work\n+\t\t * backwards to resolve to number of entries\n+\t\t */\n+\t\tnum_entries = tbl->num_entries / tbl->entry_size;\n+\t\tif (num_entries < TF_EM_MIN_ENTRIES) {\n+\t\t\tnum_entries = TF_EM_MIN_ENTRIES;\n+\t\t} else {\n+\t\t\twhile (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)\n+\t\t\t\tcnt *= 2;\n+\t\t\tnum_entries = cnt;\n+\t\t}\n+\t} else {\n+\t\tnum_entries = tbl->num_entries;\n+\t}\n+\n+\tmax_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,\n+\t\t\t\t\t  tbl->entry_size,\n+\t\t\t\t\t  tbl->num_entries,\n+\t\t\t\t\t  &num_data_pages);\n+\tif (max_lvl < 0) {\n+\t\tPMD_DRV_LOG(WARNING, \"EEM: Failed to size page table levels\\n\");\n+\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t    \"table: %d data-sz: %016\" PRIu64 \" page-sz: %u\\n\",\n+\t\t\t    tbl->type,\n+\t\t\t    (uint64_t)num_entries * tbl->entry_size,\n+\t\t\t    TF_EM_PAGE_SIZE);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttbl->num_lvl = max_lvl + 1;\n+\ttbl->num_data_pages = num_data_pages;\n+\n+\t/* Determine the number of pages needed at each level */\n+\tpage_cnt = tbl->page_cnt;\n+\tmemset(page_cnt, 0, sizeof(tbl->page_cnt));\n+\ttf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,\n+\t\t\t\tpage_cnt);\n+\n+\tPMD_DRV_LOG(INFO, \"EEM: Sized page table: %d\\n\", tbl->type);\n+\tPMD_DRV_LOG(INFO,\n+\t\t    \"EEM: lvls: %d sz: %016\" PRIu64 \" pgs: %016\" PRIu64 \" l0: %u l1: %u l2: %u\\n\",\n+\t\t    max_lvl + 1,\n+\t\t    (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,\n+\t\t    num_data_pages,\n+\t\t    page_cnt[PT_LVL_0],\n+\t\t    page_cnt[PT_LVL_1],\n+\t\t    page_cnt[PT_LVL_2]);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Unregisters EM Ctx in Firmware\n+ *\n+ * [in] tfp\n+ *   Pointer to a TruFlow handle\n+ *\n+ * [in] tbl_scope_cb\n+ *   Pointer to a table scope control block\n+ *\n+ * [in] dir\n+ *   Receive or transmit direction\n+ */\n+static void\n+tf_em_ctx_unreg(struct tf *tfp,\n+\t\tstruct tf_tbl_scope_cb *tbl_scope_cb,\n+\t\tint dir)\n+{\n+\tstruct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];\n+\tstruct tf_em_table *tbl;\n+\tint i;\n+\n+\tfor (i = KEY0_TABLE; i < MAX_TABLE; i++) {\n+\t\ttbl = &ctxp->em_tables[i];\n+\n+\t\tif (tbl->num_entries != 0 && tbl->entry_size != 0) {\n+\t\t\ttf_msg_em_mem_unrgtr(tfp, 
&tbl->ctx_id);\n+\t\t\ttf_em_free_page_table(tbl);\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Registers EM Ctx in Firmware\n+ *\n+ * [in] tfp\n+ *   Pointer to a TruFlow handle\n+ *\n+ * [in] tbl_scope_cb\n+ *   Pointer to a table scope control block\n+ *\n+ * [in] dir\n+ *   Receive or transmit direction\n+ *\n+ * Returns:\n+ *   0       - Success\n+ *   -ENOMEM - Out of Memory\n+ */\n+static int\n+tf_em_ctx_reg(struct tf *tfp,\n+\t      struct tf_tbl_scope_cb *tbl_scope_cb,\n+\t      int dir)\n+{\n+\tstruct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];\n+\tstruct tf_em_table *tbl;\n+\tint rc;\n+\tint i;\n+\n+\tfor (i = KEY0_TABLE; i < MAX_TABLE; i++) {\n+\t\ttbl = &ctxp->em_tables[i];\n+\n+\t\tif (tbl->num_entries && tbl->entry_size) {\n+\t\t\trc = tf_em_size_table(tbl);\n+\n+\t\t\tif (rc)\n+\t\t\t\tgoto cleanup;\n+\n+\t\t\trc = tf_em_alloc_page_table(tbl);\n+\t\t\tif (rc)\n+\t\t\t\tgoto cleanup;\n+\n+\t\t\ttf_em_setup_page_table(tbl);\n+\t\t\trc = tf_msg_em_mem_rgtr(tfp,\n+\t\t\t\t\t\ttbl->num_lvl - 1,\n+\t\t\t\t\t\tTF_EM_PAGE_SIZE_ENUM,\n+\t\t\t\t\t\ttbl->l0_dma_addr,\n+\t\t\t\t\t\t&tbl->ctx_id);\n+\t\t\tif (rc)\n+\t\t\t\tgoto cleanup;\n+\t\t}\n+\t}\n+\treturn rc;\n+\n+cleanup:\n+\ttf_em_ctx_unreg(tfp, tbl_scope_cb, dir);\n+\treturn rc;\n+}\n+\n+/**\n+ * Validates EM number of entries requested\n+ *\n+ * [in] tbl_scope_cb\n+ *   Pointer to table scope control block to be populated\n+ *\n+ * [in] parms\n+ *   Pointer to input parameters\n+ *\n+ * Returns:\n+ *   0       - Success\n+ *   -EINVAL - Parameter error\n+ */\n+static int\n+tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,\n+\t\t\t   struct tf_alloc_tbl_scope_parms *parms)\n+{\n+\tuint32_t cnt;\n+\n+\tif (parms->rx_mem_size_in_mb != 0) {\n+\t\tuint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);\n+\t\tuint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)\n+\t\t\t\t     + 1);\n+\t\tuint32_t num_entries = (parms->rx_mem_size_in_mb *\n+\t\t\t\t\tTF_MEGABYTE) / (key_b + action_b);\n+\n+\t\tif (num_entries < TF_EM_MIN_ENTRIES) {\n+\t\t\tPMD_DRV_LOG(ERR, \"EEM: Insufficient memory requested:\"\n+\t\t\t\t    \"%uMB\\n\",\n+\t\t\t\t    parms->rx_mem_size_in_mb);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tcnt = TF_EM_MIN_ENTRIES;\n+\t\twhile (num_entries > cnt &&\n+\t\t       cnt <= TF_EM_MAX_ENTRIES)\n+\t\t\tcnt *= 2;\n+\n+\t\tif (cnt > TF_EM_MAX_ENTRIES) {\n+\t\t\tPMD_DRV_LOG(ERR, \"EEM: Invalid number of Tx requested: \"\n+\t\t\t\t    \"%u\\n\",\n+\t\t       (parms->tx_num_flows_in_k * TF_KILOBYTE));\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tparms->rx_num_flows_in_k = cnt / TF_KILOBYTE;\n+\t} else {\n+\t\tif ((parms->rx_num_flows_in_k * TF_KILOBYTE) <\n+\t\t    TF_EM_MIN_ENTRIES ||\n+\t\t    (parms->rx_num_flows_in_k * TF_KILOBYTE) >\n+\t\t    tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"EEM: Invalid number of Rx flows \"\n+\t\t\t\t    \"requested:%u max:%u\\n\",\n+\t\t\t\t    parms->rx_num_flows_in_k * TF_KILOBYTE,\n+\t\t\ttbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\t/* must be a power-of-2 supported value\n+\t\t * in the range 32K - 128M\n+\t\t */\n+\t\tcnt = TF_EM_MIN_ENTRIES;\n+\t\twhile ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&\n+\t\t       cnt <= TF_EM_MAX_ENTRIES)\n+\t\t\tcnt *= 2;\n+\n+\t\tif (cnt > TF_EM_MAX_ENTRIES) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"EEM: Invalid number of Rx requested: %u\\n\",\n+\t\t\t\t    (parms->rx_num_flows_in_k * TF_KILOBYTE));\n+\t\t\treturn 
-EINVAL;\n+\t\t}\n+\t}\n+\n+\tif (parms->tx_mem_size_in_mb != 0) {\n+\t\tuint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);\n+\t\tuint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)\n+\t\t\t\t     + 1);\n+\t\tuint32_t num_entries = (parms->tx_mem_size_in_mb *\n+\t\t\t\t\t(TF_KILOBYTE * TF_KILOBYTE)) /\n+\t\t\t(key_b + action_b);\n+\n+\t\tif (num_entries < TF_EM_MIN_ENTRIES) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"EEM: Insufficient memory requested:%uMB\\n\",\n+\t\t\t\t    parms->rx_mem_size_in_mb);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tcnt = TF_EM_MIN_ENTRIES;\n+\t\twhile (num_entries > cnt &&\n+\t\t       cnt <= TF_EM_MAX_ENTRIES)\n+\t\t\tcnt *= 2;\n+\n+\t\tif (cnt > TF_EM_MAX_ENTRIES) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"EEM: Invalid number of Tx requested: %u\\n\",\n+\t\t       (parms->tx_num_flows_in_k * TF_KILOBYTE));\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tparms->tx_num_flows_in_k = cnt / TF_KILOBYTE;\n+\t} else {\n+\t\tif ((parms->tx_num_flows_in_k * TF_KILOBYTE) <\n+\t\t    TF_EM_MIN_ENTRIES ||\n+\t\t    (parms->tx_num_flows_in_k * TF_KILOBYTE) >\n+\t\t    tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"EEM: Invalid number of Tx flows \"\n+\t\t\t\t    \"requested:%u max:%u\\n\",\n+\t\t\t\t    (parms->tx_num_flows_in_k * TF_KILOBYTE),\n+\t\t\ttbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tcnt = TF_EM_MIN_ENTRIES;\n+\t\twhile ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&\n+\t\t       cnt <= TF_EM_MAX_ENTRIES)\n+\t\t\tcnt *= 2;\n+\n+\t\tif (cnt > TF_EM_MAX_ENTRIES) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"EEM: Invalid number of Tx requested: %u\\n\",\n+\t\t       (parms->tx_num_flows_in_k * TF_KILOBYTE));\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\t/* Rx */\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].num_entries =\n+\t\tparms->rx_num_flows_in_k * TF_KILOBYTE;\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].entry_size =\n+\t\tparms->rx_max_key_sz_in_bits / 8;\n+\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].num_entries =\n+\t\tparms->rx_num_flows_in_k * TF_KILOBYTE;\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].entry_size =\n+\t\tparms->rx_max_key_sz_in_bits / 8;\n+\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].num_entries =\n+\t\tparms->rx_num_flows_in_k * TF_KILOBYTE;\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].entry_size =\n+\t\tparms->tx_max_action_entry_sz_in_bits / 8;\n+\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[EFC_TABLE].num_entries =\n+\t\t0;\n+\n+\t/* Tx */\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].num_entries =\n+\t\tparms->tx_num_flows_in_k * TF_KILOBYTE;\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].entry_size =\n+\t\tparms->tx_max_key_sz_in_bits / 8;\n+\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].num_entries =\n+\t\tparms->tx_num_flows_in_k * TF_KILOBYTE;\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].entry_size =\n+\t\tparms->tx_max_key_sz_in_bits / 8;\n+\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].num_entries =\n+\t\tparms->tx_num_flows_in_k * TF_KILOBYTE;\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].entry_size =\n+\t\tparms->tx_max_action_entry_sz_in_bits / 8;\n+\n+\ttbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[EFC_TABLE].num_entries =\n+\t\t0;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Internal function 
to set a Table Entry. Supports all internal Table Types\n+ *\n+ * [in] tfp\n+ *   Pointer to TruFlow handle\n+ *\n+ * [in] parms\n+ *   Pointer to input parameters\n+ *\n+ * Returns:\n+ *   0       - Success\n+ *   -EINVAL - Parameter error\n+ */\n+static int\n+tf_set_tbl_entry_internal(struct tf *tfp,\n+\t\t\t  struct tf_set_tbl_entry_parms *parms)\n+{\n+\tint rc;\n+\tint id;\n+\tuint32_t index;\n+\tstruct bitalloc *session_pool;\n+\tstruct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);\n+\n+\t/* Lookup the pool using the table type of the element */\n+\trc = tf_rm_lookup_tbl_type_pool(tfs,\n+\t\t\t\t\tparms->dir,\n+\t\t\t\t\tparms->type,\n+\t\t\t\t\t&session_pool);\n+\t/* Error logging handled by tf_rm_lookup_tbl_type_pool */\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tindex = parms->idx;\n+\n+\tif (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&\n+\t    parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Type not supported, type:%d\\n\",\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type);\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\t/* Adjust the returned index/offset as there is no guarantee\n+\t * that the start is 0 at time of RM allocation\n+\t */\n+\ttf_rm_convert_index(tfs,\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type,\n+\t\t\t    TF_RM_CONVERT_RM_BASE,\n+\t\t\t    parms->idx,\n+\t\t\t    &index);\n+\n+\t/* Verify that the entry has been previously allocated */\n+\tid = ba_inuse(session_pool, index);\n+\tif (id != 1) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t   \"dir:%d, Invalid or not allocated index, type:%d, idx:%d\\n\",\n+\t\t   parms->dir,\n+\t\t   parms->type,\n+\t\t   index);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Set the entry */\n+\trc = tf_msg_set_tbl_entry(tfp,\n+\t\t\t\t  parms->dir,\n+\t\t\t\t  parms->type,\n+\t\t\t\t  parms->data_sz_in_bytes,\n+\t\t\t\t  parms->data,\n+\t\t\t\t  parms->idx);\n+\tif (rc) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Set failed, type:%d, rc:%d\\n\",\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type,\n+\t\t\t    rc);\n+\t}\n+\n+\treturn rc;\n+}\n+\n+/**\n+ * Internal function to get a Table Entry. 
Supports all Table Types\n+ * except the TF_TBL_TYPE_EXT as that is handled as a table scope.\n+ *\n+ * [in] tfp\n+ *   Pointer to TruFlow handle\n+ *\n+ * [in] parms\n+ *   Pointer to input parameters\n+ *\n+ * Returns:\n+ *   0       - Success\n+ *   -EINVAL - Parameter error\n+ */\n+static int\n+tf_get_tbl_entry_internal(struct tf *tfp,\n+\t\t\t  struct tf_get_tbl_entry_parms *parms)\n+{\n+\tint rc;\n+\tint id;\n+\tuint32_t index;\n+\tstruct bitalloc *session_pool;\n+\tstruct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);\n+\n+\t/* Lookup the pool using the table type of the element */\n+\trc = tf_rm_lookup_tbl_type_pool(tfs,\n+\t\t\t\t\tparms->dir,\n+\t\t\t\t\tparms->type,\n+\t\t\t\t\t&session_pool);\n+\t/* Error logging handled by tf_rm_lookup_tbl_type_pool */\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tindex = parms->idx;\n+\n+\t/* Adjust the returned index/offset as there is no guarantee\n+\t * that the start is 0 at time of RM allocation\n+\t */\n+\ttf_rm_convert_index(tfs,\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type,\n+\t\t\t    TF_RM_CONVERT_RM_BASE,\n+\t\t\t    parms->idx,\n+\t\t\t    &index);\n+\n+\t/* Verify that the entry has been previously allocated */\n+\tid = ba_inuse(session_pool, index);\n+\tif (id != 1) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t   \"dir:%d, Invalid or not allocated index, type:%d, idx:%d\\n\",\n+\t\t   parms->dir,\n+\t\t   parms->type,\n+\t\t   index);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Get the entry */\n+\trc = tf_msg_get_tbl_entry(tfp,\n+\t\t\t\t  parms->dir,\n+\t\t\t\t  parms->type,\n+\t\t\t\t  parms->data_sz_in_bytes,\n+\t\t\t\t  parms->data,\n+\t\t\t\t  parms->idx);\n+\tif (rc) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Get failed, type:%d, rc:%d\\n\",\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type,\n+\t\t\t    rc);\n+\t}\n+\n+\treturn rc;\n+}\n+\n+#if (TF_SHADOW == 1)\n+/**\n+ * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for\n+ * the requested entry. If found the ref count is incremente and\n+ * returned.\n+ *\n+ * [in] tfs\n+ *   Pointer to session\n+ * [in] parms\n+ *   Allocation parameters\n+ *\n+ * Return:\n+ *  0       - Success, entry found and ref count incremented\n+ *  -ENOENT - Failure, entry not found\n+ */\n+static int\n+tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused,\n+\t\t\t  struct tf_alloc_tbl_entry_parms *parms __rte_unused)\n+{\n+\tPMD_DRV_LOG(ERR,\n+\t\t    \"dir:%d, Entry Alloc with search not supported\\n\",\n+\t\t    parms->dir);\n+\n+\n+\treturn -EOPNOTSUPP;\n+}\n+\n+/**\n+ * Free Tbl entry from the Shadow DB. Shadow DB is searched for\n+ * the requested entry. 
If found the ref count is decremente and\n+ * new ref_count returned.\n+ *\n+ * [in] tfs\n+ *   Pointer to session\n+ * [in] parms\n+ *   Allocation parameters\n+ *\n+ * Return:\n+ *  0       - Success, entry found and ref count decremented\n+ *  -ENOENT - Failure, entry not found\n+ */\n+static int\n+tf_free_tbl_entry_shadow(struct tf_session *tfs,\n+\t\t\t struct tf_free_tbl_entry_parms *parms)\n+{\n+\tPMD_DRV_LOG(ERR,\n+\t\t    \"dir:%d, Entry Free with search not supported\\n\",\n+\t\t    parms->dir);\n+\n+\treturn -EOPNOTSUPP;\n+}\n+#endif /* TF_SHADOW */\n+\n+/**\n+ * Create External Tbl pool of memory indexes.\n+ *\n+ * [in] session\n+ *   Pointer to session\n+ * [in] dir\n+ *   direction\n+ * [in] tbl_scope_cb\n+ *   pointer to the table scope\n+ * [in] tbl_scope_id\n+ *   id of the table scope\n+ * [in] num_entries\n+ *   number of entries to write\n+ * [in] entry_sz_bytes\n+ *   size of each entry\n+ *\n+ * Return:\n+ *  0       - Success, entry allocated - no search support\n+ *  -ENOMEM -EINVAL -EOPNOTSUPP\n+ *          - Failure, entry not allocated, out of resources\n+ */\n+static int\n+tf_create_tbl_pool_external(struct tf_session *session,\n+\t\t\t    enum tf_dir dir,\n+\t\t\t    struct tf_tbl_scope_cb *tbl_scope_cb,\n+\t\t\t    uint32_t table_scope_id,\n+\t\t\t    uint32_t num_entries,\n+\t\t\t    uint32_t entry_sz_bytes)\n+\n+{\n+\tstruct tfp_calloc_parms parms;\n+\tuint32_t i, j;\n+\tint rc = 0;\n+\tstruct stack *pool = &tbl_scope_cb->ext_pool[dir][TF_EXT_POOL_0];\n+\n+\tparms.nitems = num_entries;\n+\tparms.size = sizeof(uint32_t);\n+\tparms.alignment = 0;\n+\n+\tif (tfp_calloc(&parms) != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"%d: TBL: external pool failure %s\\n\",\n+\t\t\t    dir, strerror(-ENOMEM));\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Create empty stack\n+\t */\n+\trc = stack_init(num_entries, parms.mem_va, pool);\n+\n+\tif (rc != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"%d: TBL: stack init failure %s\\n\",\n+\t\t\t    dir, strerror(-rc));\n+\t\tgoto cleanup;\n+\t}\n+\n+\t/* Save the  malloced memory address so that it can\n+\t * be freed when the table scope is freed.\n+\t */\n+\ttbl_scope_cb->ext_pool_mem[dir][TF_EXT_POOL_0] =\n+\t\t(uint32_t *)parms.mem_va;\n+\n+\t/* Fill pool with indexes\n+\t */\n+\tj = num_entries * entry_sz_bytes - 1;\n+\n+\tfor (i = 0; i < num_entries; i++) {\n+\t\trc = stack_push(pool, j);\n+\t\tif (rc != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"%d TBL: stack failure %s\\n\",\n+\t\t\t\t    dir, strerror(-rc));\n+\t\t\tgoto cleanup;\n+\t\t}\n+\t\tj -= entry_sz_bytes;\n+\t}\n+\n+\tif (!stack_is_full(pool)) {\n+\t\trc = -EINVAL;\n+\t\tPMD_DRV_LOG(ERR, \"%d TBL: stack failure %s\\n\",\n+\t\t\t    dir, strerror(-rc));\n+\t\tgoto cleanup;\n+\t}\n+\t/* Set the table scope associated with the pool\n+\t */\n+\tsession->ext_pool_2_scope[dir][TF_EXT_POOL_0] = table_scope_id;\n+\n+\treturn 0;\n+cleanup:\n+\ttfp_free((void *)parms.mem_va);\n+\treturn rc;\n+}\n+\n+/**\n+ * Destroy External Tbl pool of memory indexes.\n+ *\n+ * [in] session\n+ *   Pointer to session\n+ * [in] dir\n+ *   direction\n+ * [in] tbl_scope_cb\n+ *   pointer to the table scope\n+ *\n+ */\n+static void\n+tf_destroy_tbl_pool_external(struct tf_session *session,\n+\t\t\t    enum tf_dir dir,\n+\t\t\t    struct tf_tbl_scope_cb *tbl_scope_cb)\n+{\n+\tuint32_t *ext_pool_mem =\n+\t\ttbl_scope_cb->ext_pool_mem[dir][TF_EXT_POOL_0];\n+\n+\ttfp_free(ext_pool_mem);\n+\n+\t/* Set the table scope associated with the pool\n+\t */\n+\tsession->ext_pool_2_scope[dir][TF_EXT_POOL_0] = TF_TBL_SCOPE_INVALID;\n+}\n+\n+/**\n+ * 
Allocate External Tbl entry from the Session Pool.\n+ *\n+ * [in] tfp\n+ *   Pointer to Truflow Handle\n+ * [in] parms\n+ *   Allocation parameters\n+ *\n+ * Return:\n+ *  0       - Success, entry allocated - no search support\n+ *  -ENOMEM -EINVAL -EOPNOTSUPP\n+ *          - Failure, entry not allocated, out of resources\n+ */\n+static int\n+tf_alloc_tbl_entry_pool_external(struct tf *tfp,\n+\t\t\t\t struct tf_alloc_tbl_entry_parms *parms)\n+{\n+\tint rc;\n+\tuint32_t index;\n+\tstruct tf_session *tfs;\n+\tuint32_t tbl_scope_id;\n+\tstruct tf_tbl_scope_cb *tbl_scope_cb;\n+\tstruct stack *pool;\n+\n+\t/* Check parameters */\n+\tif (tfp == NULL || parms == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Invalid parameters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (tfp->session == NULL || tfp->session->core_data == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Session info invalid\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttfs = (struct tf_session *)(tfp->session->core_data);\n+\n+\tif (parms->type != TF_TBL_TYPE_EXT) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Type not supported, type:%d\\n\",\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type);\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\t/* Get the pool info from the table scope\n+\t */\n+\ttbl_scope_id = tfs->ext_pool_2_scope[parms->dir][TF_EXT_POOL_0];\n+\ttbl_scope_cb = tbl_scope_cb_find(tfs, tbl_scope_id);\n+\n+\tif (tbl_scope_cb == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, table scope not allocated\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\tpool = &tbl_scope_cb->ext_pool[parms->dir][TF_EXT_POOL_0];\n+\n+\t/* Allocate an element\n+\t */\n+\trc = stack_pop(pool, &index);\n+\n+\tif (rc != 0) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t   \"dir:%d, Allocation failed, type:%d\\n\",\n+\t\t   parms->dir,\n+\t\t   parms->type);\n+\t\treturn rc;\n+\t}\n+\tparms->idx = index;\n+\treturn rc;\n+}\n+\n+/**\n+ * Allocate Internal Tbl entry from the Session Pool.\n+ *\n+ * [in] tfp\n+ *   Pointer to Truflow Handle\n+ * [in] parms\n+ *   Allocation parameters\n+ *\n+ * Return:\n+ *  0       - Success, entry found and ref count decremented\n+ *  -ENOMEM - Failure, entry not allocated, out of resources\n+ */\n+static int\n+tf_alloc_tbl_entry_pool_internal(struct tf *tfp,\n+\t\t\t\t struct tf_alloc_tbl_entry_parms *parms)\n+{\n+\tint rc;\n+\tint id;\n+\tint free_cnt;\n+\tuint32_t index;\n+\tstruct bitalloc *session_pool;\n+\tstruct tf_session *tfs;\n+\n+\t/* Check parameters */\n+\tif (tfp == NULL || parms == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Invalid parameters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (tfp->session == NULL || tfp->session->core_data == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Session info invalid\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttfs = (struct tf_session *)(tfp->session->core_data);\n+\n+\tif (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&\n+\t    parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&\n+\t    parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&\n+\t    parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&\n+\t    parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&\n+\t    parms->type != TF_TBL_TYPE_ACT_ENCAP_64B) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Type not supported, type:%d\\n\",\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type);\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\t/* Lookup the pool using the table type of the element */\n+\trc = tf_rm_lookup_tbl_type_pool(tfs,\n+\t\t\t\t\tparms->dir,\n+\t\t\t\t\tparms->type,\n+\t\t\t\t\t&session_pool);\n+\t/* Error logging handled 
by tf_rm_lookup_tbl_type_pool */\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tid = ba_alloc(session_pool);\n+\tif (id == -1) {\n+\t\tfree_cnt = ba_free_count(session_pool);\n+\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t   \"dir:%d, Allocation failed, type:%d, free:%d\\n\",\n+\t\t   parms->dir,\n+\t\t   parms->type,\n+\t\t   free_cnt);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Adjust the returned index/offset as there is no guarantee\n+\t * that the start is 0 at time of RM allocation\n+\t */\n+\ttf_rm_convert_index(tfs,\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type,\n+\t\t\t    TF_RM_CONVERT_ADD_BASE,\n+\t\t\t    id,\n+\t\t\t    &index);\n+\tparms->idx = index;\n+\treturn rc;\n+}\n+\n+/**\n+ * Free External Tbl entry to the session pool.\n+ *\n+ * [in] tfp\n+ *   Pointer to Truflow Handle\n+ * [in] parms\n+ *   Allocation parameters\n+ *\n+ * Return:\n+ *  0       - Success, entry freed\n+ *\n+ * - Failure, entry not successfully freed for these reasons\n+ *  -ENOMEM\n+ *  -EOPNOTSUPP\n+ *  -EINVAL\n+ */\n+static int\n+tf_free_tbl_entry_pool_external(struct tf *tfp,\n+\t\t       struct tf_free_tbl_entry_parms *parms)\n+{\n+\tint rc = 0;\n+\tstruct tf_session *tfs;\n+\tuint32_t index;\n+\tuint32_t tbl_scope_id;\n+\tstruct tf_tbl_scope_cb *tbl_scope_cb;\n+\tstruct stack *pool;\n+\n+\t/* Check parameters */\n+\tif (tfp == NULL || parms == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Invalid parameters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (tfp->session == NULL || tfp->session->core_data == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Session info invalid\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttfs = (struct tf_session *)(tfp->session->core_data);\n+\n+\tif (parms->type != TF_TBL_TYPE_EXT) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Type not supported, type:%d\\n\",\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type);\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\t/* Get the pool info from the table scope\n+\t */\n+\ttbl_scope_id = tfs->ext_pool_2_scope[parms->dir][TF_EXT_POOL_0];\n+\ttbl_scope_cb = tbl_scope_cb_find(tfs, tbl_scope_id);\n+\n+\tif (tbl_scope_cb == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, table scope error\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\tpool = &tbl_scope_cb->ext_pool[parms->dir][TF_EXT_POOL_0];\n+\n+\tindex = parms->idx;\n+\n+\trc = stack_push(pool, index);\n+\n+\tif (rc != 0) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t   \"dir:%d, consistency error, stack full, type:%d, idx:%d\\n\",\n+\t\t   parms->dir,\n+\t\t   parms->type,\n+\t\t   index);\n+\t}\n+\treturn rc;\n+}\n+\n+/**\n+ * Free Internal Tbl entry from the Session Pool.\n+ *\n+ * [in] tfp\n+ *   Pointer to Truflow Handle\n+ * [in] parms\n+ *   Allocation parameters\n+ *\n+ * Return:\n+ *  0       - Success, entry found and ref count decremented\n+ *  -ENOMEM - Failure, entry not allocated, out of resources\n+ */\n+static int\n+tf_free_tbl_entry_pool_internal(struct tf *tfp,\n+\t\t       struct tf_free_tbl_entry_parms *parms)\n+{\n+\tint rc = 0;\n+\tint id;\n+\tstruct bitalloc *session_pool;\n+\tstruct tf_session *tfs;\n+\tuint32_t index;\n+\n+\t/* Check parameters */\n+\tif (tfp == NULL || parms == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Invalid parameters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (tfp->session == NULL || tfp->session->core_data == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Session info invalid\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttfs = (struct tf_session *)(tfp->session->core_data);\n+\n+\tif (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD 
&&\n+\t    parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&\n+\t    parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&\n+\t    parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&\n+\t    parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&\n+\t    parms->type != TF_TBL_TYPE_ACT_ENCAP_64B) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Type not supported, type:%d\\n\",\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type);\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\t/* Lookup the pool using the table type of the element */\n+\trc = tf_rm_lookup_tbl_type_pool(tfs,\n+\t\t\t\t\tparms->dir,\n+\t\t\t\t\tparms->type,\n+\t\t\t\t\t&session_pool);\n+\t/* Error logging handled by tf_rm_lookup_tbl_type_pool */\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tindex = parms->idx;\n+\n+\t/* Adjust the returned index/offset as there is no guarantee\n+\t * that the start is 0 at time of RM allocation\n+\t */\n+\ttf_rm_convert_index(tfs,\n+\t\t\t    parms->dir,\n+\t\t\t    parms->type,\n+\t\t\t    TF_RM_CONVERT_RM_BASE,\n+\t\t\t    parms->idx,\n+\t\t\t    &index);\n+\n+\t/* Check if element was indeed allocated */\n+\tid = ba_inuse_free(session_pool, index);\n+\tif (id == -1) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t   \"dir:%d, Element not previously alloc'ed, type:%d, idx:%d\\n\",\n+\t\t   parms->dir,\n+\t\t   parms->type,\n+\t\t   index);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\treturn rc;\n+}\n+\n /* API defined in tf_tbl.h */\n void\n tf_init_tbl_pool(struct tf_session *session)\n@@ -41,3 +1401,436 @@ tf_init_tbl_pool(struct tf_session *session)\n \t\t\tTF_TBL_SCOPE_INVALID;\n \t}\n }\n+\n+/* API defined in tf_em.h */\n+struct tf_tbl_scope_cb *\n+tbl_scope_cb_find(struct tf_session *session,\n+\t\t  uint32_t tbl_scope_id)\n+{\n+\tint i;\n+\n+\t/* Check that id is valid */\n+\ti = ba_inuse(session->tbl_scope_pool_rx, tbl_scope_id);\n+\tif (i < 0)\n+\t\treturn NULL;\n+\n+\tfor (i = 0; i < TF_NUM_TBL_SCOPE; i++) {\n+\t\tif (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id)\n+\t\t\treturn &session->tbl_scopes[i];\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+/* API defined in tf_core.h */\n+int\n+tf_free_eem_tbl_scope_cb(struct tf *tfp,\n+\t\t\t struct tf_free_tbl_scope_parms *parms)\n+{\n+\tint rc = 0;\n+\tenum tf_dir  dir;\n+\tstruct tf_tbl_scope_cb *tbl_scope_cb;\n+\tstruct tf_session *session;\n+\n+\tsession = (struct tf_session *)(tfp->session->core_data);\n+\n+\ttbl_scope_cb = tbl_scope_cb_find(session,\n+\t\t\t\t\t parms->tbl_scope_id);\n+\n+\tif (tbl_scope_cb == NULL)\n+\t\treturn -EINVAL;\n+\n+\t/* Free Table control block */\n+\tba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);\n+\n+\t/* free table scope locks */\n+\tfor (dir = 0; dir < TF_DIR_MAX; dir++) {\n+\t\t/* Free associated external pools\n+\t\t */\n+\t\ttf_destroy_tbl_pool_external(session,\n+\t\t\t\t\t     dir,\n+\t\t\t\t\t     tbl_scope_cb);\n+\t\ttf_msg_em_op(tfp,\n+\t\t\t     dir,\n+\t\t\t     HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);\n+\n+\t\t/* free table scope and all associated resources */\n+\t\ttf_em_ctx_unreg(tfp, tbl_scope_cb, dir);\n+\t}\n+\n+\treturn rc;\n+}\n+\n+/* API defined in tf_em.h */\n+int\n+tf_alloc_eem_tbl_scope(struct tf *tfp,\n+\t\t       struct tf_alloc_tbl_scope_parms *parms)\n+{\n+\tint rc;\n+\tenum tf_dir dir;\n+\tstruct tf_tbl_scope_cb *tbl_scope_cb;\n+\tstruct tf_em_table *em_tables;\n+\tint index;\n+\tstruct tf_session *session;\n+\tstruct tf_free_tbl_scope_parms free_parms;\n+\n+\t/* check parameters */\n+\tif (parms == NULL || tfp->session == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"TBL: Invalid parameters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tsession = (struct tf_session 
*)tfp->session->core_data;\n+\n+\t/* Get Table Scope control block from the session pool */\n+\tindex = ba_alloc(session->tbl_scope_pool_rx);\n+\tif (index == -1) {\n+\t\tPMD_DRV_LOG(ERR, \"EEM: Unable to allocate table scope \"\n+\t\t\t    \"Control Block\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttbl_scope_cb = &session->tbl_scopes[index];\n+\ttbl_scope_cb->index = index;\n+\ttbl_scope_cb->tbl_scope_id = index;\n+\tparms->tbl_scope_id = index;\n+\n+\tfor (dir = 0; dir < TF_DIR_MAX; dir++) {\n+\t\trc = tf_msg_em_qcaps(tfp,\n+\t\t\t\t     dir,\n+\t\t\t\t     &tbl_scope_cb->em_caps[dir]);\n+\t\tif (rc) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\"EEM: Unable to query for EEM capability\\n\");\n+\t\t\tgoto cleanup;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Validate and setup table sizes\n+\t */\n+\tif (tf_em_validate_num_entries(tbl_scope_cb, parms))\n+\t\tgoto cleanup;\n+\n+\tfor (dir = 0; dir < TF_DIR_MAX; dir++) {\n+\t\t/*\n+\t\t * Allocate tables and signal configuration to FW\n+\t\t */\n+\t\trc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);\n+\t\tif (rc) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"EEM: Unable to register for EEM ctx\\n\");\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tem_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;\n+\t\trc = tf_msg_em_cfg(tfp,\n+\t\t\t\t   em_tables[KEY0_TABLE].num_entries,\n+\t\t\t\t   em_tables[KEY0_TABLE].ctx_id,\n+\t\t\t\t   em_tables[KEY1_TABLE].ctx_id,\n+\t\t\t\t   em_tables[RECORD_TABLE].ctx_id,\n+\t\t\t\t   em_tables[EFC_TABLE].ctx_id,\n+\t\t\t\t   dir);\n+\t\tif (rc) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\"TBL: Unable to configure EEM in firmware\\n\");\n+\t\t\tgoto cleanup_full;\n+\t\t}\n+\n+\t\trc = tf_msg_em_op(tfp,\n+\t\t\t\t  dir,\n+\t\t\t\t  HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);\n+\n+\t\tif (rc) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"EEM: Unable to enable EEM in firmware\\n\");\n+\t\t\tgoto cleanup_full;\n+\t\t}\n+\n+\t\t/* Allocate the pool of offsets of the external memory.\n+\t\t * Initially, this is a single fixed size pool for all external\n+\t\t * actions related to a single table scope.\n+\t\t */\n+\t\trc = tf_create_tbl_pool_external(session,\n+\t\t\t\t\t\t dir,\n+\t\t\t\t\t\t tbl_scope_cb,\n+\t\t\t\t\t\t index,\n+\t\t\t\t\t\t TF_EXT_POOL_ENTRY_CNT,\n+\t\t\t\t\t\t TF_EXT_POOL_ENTRY_SZ_BYTES);\n+\t\tif (rc) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"%d TBL: Unable to allocate idx pools %s\\n\",\n+\t\t\t\t    dir,\n+\t\t\t\t    strerror(-rc));\n+\t\t\tgoto cleanup_full;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+cleanup_full:\n+\tfree_parms.tbl_scope_id = index;\n+\ttf_free_eem_tbl_scope_cb(tfp, &free_parms);\n+\treturn -EINVAL;\n+\n+cleanup:\n+\t/* Free Table control block */\n+\tba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);\n+\treturn -EINVAL;\n+}\n+\n+/* API defined in tf_core.h */\n+int\n+tf_set_tbl_entry(struct tf *tfp,\n+\t\t struct tf_set_tbl_entry_parms *parms)\n+{\n+\tint rc = 0;\n+\tstruct tf_tbl_scope_cb *tbl_scope_cb;\n+\tstruct tf_session *session;\n+\n+\tif (tfp == NULL || parms == NULL || parms->data == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (tfp->session == NULL || tfp->session->core_data == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Session info invalid\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (parms->type == TF_TBL_TYPE_EXT) {\n+\t\tvoid *base_addr;\n+\t\tuint32_t offset = TF_ACT_REC_INDEX_2_OFFSET(parms->idx);\n+\t\tuint32_t tbl_scope_id;\n+\n+\t\tsession = (struct tf_session *)(tfp->session->core_data);\n+\n+\t\ttbl_scope_id 
=\n+\t\t\tsession->ext_pool_2_scope[parms->dir][TF_EXT_POOL_0];\n+\n+\t\tif (tbl_scope_id == TF_TBL_SCOPE_INVALID)  {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"dir:%d, Table scope not allocated\\n\",\n+\t\t\t\t    parms->dir);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\t/* Get the table scope control block associated with the\n+\t\t * external pool\n+\t\t */\n+\n+\t\ttbl_scope_cb = tbl_scope_cb_find(session, tbl_scope_id);\n+\n+\t\tif (tbl_scope_cb == NULL)\n+\t\t\treturn -EINVAL;\n+\n+\t\t/* External table, implicitly the Action table */\n+\t\tbase_addr = tf_em_get_table_page(tbl_scope_cb,\n+\t\t\t\t\t\t parms->dir,\n+\t\t\t\t\t\t offset,\n+\t\t\t\t\t\t RECORD_TABLE);\n+\t\tif (base_addr == NULL) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"dir:%d, Base address lookup failed\\n\",\n+\t\t\t\t    parms->dir);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\toffset %= TF_EM_PAGE_SIZE;\n+\t\trte_memcpy((char *)base_addr + offset,\n+\t\t\t   parms->data,\n+\t\t\t   parms->data_sz_in_bytes);\n+\t} else {\n+\t\t/* Internal table type processing */\n+\t\trc = tf_set_tbl_entry_internal(tfp, parms);\n+\t\tif (rc) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"dir:%d, Set failed, type:%d, rc:%d\\n\",\n+\t\t\t\t    parms->dir,\n+\t\t\t\t    parms->type,\n+\t\t\t\t    rc);\n+\t\t}\n+\t}\n+\n+\treturn rc;\n+}\n+\n+/* API defined in tf_core.h */\n+int\n+tf_get_tbl_entry(struct tf *tfp,\n+\t\t struct tf_get_tbl_entry_parms *parms)\n+{\n+\tint rc = 0;\n+\n+\tif (tfp == NULL || parms == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (tfp->session == NULL || tfp->session->core_data == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Session info invalid\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (parms->type == TF_TBL_TYPE_EXT) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, External table type not supported\\n\",\n+\t\t\t    parms->dir);\n+\n+\t\trc = -EOPNOTSUPP;\n+\t} else {\n+\t\t/* Internal table type processing */\n+\t\trc = tf_get_tbl_entry_internal(tfp, parms);\n+\t\tif (rc)\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"dir:%d, Get failed, type:%d, rc:%d\\n\",\n+\t\t\t\t    parms->dir,\n+\t\t\t\t    parms->type,\n+\t\t\t\t    rc);\n+\t}\n+\n+\treturn rc;\n+}\n+\n+/* API defined in tf_core.h */\n+int\n+tf_alloc_tbl_scope(struct tf *tfp,\n+\t\t   struct tf_alloc_tbl_scope_parms *parms)\n+{\n+\tint rc;\n+\n+\t/* check parameters */\n+\tif (parms == NULL || tfp == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"TBL: Invalid parameters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trc = tf_alloc_eem_tbl_scope(tfp, parms);\n+\n+\treturn rc;\n+}\n+\n+/* API defined in tf_core.h */\n+int\n+tf_free_tbl_scope(struct tf *tfp,\n+\t\t  struct tf_free_tbl_scope_parms *parms)\n+{\n+\tint rc;\n+\n+\t/* check parameters */\n+\tif (parms == NULL || tfp == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"TBL: Invalid parameters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* free table scope and all associated resources */\n+\trc = tf_free_eem_tbl_scope_cb(tfp, parms);\n+\n+\treturn rc;\n+}\n+\n+/* API defined in tf_core.h */\n+int\n+tf_alloc_tbl_entry(struct tf *tfp,\n+\t\t   struct tf_alloc_tbl_entry_parms *parms)\n+{\n+\tint rc;\n+#if (TF_SHADOW == 1)\n+\tstruct tf_session *tfs;\n+#endif /* TF_SHADOW */\n+\n+\t/* Check parameters */\n+\tif (parms == NULL || tfp == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"TBL: Invalid parameters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\t/*\n+\t * No shadow copy support for external tables, allocate and return\n+\t */\n+\tif (parms->type == TF_TBL_TYPE_EXT) {\n+\t\trc = tf_alloc_tbl_entry_pool_external(tfp, parms);\n+\t\treturn 
rc;\n+\t}\n+\n+#if (TF_SHADOW == 1)\n+\tif (tfp->session == NULL || tfp->session->core_data == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Session info invalid\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttfs = (struct tf_session *)(tfp->session->core_data);\n+\n+\t/* Search the Shadow DB for requested element. If not found go\n+\t * allocate one from the Session Pool\n+\t */\n+\tif (parms->search_enable && tfs->shadow_copy) {\n+\t\trc = tf_alloc_tbl_entry_shadow(tfs, parms);\n+\t\t/* Entry found and parms populated with return data */\n+\t\tif (rc == 0)\n+\t\t\treturn rc;\n+\t}\n+#endif /* TF_SHADOW */\n+\n+\trc = tf_alloc_tbl_entry_pool_internal(tfp, parms);\n+\tif (rc)\n+\t\tPMD_DRV_LOG(ERR, \"dir%d, Alloc failed, rc:%d\\n\",\n+\t\t\t    parms->dir,\n+\t\t\t    rc);\n+\n+\treturn rc;\n+}\n+\n+/* API defined in tf_core.h */\n+int\n+tf_free_tbl_entry(struct tf *tfp,\n+\t\t  struct tf_free_tbl_entry_parms *parms)\n+{\n+\tint rc;\n+#if (TF_SHADOW == 1)\n+\tstruct tf_session *tfs;\n+#endif /* TF_SHADOW */\n+\n+\t/* Check parameters */\n+\tif (parms == NULL || tfp == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"TBL: Invalid parameters\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\t/*\n+\t * No shadow of external tables so just free the entry\n+\t */\n+\tif (parms->type == TF_TBL_TYPE_EXT) {\n+\t\trc = tf_free_tbl_entry_pool_external(tfp, parms);\n+\t\treturn rc;\n+\t}\n+\n+#if (TF_SHADOW == 1)\n+\tif (tfp->session == NULL || tfp->session->core_data == NULL) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Session info invalid\\n\",\n+\t\t\t    parms->dir);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttfs = (struct tf_session *)(tfp->session->core_data);\n+\n+\t/* Search the Shadow DB for requested element. If not found go\n+\t * allocate one from the Session Pool\n+\t */\n+\tif (parms->search_enable && tfs->shadow_copy) {\n+\t\trc = tf_free_tbl_entry_shadow(tfs, parms);\n+\t\t/* Entry free'ed and parms populated with return data */\n+\t\tif (rc == 0)\n+\t\t\treturn rc;\n+\t}\n+#endif /* TF_SHADOW */\n+\n+\trc = tf_free_tbl_entry_pool_internal(tfp, parms);\n+\n+\tif (rc)\n+\t\tPMD_DRV_LOG(ERR, \"dir:%d, Alloc failed, rc:%d\\n\",\n+\t\t\t    parms->dir,\n+\t\t\t    rc);\n+\treturn rc;\n+}\ndiff --git a/drivers/net/bnxt/tf_core/tf_tbl.h b/drivers/net/bnxt/tf_core/tf_tbl.h\nindex 5a5e72f..cb7ce9d 100644\n--- a/drivers/net/bnxt/tf_core/tf_tbl.h\n+++ b/drivers/net/bnxt/tf_core/tf_tbl.h\n@@ -7,6 +7,7 @@\n #define _TF_TBL_H_\n \n #include <stdint.h>\n+#include \"stack.h\"\n \n enum tf_pg_tbl_lvl {\n \tPT_LVL_0,\n@@ -15,6 +16,48 @@ enum tf_pg_tbl_lvl {\n \tPT_LVL_MAX\n };\n \n+enum tf_em_table_type {\n+\tKEY0_TABLE,\n+\tKEY1_TABLE,\n+\tRECORD_TABLE,\n+\tEFC_TABLE,\n+\tMAX_TABLE\n+};\n+\n+struct tf_em_page_tbl {\n+\tuint32_t\tpg_count;\n+\tuint32_t\tpg_size;\n+\tvoid\t\t**pg_va_tbl;\n+\tuint64_t\t*pg_pa_tbl;\n+};\n+\n+struct tf_em_table {\n+\tint\t\t\t\ttype;\n+\tuint32_t\t\t\tnum_entries;\n+\tuint16_t\t\t\tctx_id;\n+\tuint32_t\t\t\tentry_size;\n+\tint\t\t\t\tnum_lvl;\n+\tuint32_t\t\t\tpage_cnt[PT_LVL_MAX];\n+\tuint64_t\t\t\tnum_data_pages;\n+\tvoid\t\t\t\t*l0_addr;\n+\tuint64_t\t\t\tl0_dma_addr;\n+\tstruct tf_em_page_tbl pg_tbl[PT_LVL_MAX];\n+};\n+\n+struct tf_em_ctx_mem_info {\n+\tstruct tf_em_table\t\tem_tables[MAX_TABLE];\n+};\n+\n+/** table scope control block content */\n+struct tf_em_caps {\n+\tuint32_t flags;\n+\tuint32_t supported;\n+\tuint32_t max_entries_supported;\n+\tuint16_t key_entry_size;\n+\tuint16_t record_entry_size;\n+\tuint16_t efc_entry_size;\n+};\n+\n /** Invalid table scope id 
*/\n #define TF_TBL_SCOPE_INVALID 0xffffffff\n \n@@ -27,9 +70,49 @@ enum tf_pg_tbl_lvl {\n struct tf_tbl_scope_cb {\n \tuint32_t tbl_scope_id;\n \tint index;\n+\tstruct tf_em_ctx_mem_info  em_ctx_info[TF_DIR_MAX];\n+\tstruct tf_em_caps          em_caps[TF_DIR_MAX];\n+\tstruct stack               ext_pool[TF_DIR_MAX][TF_EXT_POOL_CNT_MAX];\n \tuint32_t              *ext_pool_mem[TF_DIR_MAX][TF_EXT_POOL_CNT_MAX];\n };\n \n+/** Hardware Page sizes supported for EEM: 4K, 8K, 64K, 256K, 1M, 2M, 4M, 1G.\n+ * Round-down other page sizes to the lower hardware page size supported.\n+ */\n+#define PAGE_SHIFT 22 /** 2M */\n+\n+#if (PAGE_SHIFT < 12)\t\t\t\t/** < 4K >> 4K */\n+#define TF_EM_PAGE_SHIFT 12\n+#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4K\n+#elif (PAGE_SHIFT <= 13)\t\t\t/** 4K, 8K */\n+#define TF_EM_PAGE_SHIFT 13\n+#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_8K\n+#elif (PAGE_SHIFT < 16)\t\t\t\t/** 16K, 32K >> 8K */\n+#define TF_EM_PAGE_SHIFT 15\n+#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_32K\n+#elif (PAGE_SHIFT <= 17)\t\t\t/** 64K, 128K >> 64K */\n+#define TF_EM_PAGE_SHIFT 16\n+#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_64K\n+#elif (PAGE_SHIFT <= 19)\t\t\t/** 256K, 512K >> 256K */\n+#define TF_EM_PAGE_SHIFT 18\n+#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_256K\n+#elif (PAGE_SHIFT <= 21)\t\t\t/** 1M */\n+#define TF_EM_PAGE_SHIFT 20\n+#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1M\n+#elif (PAGE_SHIFT <= 22)\t\t\t/** 2M, 4M */\n+#define TF_EM_PAGE_SHIFT 21\n+#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_2M\n+#elif (PAGE_SHIFT <= 29)\t\t\t/** 8M ... 512M >> 4M */\n+#define TF_EM_PAGE_SHIFT 22\n+#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4M\n+#else\t\t\t\t\t\t/** >= 1G >> 1G */\n+#define TF_EM_PAGE_SHIFT\t30\n+#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1G\n+#endif\n+\n+#define TF_EM_PAGE_SIZE\t(1 << TF_EM_PAGE_SHIFT)\n+#define TF_EM_PAGE_ALIGNMENT (1 << TF_EM_PAGE_SHIFT)\n+\n /**\n  * Initialize table pool structure to indicate\n  * no table scope has been associated with the\n",
    "prefixes": [
        "v4",
        "12/34"
    ]
}