get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
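
A minimal sketch of driving this endpoint from Python with the requests library, assuming anonymous read access as shown by the GET example below; the API token in the commented-out PATCH call is a hypothetical placeholder, and the writable fields are an assumption based on typical Patchwork behaviour rather than anything stated on this page.

import requests

BASE = "http://patches.dpdk.org/api/patches/97691/"

# Fetch the patch as JSON; read access needs no authentication.
resp = requests.get(BASE, headers={"Accept": "application/json"})
resp.raise_for_status()
patch = resp.json()

print(patch["name"])    # "[04/14] net/bnxt: add Thor SRAM mgr model"
print(patch["state"])   # "superseded"

# A partial update via PATCH would need a maintainer API token
# (placeholder below is hypothetical), and only selected fields
# such as "state" are expected to be writable.
# requests.patch(
#     BASE,
#     headers={"Authorization": "Token <your-api-token>"},
#     json={"state": "accepted"},
# )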

GET /api/patches/97691/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 97691,
    "url": "http://patches.dpdk.org/api/patches/97691/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210901142433.8444-5-venkatkumar.duvvuru@broadcom.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210901142433.8444-5-venkatkumar.duvvuru@broadcom.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210901142433.8444-5-venkatkumar.duvvuru@broadcom.com",
    "date": "2021-09-01T14:24:23",
    "name": "[04/14] net/bnxt: add Thor SRAM mgr model",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "4c13474a5813135a2701048fed365ba282ea6c7b",
    "submitter": {
        "id": 1635,
        "url": "http://patches.dpdk.org/api/people/1635/?format=api",
        "name": "Venkat Duvvuru",
        "email": "venkatkumar.duvvuru@broadcom.com"
    },
    "delegate": {
        "id": 1766,
        "url": "http://patches.dpdk.org/api/users/1766/?format=api",
        "username": "ajitkhaparde",
        "first_name": "Ajit",
        "last_name": "Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210901142433.8444-5-venkatkumar.duvvuru@broadcom.com/mbox/",
    "series": [
        {
            "id": 18604,
            "url": "http://patches.dpdk.org/api/series/18604/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=18604",
            "date": "2021-09-01T14:24:19",
            "name": "enhancements to host based flow table management",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/18604/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/97691/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/97691/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B875FA0C45;\n\tWed,  1 Sep 2021 16:25:18 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id CFDC541158;\n\tWed,  1 Sep 2021 16:24:55 +0200 (CEST)",
            "from relay.smtp-ext.broadcom.com (lpdvsmtp11.broadcom.com\n [192.19.166.231])\n by mails.dpdk.org (Postfix) with ESMTP id E507D41164\n for <dev@dpdk.org>; Wed,  1 Sep 2021 16:24:52 +0200 (CEST)",
            "from S60.dhcp.broadcom.net (unknown [10.123.66.170])\n (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits))\n (No client certificate requested)\n by relay.smtp-ext.broadcom.com (Postfix) with ESMTPS id D18832E5C1;\n Wed,  1 Sep 2021 07:24:50 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 relay.smtp-ext.broadcom.com D18832E5C1",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com;\n s=dkimrelay; t=1630506292;\n bh=dhYoW/EVF0U5zwjvjRVhjr3CLN1EaxQrzQtyA2hH8Jc=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n b=SmLNVvT/C5DjYIyxeibEzu4cmDhttidlo76jS9179VfeyZWStUMYskusMH/mdsov6\n nX4MpP293yWNcfFtdGM7q5cyHAPnW+0lkRvSzRCZDgXgmajy/rF3KX0KMx+QsEggbO\n v/5ZTN7QL8XdMJf1GF7OIknk4cB0/PAH+UfsSEe0=",
        "From": "Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>",
        "To": "dev@dpdk.org",
        "Cc": "Farah Smith <farah.smith@broadcom.com>",
        "Date": "Wed,  1 Sep 2021 19:54:23 +0530",
        "Message-Id": "<20210901142433.8444-5-venkatkumar.duvvuru@broadcom.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210901142433.8444-1-venkatkumar.duvvuru@broadcom.com>",
        "References": "<20210901142433.8444-1-venkatkumar.duvvuru@broadcom.com>",
        "Subject": "[dpdk-dev] [PATCH 04/14] net/bnxt: add Thor SRAM mgr model",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Farah Smith <farah.smith@broadcom.com>\n\nAdd dynamic SRAM manager allocation support.\n\nSigned-off-by: Farah Smith <farah.smith@broadcom.com>\nReviewed-by: Shahaji Bhosle <shahaji.bhosle@broadcom.com>\nReviewed-by: Peter Spreadborough <peter.spreadborough@broadcom.com>\n---\n drivers/net/bnxt/tf_core/ll.c             |   3 +\n drivers/net/bnxt/tf_core/ll.h             |  50 +-\n drivers/net/bnxt/tf_core/meson.build      |   2 +\n drivers/net/bnxt/tf_core/tf_core.c        | 104 ++-\n drivers/net/bnxt/tf_core/tf_core.h        |  48 +-\n drivers/net/bnxt/tf_core/tf_device.c      |  40 +-\n drivers/net/bnxt/tf_core/tf_device.h      | 133 ++-\n drivers/net/bnxt/tf_core/tf_device_p4.c   |  75 +-\n drivers/net/bnxt/tf_core/tf_device_p4.h   |  50 +-\n drivers/net/bnxt/tf_core/tf_device_p58.c  | 105 ++-\n drivers/net/bnxt/tf_core/tf_device_p58.h  |  60 +-\n drivers/net/bnxt/tf_core/tf_msg.c         |   2 +-\n drivers/net/bnxt/tf_core/tf_rm.c          |  46 +-\n drivers/net/bnxt/tf_core/tf_rm.h          |  62 +-\n drivers/net/bnxt/tf_core/tf_session.c     |  56 ++\n drivers/net/bnxt/tf_core/tf_session.h     |  58 +-\n drivers/net/bnxt/tf_core/tf_sram_mgr.c    | 971 ++++++++++++++++++++++\n drivers/net/bnxt/tf_core/tf_sram_mgr.h    | 317 +++++++\n drivers/net/bnxt/tf_core/tf_tbl.c         | 186 +----\n drivers/net/bnxt/tf_core/tf_tbl.h         |  15 +-\n drivers/net/bnxt/tf_core/tf_tbl_sram.c    | 713 ++++++++++++++++\n drivers/net/bnxt/tf_core/tf_tbl_sram.h    | 154 ++++\n drivers/net/bnxt/tf_core/tf_tcam.c        |  10 +-\n drivers/net/bnxt/tf_core/tf_tcam.h        |   7 +\n drivers/net/bnxt/tf_core/tf_tcam_shared.c |  28 +-\n drivers/net/bnxt/tf_core/tf_util.c        |  10 +\n drivers/net/bnxt/tf_ulp/bnxt_ulp.c        |  23 +\n meson_options.txt                         |   2 +\n 28 files changed, 2978 insertions(+), 352 deletions(-)\n create mode 100644 drivers/net/bnxt/tf_core/tf_sram_mgr.c\n create mode 100644 drivers/net/bnxt/tf_core/tf_sram_mgr.h\n create mode 100644 drivers/net/bnxt/tf_core/tf_tbl_sram.c\n create mode 100644 drivers/net/bnxt/tf_core/tf_tbl_sram.h",
    "diff": "diff --git a/drivers/net/bnxt/tf_core/ll.c b/drivers/net/bnxt/tf_core/ll.c\nindex cd168a7970..f2bdff6b9e 100644\n--- a/drivers/net/bnxt/tf_core/ll.c\n+++ b/drivers/net/bnxt/tf_core/ll.c\n@@ -13,6 +13,7 @@ void ll_init(struct ll *ll)\n {\n \tll->head = NULL;\n \tll->tail = NULL;\n+\tll->cnt = 0;\n }\n \n /* insert entry in linked list */\n@@ -30,6 +31,7 @@ void ll_insert(struct ll *ll,\n \t\tentry->next->prev = entry;\n \t\tll->head = entry->next->prev;\n \t}\n+\tll->cnt++;\n }\n \n /* delete entry from linked list */\n@@ -49,4 +51,5 @@ void ll_delete(struct ll *ll,\n \t\tentry->prev->next = entry->next;\n \t\tentry->next->prev = entry->prev;\n \t}\n+\tll->cnt--;\n }\ndiff --git a/drivers/net/bnxt/tf_core/ll.h b/drivers/net/bnxt/tf_core/ll.h\nindex 239478b4f8..9cf8f64ec2 100644\n--- a/drivers/net/bnxt/tf_core/ll.h\n+++ b/drivers/net/bnxt/tf_core/ll.h\n@@ -8,6 +8,8 @@\n #ifndef _LL_H_\n #define _LL_H_\n \n+#include <stdint.h>\n+\n /* linked list entry */\n struct ll_entry {\n \tstruct ll_entry *prev;\n@@ -18,6 +20,7 @@ struct ll_entry {\n struct ll {\n \tstruct ll_entry *head;\n \tstruct ll_entry *tail;\n+\tuint32_t cnt;\n };\n \n /**\n@@ -28,7 +31,7 @@ struct ll {\n void ll_init(struct ll *ll);\n \n /**\n- * Linked list insert\n+ * Linked list insert head\n  *\n  * [in] ll, linked list where element is inserted\n  * [in] entry, entry to be added\n@@ -43,4 +46,49 @@ void ll_insert(struct ll *ll, struct ll_entry *entry);\n  */\n void ll_delete(struct ll *ll, struct ll_entry *entry);\n \n+/**\n+ * Linked list return next entry without deleting it\n+ *\n+ * Useful in performing search\n+ *\n+ * [in] Entry in the list\n+ */\n+static inline struct ll_entry *ll_next(struct ll_entry *entry)\n+{\n+\treturn entry->next;\n+}\n+\n+/**\n+ * Linked list return the head of the list without removing it\n+ *\n+ * Useful in performing search\n+ *\n+ * [in] ll, linked list\n+ */\n+static inline struct ll_entry *ll_head(struct ll *ll)\n+{\n+\treturn ll->head;\n+}\n+\n+/**\n+ * Linked list return the tail of the list without removing it\n+ *\n+ * Useful in performing search\n+ *\n+ * [in] ll, linked list\n+ */\n+static inline struct ll_entry *ll_tail(struct ll *ll)\n+{\n+\treturn ll->tail;\n+}\n+\n+/**\n+ * Linked list return the number of entries in the list\n+ *\n+ * [in] ll, linked list\n+ */\n+static inline uint32_t ll_cnt(struct ll *ll)\n+{\n+\treturn ll->cnt;\n+}\n #endif /* _LL_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/meson.build b/drivers/net/bnxt/tf_core/meson.build\nindex f28e77ec2e..b7333a431b 100644\n--- a/drivers/net/bnxt/tf_core/meson.build\n+++ b/drivers/net/bnxt/tf_core/meson.build\n@@ -16,6 +16,8 @@ sources += files(\n         'stack.c',\n         'tf_rm.c',\n         'tf_tbl.c',\n+\t'tf_tbl_sram.c',\n+\t'tf_sram_mgr.c',\n         'tf_em_common.c',\n         'tf_em_host.c',\n         'tf_em_internal.c',\ndiff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c\nindex 5458f76e2d..936102c804 100644\n--- a/drivers/net/bnxt/tf_core/tf_core.c\n+++ b/drivers/net/bnxt/tf_core/tf_core.c\n@@ -1079,17 +1079,16 @@ tf_alloc_tbl_entry(struct tf *tfp,\n \t\t\t\t    strerror(-rc));\n \t\t\treturn rc;\n \t\t}\n-\n-\t} else {\n-\t\tif (dev->ops->tf_dev_alloc_tbl == NULL) {\n-\t\t\trc = -EOPNOTSUPP;\n+\t} else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {\n+\t\trc = dev->ops->tf_dev_alloc_sram_tbl(tfp, &aparms);\n+\t\tif (rc) {\n \t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"%s: Operation not supported, rc:%s\\n\",\n+\t\t\t\t    \"%s: SRAM table allocation 
failed, rc:%s\\n\",\n \t\t\t\t    tf_dir_2_str(parms->dir),\n \t\t\t\t    strerror(-rc));\n-\t\t\treturn -EOPNOTSUPP;\n+\t\t\treturn rc;\n \t\t}\n-\n+\t} else {\n \t\trc = dev->ops->tf_dev_alloc_tbl(tfp, &aparms);\n \t\tif (rc) {\n \t\t\tTFP_DRV_LOG(ERR,\n@@ -1162,15 +1161,16 @@ tf_free_tbl_entry(struct tf *tfp,\n \t\t\t\t    strerror(-rc));\n \t\t\treturn rc;\n \t\t}\n-\t} else {\n-\t\tif (dev->ops->tf_dev_free_tbl == NULL) {\n-\t\t\trc = -EOPNOTSUPP;\n+\t} else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {\n+\t\trc = dev->ops->tf_dev_free_sram_tbl(tfp, &fparms);\n+\t\tif (rc) {\n \t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"%s: Operation not supported, rc:%s\\n\",\n+\t\t\t\t    \"%s: SRAM table free failed, rc:%s\\n\",\n \t\t\t\t    tf_dir_2_str(parms->dir),\n \t\t\t\t    strerror(-rc));\n-\t\t\treturn -EOPNOTSUPP;\n+\t\t\treturn rc;\n \t\t}\n+\t} else {\n \n \t\trc = dev->ops->tf_dev_free_tbl(tfp, &fparms);\n \t\tif (rc) {\n@@ -1181,7 +1181,6 @@ tf_free_tbl_entry(struct tf *tfp,\n \t\t\treturn rc;\n \t\t}\n \t}\n-\n \treturn 0;\n }\n \n@@ -1244,6 +1243,15 @@ tf_set_tbl_entry(struct tf *tfp,\n \t\t\t\t    strerror(-rc));\n \t\t\treturn rc;\n \t\t}\n+\t}  else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {\n+\t\trc = dev->ops->tf_dev_set_sram_tbl(tfp, &sparms);\n+\t\tif (rc) {\n+\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t    \"%s: SRAM table set failed, rc:%s\\n\",\n+\t\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t\t    strerror(-rc));\n+\t\t\treturn rc;\n+\t\t}\n \t} else {\n \t\tif (dev->ops->tf_dev_set_tbl == NULL) {\n \t\t\trc = -EOPNOTSUPP;\n@@ -1300,28 +1308,39 @@ tf_get_tbl_entry(struct tf *tfp,\n \t\t\t    strerror(-rc));\n \t\treturn rc;\n \t}\n-\n-\tif (dev->ops->tf_dev_get_tbl == NULL) {\n-\t\trc = -EOPNOTSUPP;\n-\t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s: Operation not supported, rc:%s\\n\",\n-\t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    strerror(-rc));\n-\t\treturn -EOPNOTSUPP;\n-\t}\n-\n \tgparms.dir = parms->dir;\n \tgparms.type = parms->type;\n \tgparms.data = parms->data;\n \tgparms.data_sz_in_bytes = parms->data_sz_in_bytes;\n \tgparms.idx = parms->idx;\n-\trc = dev->ops->tf_dev_get_tbl(tfp, &gparms);\n-\tif (rc) {\n-\t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s: Table get failed, rc:%s\\n\",\n-\t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    strerror(-rc));\n-\t\treturn rc;\n+\n+\tif (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {\n+\t\trc = dev->ops->tf_dev_get_sram_tbl(tfp, &gparms);\n+\t\tif (rc) {\n+\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t    \"%s: SRAM table get failed, rc:%s\\n\",\n+\t\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t\t    strerror(-rc));\n+\t\t\treturn rc;\n+\t\t}\n+\t} else {\n+\t\tif (dev->ops->tf_dev_get_tbl == NULL) {\n+\t\t\trc = -EOPNOTSUPP;\n+\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t    \"%s: Operation not supported, rc:%s\\n\",\n+\t\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t\t    strerror(-rc));\n+\t\t\treturn -EOPNOTSUPP;\n+\t\t}\n+\n+\t\trc = dev->ops->tf_dev_get_tbl(tfp, &gparms);\n+\t\tif (rc) {\n+\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t    \"%s: Table get failed, rc:%s\\n\",\n+\t\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t\t    strerror(-rc));\n+\t\t\treturn rc;\n+\t\t}\n \t}\n \n \treturn rc;\n@@ -1361,6 +1380,13 @@ tf_bulk_get_tbl_entry(struct tf *tfp,\n \t\treturn rc;\n \t}\n \n+\tbparms.dir = parms->dir;\n+\tbparms.type = parms->type;\n+\tbparms.starting_idx = parms->starting_idx;\n+\tbparms.num_entries = parms->num_entries;\n+\tbparms.entry_sz_in_bytes = parms->entry_sz_in_bytes;\n+\tbparms.physical_mem_addr = parms->physical_mem_addr;\n+\n \tif 
(parms->type == TF_TBL_TYPE_EXT) {\n \t\t/* Not supported, yet */\n \t\trc = -EOPNOTSUPP;\n@@ -1370,10 +1396,17 @@ tf_bulk_get_tbl_entry(struct tf *tfp,\n \t\t\t    strerror(-rc));\n \n \t\treturn rc;\n+\t} else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) {\n+\t\trc = dev->ops->tf_dev_get_bulk_sram_tbl(tfp, &bparms);\n+\t\tif (rc) {\n+\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t    \"%s: SRAM table bulk get failed, rc:%s\\n\",\n+\t\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t\t    strerror(-rc));\n+\t\t}\n+\t\treturn rc;\n \t}\n \n-\t/* Internal table type processing */\n-\n \tif (dev->ops->tf_dev_get_bulk_tbl == NULL) {\n \t\trc = -EOPNOTSUPP;\n \t\tTFP_DRV_LOG(ERR,\n@@ -1383,12 +1416,6 @@ tf_bulk_get_tbl_entry(struct tf *tfp,\n \t\treturn -EOPNOTSUPP;\n \t}\n \n-\tbparms.dir = parms->dir;\n-\tbparms.type = parms->type;\n-\tbparms.starting_idx = parms->starting_idx;\n-\tbparms.num_entries = parms->num_entries;\n-\tbparms.entry_sz_in_bytes = parms->entry_sz_in_bytes;\n-\tbparms.physical_mem_addr = parms->physical_mem_addr;\n \trc = dev->ops->tf_dev_get_bulk_tbl(tfp, &bparms);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n@@ -1397,7 +1424,6 @@ tf_bulk_get_tbl_entry(struct tf *tfp,\n \t\t\t    strerror(-rc));\n \t\treturn rc;\n \t}\n-\n \treturn rc;\n }\n \ndiff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h\nindex af8d13bd7e..fb02c2b161 100644\n--- a/drivers/net/bnxt/tf_core/tf_core.h\n+++ b/drivers/net/bnxt/tf_core/tf_core.h\n@@ -65,6 +65,16 @@ enum tf_ext_mem_chan_type {\n \tTF_EXT_MEM_CHAN_TYPE_MAX\n };\n \n+/**\n+ * WC TCAM number of slice per row that devices supported\n+ */\n+enum tf_wc_num_slice {\n+\tTF_WC_TCAM_1_SLICE_PER_ROW = 1,\n+\tTF_WC_TCAM_2_SLICE_PER_ROW = 2,\n+\tTF_WC_TCAM_4_SLICE_PER_ROW = 4,\n+\tTF_WC_TCAM_8_SLICE_PER_ROW = 8,\n+};\n+\n /**\n  * EEM record AR helper\n  *\n@@ -670,6 +680,13 @@ struct tf_open_session_parms {\n \t */\n \tvoid *bp;\n \n+\t/**\n+\t * [in]\n+\t *\n+\t * The number of slices per row for WC TCAM entry.\n+\t */\n+\tenum tf_wc_num_slice wc_num_slices;\n+\n \t/**\n \t * [out] shared_session_creator\n \t *\n@@ -734,8 +751,6 @@ int tf_open_session(struct tf *tfp,\n /**\n  * General internal resource info\n  *\n- * TODO: remove tf_rm_new_entry structure and use this structure\n- * internally.\n  */\n struct tf_resource_info {\n \tuint16_t start;\n@@ -1656,12 +1671,7 @@ struct tf_alloc_tbl_entry_parms {\n  * entry of the indicated type for this TruFlow session.\n  *\n  * Allocates an index table record. This function will attempt to\n- * allocate an entry or search an index table for a matching entry if\n- * search is enabled (only the shadow copy of the table is accessed).\n- *\n- * If search is not enabled, the first available free entry is\n- * returned. If search is enabled and a matching entry to entry_data\n- * is found hit is set to TRUE and success is returned.\n+ * allocate an index table entry.\n  *\n  * External types:\n  *\n@@ -1670,8 +1680,8 @@ struct tf_alloc_tbl_entry_parms {\n  * Allocates an external index table action record.\n  *\n  * NOTE:\n- * Implementation of the internals of this function will be a stack with push\n- * and pop.\n+ * Implementation of the internals of the external function will be a stack with\n+ * push and pop.\n  *\n  * Returns success or failure code.\n  */\n@@ -1707,20 +1717,15 @@ struct tf_free_tbl_entry_parms {\n  *\n  * Internal types:\n  *\n- * If session has shadow_copy enabled the shadow DB is searched and if\n- * found the element ref_cnt is decremented. 
If ref_cnt goes to\n- * zero then the element is returned to the session pool.\n- *\n- * If the session does not have a shadow DB the element is free'ed and\n- * given back to the session pool.\n+ * The element is freed and given back to the session pool.\n  *\n  * External types:\n  *\n- * Free's an external index table action record.\n+ * Frees an external index table action record.\n  *\n  * NOTE:\n- * Implementation of the internals of this function will be a stack with push\n- * and pop.\n+ * Implementation of the internals of the external table will be a stack with\n+ * push and pop.\n  *\n  * Returns success or failure code.\n  */\n@@ -1764,9 +1769,8 @@ struct tf_set_tbl_entry_parms {\n /**\n  * set index table entry\n  *\n- * Used to insert an application programmed index table entry into a\n- * previous allocated table location.  A shadow copy of the table\n- * is maintained (if enabled) (only for internal objects)\n+ * Used to set an application programmed index table entry into a\n+ * previous allocated table location.\n  *\n  * Returns success or failure code.\n  */\ndiff --git a/drivers/net/bnxt/tf_core/tf_device.c b/drivers/net/bnxt/tf_core/tf_device.c\nindex 498e668b16..25a7166bbb 100644\n--- a/drivers/net/bnxt/tf_core/tf_device.c\n+++ b/drivers/net/bnxt/tf_core/tf_device.c\n@@ -11,10 +11,14 @@\n #include \"tf_rm.h\"\n #ifdef TF_TCAM_SHARED\n #include \"tf_tcam_shared.h\"\n+#include \"tf_tbl_sram.h\"\n #endif /* TF_TCAM_SHARED */\n \n struct tf;\n \n+/* Number of slices per row for WC TCAM */\n+uint16_t g_wc_num_slices_per_row = TF_WC_TCAM_1_SLICE_PER_ROW;\n+\n /* Forward declarations */\n static int tf_dev_unbind_p4(struct tf *tfp);\n static int tf_dev_unbind_p58(struct tf *tfp);\n@@ -83,7 +87,8 @@ static int\n tf_dev_bind_p4(struct tf *tfp,\n \t       bool shadow_copy,\n \t       struct tf_session_resources *resources,\n-\t       struct tf_dev_info *dev_handle)\n+\t       struct tf_dev_info *dev_handle,\n+\t       enum tf_wc_num_slice wc_num_slices)\n {\n \tint rc;\n \tint frc;\n@@ -131,7 +136,6 @@ tf_dev_bind_p4(struct tf *tfp,\n \tif (rsv_cnt) {\n \t\ttbl_cfg.num_elements = TF_TBL_TYPE_MAX;\n \t\ttbl_cfg.cfg = tf_tbl_p4;\n-\t\ttbl_cfg.shadow_copy = shadow_copy;\n \t\ttbl_cfg.resources = resources;\n \t\trc = tf_tbl_bind(tfp, &tbl_cfg);\n \t\tif (rc) {\n@@ -151,6 +155,7 @@ tf_dev_bind_p4(struct tf *tfp,\n \t\ttcam_cfg.cfg = tf_tcam_p4;\n \t\ttcam_cfg.shadow_copy = shadow_copy;\n \t\ttcam_cfg.resources = resources;\n+\t\ttcam_cfg.wc_num_slices = wc_num_slices;\n #ifdef TF_TCAM_SHARED\n \t\trc = tf_tcam_shared_bind(tfp, &tcam_cfg);\n #else /* !TF_TCAM_SHARED */\n@@ -369,7 +374,8 @@ static int\n tf_dev_bind_p58(struct tf *tfp,\n \t\tbool shadow_copy,\n \t\tstruct tf_session_resources *resources,\n-\t\tstruct tf_dev_info *dev_handle)\n+\t\tstruct tf_dev_info *dev_handle,\n+\t\tenum tf_wc_num_slice wc_num_slices)\n {\n \tint rc;\n \tint frc;\n@@ -414,7 +420,6 @@ tf_dev_bind_p58(struct tf *tfp,\n \tif (rsv_cnt) {\n \t\ttbl_cfg.num_elements = TF_TBL_TYPE_MAX;\n \t\ttbl_cfg.cfg = tf_tbl_p58;\n-\t\ttbl_cfg.shadow_copy = shadow_copy;\n \t\ttbl_cfg.resources = resources;\n \t\trc = tf_tbl_bind(tfp, &tbl_cfg);\n \t\tif (rc) {\n@@ -423,6 +428,13 @@ tf_dev_bind_p58(struct tf *tfp,\n \t\t\tgoto fail;\n \t\t}\n \t\tno_rsv_flag = false;\n+\n+\t\trc = tf_tbl_sram_bind(tfp);\n+\t\tif (rc) {\n+\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t    \"SRAM table initialization failure\\n\");\n+\t\t\tgoto fail;\n+\t\t}\n \t}\n \n \trsv_cnt = tf_dev_reservation_check(TF_TCAM_TBL_TYPE_MAX,\n@@ -433,6 +445,7 @@ 
tf_dev_bind_p58(struct tf *tfp,\n \t\ttcam_cfg.cfg = tf_tcam_p58;\n \t\ttcam_cfg.shadow_copy = shadow_copy;\n \t\ttcam_cfg.resources = resources;\n+\t\ttcam_cfg.wc_num_slices = wc_num_slices;\n #ifdef TF_TCAM_SHARED\n \t\trc = tf_tcam_shared_bind(tfp, &tcam_cfg);\n #else /* !TF_TCAM_SHARED */\n@@ -565,6 +578,18 @@ tf_dev_unbind_p58(struct tf *tfp)\n \t\tfail = true;\n \t}\n \n+\t/* Unbind the SRAM table prior to table as the table manager\n+\t * owns and frees the table DB while the SRAM table manager owns\n+\t * and manages it's internal data structures.  SRAM table manager\n+\t * relies on the table rm_db to exist.\n+\t */\n+\trc = tf_tbl_sram_unbind(tfp);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Device unbind failed, SRAM table\\n\");\n+\t\tfail = true;\n+\t}\n+\n \trc = tf_tbl_unbind(tfp);\n \tif (rc) {\n \t\tTFP_DRV_LOG(INFO,\n@@ -606,6 +631,7 @@ tf_dev_bind(struct tf *tfp __rte_unused,\n \t    enum tf_device_type type,\n \t    bool shadow_copy,\n \t    struct tf_session_resources *resources,\n+\t    uint16_t wc_num_slices,\n \t    struct tf_dev_info *dev_handle)\n {\n \tswitch (type) {\n@@ -615,13 +641,15 @@ tf_dev_bind(struct tf *tfp __rte_unused,\n \t\treturn tf_dev_bind_p4(tfp,\n \t\t\t\t      shadow_copy,\n \t\t\t\t      resources,\n-\t\t\t\t      dev_handle);\n+\t\t\t\t      dev_handle,\n+\t\t\t\t      wc_num_slices);\n \tcase TF_DEVICE_TYPE_THOR:\n \t\tdev_handle->type = type;\n \t\treturn tf_dev_bind_p58(tfp,\n \t\t\t\t       shadow_copy,\n \t\t\t\t       resources,\n-\t\t\t\t       dev_handle);\n+\t\t\t\t       dev_handle,\n+\t\t\t\t       wc_num_slices);\n \tdefault:\n \t\tTFP_DRV_LOG(ERR,\n \t\t\t    \"No such device\\n\");\ndiff --git a/drivers/net/bnxt/tf_core/tf_device.h b/drivers/net/bnxt/tf_core/tf_device.h\nindex b43cfc6925..9b0c037db0 100644\n--- a/drivers/net/bnxt/tf_core/tf_device.h\n+++ b/drivers/net/bnxt/tf_core/tf_device.h\n@@ -57,6 +57,9 @@ struct tf_dev_info {\n  * [in] resources\n  *   Pointer to resource allocation information\n  *\n+ * [in] wc_num_slices\n+ *   Number of slices per row for WC\n+ *\n  * [out] dev_handle\n  *   Device handle\n  *\n@@ -69,6 +72,7 @@ int tf_dev_bind(struct tf *tfp,\n \t\tenum tf_device_type type,\n \t\tbool shadow_copy,\n \t\tstruct tf_session_resources *resources,\n+\t\tuint16_t wc_num_slices,\n \t\tstruct tf_dev_info *dev_handle);\n \n /**\n@@ -139,6 +143,23 @@ struct tf_dev_ops {\n \t\t\t\t       uint16_t resource_id,\n \t\t\t\t       const char **resource_str);\n \n+\t/**\n+\t * Set the WC TCAM slice information that the device\n+\t * supports.\n+\t *\n+\t * [in] tfp\n+\t *   Pointer to TF handle\n+\t *\n+\t * [in] num_slices_per_row\n+\t *   Number of slices per row the device supports\n+\t *\n+\t * Returns\n+\t *   - (0) if successful.\n+\t *   - (-EINVAL) on failure.\n+\t */\n+\tint (*tf_dev_set_tcam_slice_info)(struct tf *tfp,\n+\t\t\t\t\t  enum tf_wc_num_slice num_slices_per_row);\n+\n \t/**\n \t * Retrieves the WC TCAM slice information that the device\n \t * supports.\n@@ -241,6 +262,22 @@ struct tf_dev_ops {\n \tint (*tf_dev_get_ident_resc_info)(struct tf *tfp,\n \t\t\t\t\t  struct tf_identifier_resource_info *parms);\n \n+\t/**\n+\t * Indicates whether the index table type is SRAM managed\n+\t *\n+\t * [in] tfp\n+\t *   Pointer to TF handle\n+\t *\n+\t * [in] type\n+\t *   Truflow index table type, e.g. 
TF_TYPE_FULL_ACT_RECORD\n+\t *\n+\t * Returns\n+\t *   - (0) if the table is not managed by the SRAM manager\n+\t *   - (1) if the table is managed by the SRAM manager\n+\t */\n+\tbool (*tf_dev_is_sram_managed)(struct tf *tfp,\n+\t\t\t\t       enum tf_tbl_type tbl_type);\n+\n \t/**\n \t * Get SRAM table information.\n \t *\n@@ -289,6 +326,25 @@ struct tf_dev_ops {\n \tint (*tf_dev_alloc_tbl)(struct tf *tfp,\n \t\t\t\tstruct tf_tbl_alloc_parms *parms);\n \n+\t/**\n+\t * Allocation of an SRAM index table type element.\n+\t *\n+\t * This API allocates the specified table type element from a\n+\t * device specific table type DB. The allocated element is\n+\t * returned.\n+\t *\n+\t * [in] tfp\n+\t *   Pointer to TF handle\n+\t *\n+\t * [in] parms\n+\t *   Pointer to table allocation parameters\n+\t *\n+\t * Returns\n+\t *   - (0) if successful.\n+\t *   - (-EINVAL) on failure.\n+\t */\n+\tint (*tf_dev_alloc_sram_tbl)(struct tf *tfp,\n+\t\t\t\t     struct tf_tbl_alloc_parms *parms);\n \t/**\n \t * Allocation of a external table type element.\n \t *\n@@ -327,7 +383,24 @@ struct tf_dev_ops {\n \t */\n \tint (*tf_dev_free_tbl)(struct tf *tfp,\n \t\t\t       struct tf_tbl_free_parms *parms);\n-\n+\t/**\n+\t * Free of an SRAM table type element.\n+\t *\n+\t * This API free's a previous allocated table type element from a\n+\t * device specific table type DB.\n+\t *\n+\t * [in] tfp\n+\t *   Pointer to TF handle\n+\t *\n+\t * [in] parms\n+\t *   Pointer to table free parameters\n+\t *\n+\t * Returns\n+\t *   - (0) if successful.\n+\t *   - (-EINVAL) on failure.\n+\t */\n+\tint (*tf_dev_free_sram_tbl)(struct tf *tfp,\n+\t\t\t\t    struct tf_tbl_free_parms *parms);\n \t/**\n \t * Free of a external table type element.\n \t *\n@@ -385,6 +458,25 @@ struct tf_dev_ops {\n \tint (*tf_dev_set_ext_tbl)(struct tf *tfp,\n \t\t\t\t  struct tf_tbl_set_parms *parms);\n \n+\t/**\n+\t * Sets the specified SRAM table type element.\n+\t *\n+\t * This API sets the specified element data by invoking the\n+\t * firmware.\n+\t *\n+\t * [in] tfp\n+\t *   Pointer to TF handle\n+\t *\n+\t * [in] parms\n+\t *   Pointer to table set parameters\n+\t *\n+\t * Returns\n+\t *   - (0) if successful.\n+\t *   - (-EINVAL) on failure.\n+\t */\n+\tint (*tf_dev_set_sram_tbl)(struct tf *tfp,\n+\t\t\t\t   struct tf_tbl_set_parms *parms);\n+\n \t/**\n \t * Retrieves the specified table type element.\n \t *\n@@ -404,6 +496,25 @@ struct tf_dev_ops {\n \tint (*tf_dev_get_tbl)(struct tf *tfp,\n \t\t\t      struct tf_tbl_get_parms *parms);\n \n+\t/**\n+\t * Retrieves the specified SRAM table type element.\n+\t *\n+\t * This API retrieves the specified element data by invoking the\n+\t * firmware.\n+\t *\n+\t * [in] tfp\n+\t *   Pointer to TF handle\n+\t *\n+\t * [in] parms\n+\t *   Pointer to table get parameters\n+\t *\n+\t * Returns\n+\t *   - (0) if successful.\n+\t *   - (-EINVAL) on failure.\n+\t */\n+\tint (*tf_dev_get_sram_tbl)(struct tf *tfp,\n+\t\t\t\t   struct tf_tbl_get_parms *parms);\n+\n \t/**\n \t * Retrieves the specified table type element using 'bulk'\n \t * mechanism.\n@@ -424,6 +535,26 @@ struct tf_dev_ops {\n \tint (*tf_dev_get_bulk_tbl)(struct tf *tfp,\n \t\t\t\t   struct tf_tbl_get_bulk_parms *parms);\n \n+\t/**\n+\t * Retrieves the specified SRAM table type element using 'bulk'\n+\t * mechanism.\n+\t *\n+\t * This API retrieves the specified element data by invoking the\n+\t * firmware.\n+\t *\n+\t * [in] tfp\n+\t *   Pointer to TF handle\n+\t *\n+\t * [in] parms\n+\t *   Pointer to table get bulk parameters\n+\t *\n+\t * 
Returns\n+\t *   - (0) if successful.\n+\t *   - (-EINVAL) on failure.\n+\t */\n+\tint (*tf_dev_get_bulk_sram_tbl)(struct tf *tfp,\n+\t\t\t\t\tstruct tf_tbl_get_bulk_parms *parms);\n+\n \t/**\n \t * Gets the increment value to add to the shared session resource\n \t * start offset by for each count in the \"stride\"\ndiff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c\nindex 2e7ccec123..826cd0cdbc 100644\n--- a/drivers/net/bnxt/tf_core/tf_device_p4.c\n+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c\n@@ -118,14 +118,48 @@ tf_dev_p4_get_resource_str(struct tf *tfp __rte_unused,\n }\n \n /**\n- * Device specific function that retrieves the WC TCAM slices the\n+ * Device specific function that set the WC TCAM slices the\n  * device supports.\n  *\n  * [in] tfp\n  *   Pointer to TF handle\n  *\n- * [out] slice_size\n- *   Pointer to the WC TCAM slice size\n+ * [in] num_slices_per_row\n+ *   The WC TCAM row slice configuration\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+static int\n+tf_dev_p4_set_tcam_slice_info(struct tf *tfp __rte_unused,\n+\t\t\t      enum tf_wc_num_slice num_slices_per_row)\n+{\n+\tswitch (num_slices_per_row) {\n+\tcase TF_WC_TCAM_1_SLICE_PER_ROW:\n+\tcase TF_WC_TCAM_2_SLICE_PER_ROW:\n+\tcase TF_WC_TCAM_4_SLICE_PER_ROW:\n+\t\tg_wc_num_slices_per_row = num_slices_per_row;\n+\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Device specific function that retrieves the TCAM slices the\n+ * device supports.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle\n+ *\n+ * [in] type\n+ *   TF TCAM type\n+ *\n+ * [in] key_sz\n+ *   The key size\n  *\n  * [out] num_slices_per_row\n  *   Pointer to the WC TCAM row slice configuration\n@@ -141,11 +175,10 @@ tf_dev_p4_get_tcam_slice_info(struct tf *tfp __rte_unused,\n \t\t\t      uint16_t *num_slices_per_row)\n {\n /* Single slice support */\n-#define CFA_P4_WC_TCAM_SLICES_PER_ROW 1\n #define CFA_P4_WC_TCAM_SLICE_SIZE     12\n \n \tif (type == TF_TCAM_TBL_TYPE_WC_TCAM) {\n-\t\t*num_slices_per_row = CFA_P4_WC_TCAM_SLICES_PER_ROW;\n+\t\t*num_slices_per_row = g_wc_num_slices_per_row;\n \t\tif (key_sz > *num_slices_per_row * CFA_P4_WC_TCAM_SLICE_SIZE)\n \t\t\treturn -ENOTSUP;\n \t} else { /* for other type of tcam */\n@@ -220,26 +253,51 @@ static int tf_dev_p4_word_align(uint16_t size)\n \treturn ((((size) + 31) >> 5) * 4);\n }\n \n+/**\n+ * Indicates whether the index table type is SRAM managed\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle\n+ *\n+ * [in] type\n+ *   Truflow index table type, e.g. 
TF_TYPE_FULL_ACT_RECORD\n+ *\n+ * Returns\n+ *   - (0) if the table is not managed by the SRAM manager\n+ *   - (1) if the table is managed by the SRAM manager\n+ */\n+static bool tf_dev_p4_is_sram_managed(struct tf *tfp __rte_unused,\n+\t\t\t\t      enum tf_tbl_type type __rte_unused)\n+{\n+\treturn false;\n+}\n /**\n  * Truflow P4 device specific functions\n  */\n const struct tf_dev_ops tf_dev_ops_p4_init = {\n \t.tf_dev_get_max_types = tf_dev_p4_get_max_types,\n \t.tf_dev_get_resource_str = tf_dev_p4_get_resource_str,\n+\t.tf_dev_set_tcam_slice_info = tf_dev_p4_set_tcam_slice_info,\n \t.tf_dev_get_tcam_slice_info = tf_dev_p4_get_tcam_slice_info,\n \t.tf_dev_alloc_ident = NULL,\n \t.tf_dev_free_ident = NULL,\n \t.tf_dev_search_ident = NULL,\n \t.tf_dev_get_ident_resc_info = NULL,\n \t.tf_dev_get_tbl_info = NULL,\n+\t.tf_dev_is_sram_managed = tf_dev_p4_is_sram_managed,\n \t.tf_dev_alloc_ext_tbl = NULL,\n \t.tf_dev_alloc_tbl = NULL,\n+\t.tf_dev_alloc_sram_tbl = NULL,\n \t.tf_dev_free_ext_tbl = NULL,\n \t.tf_dev_free_tbl = NULL,\n+\t.tf_dev_free_sram_tbl = NULL,\n \t.tf_dev_set_tbl = NULL,\n \t.tf_dev_set_ext_tbl = NULL,\n+\t.tf_dev_set_sram_tbl = NULL,\n \t.tf_dev_get_tbl = NULL,\n+\t.tf_dev_get_sram_tbl = NULL,\n \t.tf_dev_get_bulk_tbl = NULL,\n+\t.tf_dev_get_bulk_sram_tbl = NULL,\n \t.tf_dev_get_shared_tbl_increment = tf_dev_p4_get_shared_tbl_increment,\n \t.tf_dev_get_tbl_resc_info = NULL,\n \t.tf_dev_alloc_tcam = NULL,\n@@ -271,20 +329,27 @@ const struct tf_dev_ops tf_dev_ops_p4_init = {\n const struct tf_dev_ops tf_dev_ops_p4 = {\n \t.tf_dev_get_max_types = tf_dev_p4_get_max_types,\n \t.tf_dev_get_resource_str = tf_dev_p4_get_resource_str,\n+\t.tf_dev_set_tcam_slice_info = tf_dev_p4_set_tcam_slice_info,\n \t.tf_dev_get_tcam_slice_info = tf_dev_p4_get_tcam_slice_info,\n \t.tf_dev_alloc_ident = tf_ident_alloc,\n \t.tf_dev_free_ident = tf_ident_free,\n \t.tf_dev_search_ident = tf_ident_search,\n \t.tf_dev_get_ident_resc_info = tf_ident_get_resc_info,\n \t.tf_dev_get_tbl_info = NULL,\n+\t.tf_dev_is_sram_managed = tf_dev_p4_is_sram_managed,\n \t.tf_dev_alloc_tbl = tf_tbl_alloc,\n \t.tf_dev_alloc_ext_tbl = tf_tbl_ext_alloc,\n+\t.tf_dev_alloc_sram_tbl = tf_tbl_alloc,\n \t.tf_dev_free_tbl = tf_tbl_free,\n \t.tf_dev_free_ext_tbl = tf_tbl_ext_free,\n+\t.tf_dev_free_sram_tbl = tf_tbl_free,\n \t.tf_dev_set_tbl = tf_tbl_set,\n \t.tf_dev_set_ext_tbl = tf_tbl_ext_common_set,\n+\t.tf_dev_set_sram_tbl = NULL,\n \t.tf_dev_get_tbl = tf_tbl_get,\n+\t.tf_dev_get_sram_tbl = NULL,\n \t.tf_dev_get_bulk_tbl = tf_tbl_bulk_get,\n+\t.tf_dev_get_bulk_sram_tbl = NULL,\n \t.tf_dev_get_shared_tbl_increment = tf_dev_p4_get_shared_tbl_increment,\n \t.tf_dev_get_tbl_resc_info = tf_tbl_get_resc_info,\n #ifdef TF_TCAM_SHARED\ndiff --git a/drivers/net/bnxt/tf_core/tf_device_p4.h b/drivers/net/bnxt/tf_core/tf_device_p4.h\nindex a73ba3cd70..c1357913f1 100644\n--- a/drivers/net/bnxt/tf_core/tf_device_p4.h\n+++ b/drivers/net/bnxt/tf_core/tf_device_p4.h\n@@ -15,101 +15,101 @@\n struct tf_rm_element_cfg tf_ident_p4[TF_IDENT_TYPE_MAX] = {\n \t[TF_IDENT_TYPE_L2_CTXT_HIGH] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_HIGH,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_IDENT_TYPE_L2_CTXT_LOW] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_LOW,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_IDENT_TYPE_PROF_FUNC] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_FUNC,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_IDENT_TYPE_WC_PROF] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, 
CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_IDENT_TYPE_EM_PROF] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_EM_PROF_ID,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n };\n \n struct tf_rm_element_cfg tf_tcam_p4[TF_TCAM_TBL_TYPE_MAX] = {\n \t[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TCAM_TBL_TYPE_PROF_TCAM] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_TCAM,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TCAM_TBL_TYPE_WC_TCAM] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TCAM_TBL_TYPE_SP_TCAM] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_TCAM,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n };\n \n struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {\n \t[TF_TBL_TYPE_FULL_ACT_RECORD] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_MCAST_GROUPS] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MCG,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_ACT_ENCAP_8B] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_8B,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_ACT_ENCAP_16B] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_16B,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_ACT_ENCAP_64B] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_64B,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_ACT_SP_SMAC] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_ACT_STATS_64] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_COUNTER_64B,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_ACT_MODIFY_IPV4] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_IPV4,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_METER_PROF] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER_PROF,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_METER_INST] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_MIRROR_CONFIG] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MIRROR,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \n };\n@@ -117,14 +117,14 @@ struct tf_rm_element_cfg tf_tbl_p4[TF_TBL_TYPE_MAX] = {\n struct tf_rm_element_cfg tf_em_ext_p4[TF_EM_TBL_TYPE_MAX] = {\n \t[TF_EM_TBL_TYPE_TBL_SCOPE] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_TBL_SCOPE,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n };\n \n struct tf_rm_element_cfg tf_em_int_p4[TF_EM_TBL_TYPE_MAX] = {\n \t[TF_EM_TBL_TYPE_EM_RECORD] = {\n \t\tTF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_REC,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n };\n \ndiff --git a/drivers/net/bnxt/tf_core/tf_device_p58.c b/drivers/net/bnxt/tf_core/tf_device_p58.c\nindex a492c62bff..47d7836a58 100644\n--- a/drivers/net/bnxt/tf_core/tf_device_p58.c\n+++ b/drivers/net/bnxt/tf_core/tf_device_p58.c\n@@ -17,6 +17,7 @@\n #include \"tf_if_tbl.h\"\n #include \"tfp.h\"\n #include \"tf_msg_common.h\"\n+#include 
\"tf_tbl_sram.h\"\n \n #define TF_DEV_P58_PARIF_MAX 16\n #define TF_DEV_P58_PF_MASK 0xfUL\n@@ -105,14 +106,48 @@ tf_dev_p58_get_resource_str(struct tf *tfp __rte_unused,\n }\n \n /**\n- * Device specific function that retrieves the WC TCAM slices the\n+ * Device specific function that set the WC TCAM slices the\n  * device supports.\n  *\n  * [in] tfp\n  *   Pointer to TF handle\n  *\n- * [out] slice_size\n- *   Pointer to the WC TCAM slice size\n+ * [in] num_slices_per_row\n+ *   The WC TCAM row slice configuration\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+static int\n+tf_dev_p58_set_tcam_slice_info(struct tf *tfp __rte_unused,\n+\t\t\t       enum tf_wc_num_slice num_slices_per_row)\n+{\n+\tswitch (num_slices_per_row) {\n+\tcase TF_WC_TCAM_1_SLICE_PER_ROW:\n+\tcase TF_WC_TCAM_2_SLICE_PER_ROW:\n+\tcase TF_WC_TCAM_4_SLICE_PER_ROW:\n+\t\tg_wc_num_slices_per_row = num_slices_per_row;\n+\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Device specific function that retrieves the TCAM slices the\n+ * device supports.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle\n+ *\n+ * [in] type\n+ *   TF TCAM type\n+ *\n+ * [in] key_sz\n+ *   The key size\n  *\n  * [out] num_slices_per_row\n  *   Pointer to the WC TCAM row slice configuration\n@@ -123,16 +158,13 @@ tf_dev_p58_get_resource_str(struct tf *tfp __rte_unused,\n  */\n static int\n tf_dev_p58_get_tcam_slice_info(struct tf *tfp __rte_unused,\n-\t\t\t      enum tf_tcam_tbl_type type,\n-\t\t\t      uint16_t key_sz,\n-\t\t\t      uint16_t *num_slices_per_row)\n+\t\t\t       enum tf_tcam_tbl_type type,\n+\t\t\t       uint16_t key_sz,\n+\t\t\t       uint16_t *num_slices_per_row)\n {\n-#define CFA_P58_WC_TCAM_SLICES_PER_ROW 1\n #define CFA_P58_WC_TCAM_SLICE_SIZE     24\n-\n \tif (type == TF_TCAM_TBL_TYPE_WC_TCAM) {\n-\t\t/* only support single slice key size now */\n-\t\t*num_slices_per_row = CFA_P58_WC_TCAM_SLICES_PER_ROW;\n+\t\t*num_slices_per_row = g_wc_num_slices_per_row;\n \t\tif (key_sz > *num_slices_per_row * CFA_P58_WC_TCAM_SLICE_SIZE)\n \t\t\treturn -ENOTSUP;\n \t} else { /* for other type of tcam */\n@@ -194,6 +226,44 @@ static int tf_dev_p58_get_shared_tbl_increment(struct tf *tfp __rte_unused,\n \treturn 0;\n }\n \n+/**\n+ * Indicates whether the index table type is SRAM managed\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle\n+ *\n+ * [in] type\n+ *   Truflow index table type, e.g. 
TF_TYPE_FULL_ACT_RECORD\n+ *\n+ * Returns\n+ *   - (0) if the table is not managed by the SRAM manager\n+ *   - (1) if the table is managed by the SRAM manager\n+ */\n+static bool tf_dev_p58_is_sram_managed(struct tf *tfp __rte_unused,\n+\t\t\t\t       enum tf_tbl_type type)\n+{\n+\tswitch (type) {\n+\tcase TF_TBL_TYPE_FULL_ACT_RECORD:\n+\tcase TF_TBL_TYPE_COMPACT_ACT_RECORD:\n+\tcase TF_TBL_TYPE_ACT_ENCAP_8B:\n+\tcase TF_TBL_TYPE_ACT_ENCAP_16B:\n+\tcase TF_TBL_TYPE_ACT_ENCAP_32B:\n+\tcase TF_TBL_TYPE_ACT_ENCAP_64B:\n+\tcase TF_TBL_TYPE_ACT_SP_SMAC:\n+\tcase TF_TBL_TYPE_ACT_SP_SMAC_IPV4:\n+\tcase TF_TBL_TYPE_ACT_SP_SMAC_IPV6:\n+\tcase TF_TBL_TYPE_ACT_STATS_64:\n+\tcase TF_TBL_TYPE_ACT_MODIFY_IPV4:\n+\tcase TF_TBL_TYPE_ACT_MODIFY_8B:\n+\tcase TF_TBL_TYPE_ACT_MODIFY_16B:\n+\tcase TF_TBL_TYPE_ACT_MODIFY_32B:\n+\tcase TF_TBL_TYPE_ACT_MODIFY_64B:\n+\t\treturn true;\n+\tdefault:\n+\t\treturn false;\n+\t}\n+}\n+\n #define TF_DEV_P58_BANK_SZ_64B 2048\n /**\n  * Get SRAM table information.\n@@ -265,26 +335,34 @@ static int tf_dev_p58_get_sram_tbl_info(struct tf *tfp __rte_unused,\n \t}\n \treturn 0;\n }\n+\n /**\n  * Truflow P58 device specific functions\n  */\n const struct tf_dev_ops tf_dev_ops_p58_init = {\n \t.tf_dev_get_max_types = tf_dev_p58_get_max_types,\n \t.tf_dev_get_resource_str = tf_dev_p58_get_resource_str,\n+\t.tf_dev_set_tcam_slice_info = tf_dev_p58_set_tcam_slice_info,\n \t.tf_dev_get_tcam_slice_info = tf_dev_p58_get_tcam_slice_info,\n \t.tf_dev_alloc_ident = NULL,\n \t.tf_dev_free_ident = NULL,\n \t.tf_dev_search_ident = NULL,\n \t.tf_dev_get_ident_resc_info = NULL,\n \t.tf_dev_get_tbl_info = NULL,\n+\t.tf_dev_is_sram_managed = tf_dev_p58_is_sram_managed,\n \t.tf_dev_alloc_ext_tbl = NULL,\n \t.tf_dev_alloc_tbl = NULL,\n+\t.tf_dev_alloc_sram_tbl = NULL,\n \t.tf_dev_free_ext_tbl = NULL,\n \t.tf_dev_free_tbl = NULL,\n+\t.tf_dev_free_sram_tbl = NULL,\n \t.tf_dev_set_tbl = NULL,\n \t.tf_dev_set_ext_tbl = NULL,\n+\t.tf_dev_set_sram_tbl = NULL,\n \t.tf_dev_get_tbl = NULL,\n+\t.tf_dev_get_sram_tbl = NULL,\n \t.tf_dev_get_bulk_tbl = NULL,\n+\t.tf_dev_get_bulk_sram_tbl = NULL,\n \t.tf_dev_get_shared_tbl_increment = tf_dev_p58_get_shared_tbl_increment,\n \t.tf_dev_get_tbl_resc_info = NULL,\n \t.tf_dev_alloc_tcam = NULL,\n@@ -316,20 +394,27 @@ const struct tf_dev_ops tf_dev_ops_p58_init = {\n const struct tf_dev_ops tf_dev_ops_p58 = {\n \t.tf_dev_get_max_types = tf_dev_p58_get_max_types,\n \t.tf_dev_get_resource_str = tf_dev_p58_get_resource_str,\n+\t.tf_dev_set_tcam_slice_info = tf_dev_p58_set_tcam_slice_info,\n \t.tf_dev_get_tcam_slice_info = tf_dev_p58_get_tcam_slice_info,\n \t.tf_dev_alloc_ident = tf_ident_alloc,\n \t.tf_dev_free_ident = tf_ident_free,\n \t.tf_dev_search_ident = tf_ident_search,\n \t.tf_dev_get_ident_resc_info = tf_ident_get_resc_info,\n+\t.tf_dev_is_sram_managed = tf_dev_p58_is_sram_managed,\n \t.tf_dev_get_tbl_info = tf_dev_p58_get_sram_tbl_info,\n \t.tf_dev_alloc_tbl = tf_tbl_alloc,\n+\t.tf_dev_alloc_sram_tbl = tf_tbl_sram_alloc,\n \t.tf_dev_alloc_ext_tbl = tf_tbl_ext_alloc,\n \t.tf_dev_free_tbl = tf_tbl_free,\n \t.tf_dev_free_ext_tbl = tf_tbl_ext_free,\n+\t.tf_dev_free_sram_tbl = tf_tbl_sram_free,\n \t.tf_dev_set_tbl = tf_tbl_set,\n \t.tf_dev_set_ext_tbl = tf_tbl_ext_common_set,\n+\t.tf_dev_set_sram_tbl = tf_tbl_sram_set,\n \t.tf_dev_get_tbl = tf_tbl_get,\n+\t.tf_dev_get_sram_tbl = tf_tbl_sram_get,\n \t.tf_dev_get_bulk_tbl = tf_tbl_bulk_get,\n+\t.tf_dev_get_bulk_sram_tbl = tf_tbl_sram_bulk_get,\n \t.tf_dev_get_shared_tbl_increment = 
tf_dev_p58_get_shared_tbl_increment,\n \t.tf_dev_get_tbl_resc_info = tf_tbl_get_resc_info,\n #ifdef TF_TCAM_SHARED\ndiff --git a/drivers/net/bnxt/tf_core/tf_device_p58.h b/drivers/net/bnxt/tf_core/tf_device_p58.h\nindex 8c2e07aa34..3e8759f2df 100644\n--- a/drivers/net/bnxt/tf_core/tf_device_p58.h\n+++ b/drivers/net/bnxt/tf_core/tf_device_p58.h\n@@ -15,107 +15,107 @@\n struct tf_rm_element_cfg tf_ident_p58[TF_IDENT_TYPE_MAX] = {\n \t[TF_IDENT_TYPE_L2_CTXT_HIGH] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_HIGH,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_IDENT_TYPE_L2_CTXT_LOW] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_LOW,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_IDENT_TYPE_PROF_FUNC] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_FUNC,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_IDENT_TYPE_WC_PROF] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_IDENT_TYPE_EM_PROF] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_PROF_ID,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n };\n \n struct tf_rm_element_cfg tf_tcam_p58[TF_TCAM_TBL_TYPE_MAX] = {\n \t[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TCAM_TBL_TYPE_PROF_TCAM] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_TCAM,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TCAM_TBL_TYPE_WC_TCAM] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TCAM_TBL_TYPE_VEB_TCAM] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_VEB_TCAM,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n };\n \n struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {\n \t[TF_TBL_TYPE_EM_FKB] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_FKB,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_WC_FKB] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_FKB,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_METER_PROF] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_PROF,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_METER_INST] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_METER_DROP_CNT] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_DROP_CNT,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_MIRROR_CONFIG] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_MIRROR,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t[TF_TBL_TYPE_METADATA] = {\n \t\tTF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METADATA,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n \t/* Policy - ARs in bank 1 */\n \t[TF_TBL_TYPE_FULL_ACT_RECORD] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,\n-\t\t.slices          = 1,\n+\t\t.slices          = 4,\n \t},\n \t[TF_TBL_TYPE_COMPACT_ACT_RECORD] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n \t\t.parent_subtype  = TF_TBL_TYPE_FULL_ACT_RECORD,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,\n-\t\t.slices          = 1,\n+\t\t.slices          = 8,\n \t},\n \t/* Policy - Encaps in bank 2 */\n \t[TF_TBL_TYPE_ACT_ENCAP_8B] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,\n-\t\t.slices 
         = 1,\n+\t\t.slices          = 8,\n \t},\n \t[TF_TBL_TYPE_ACT_ENCAP_16B] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n \t\t.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,\n-\t\t.slices          = 1,\n+\t\t.slices          = 4,\n \t},\n \t[TF_TBL_TYPE_ACT_ENCAP_32B] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n \t\t.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,\n-\t\t.slices          = 1,\n+\t\t.slices          = 2,\n \t},\n \t[TF_TBL_TYPE_ACT_ENCAP_64B] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n@@ -128,19 +128,19 @@ struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n \t\t.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,\n-\t\t.slices          = 1,\n+\t\t.slices          = 8,\n \t},\n \t[TF_TBL_TYPE_ACT_MODIFY_16B] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n \t\t.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,\n-\t\t.slices          = 1,\n+\t\t.slices          = 4,\n \t},\n \t[TF_TBL_TYPE_ACT_MODIFY_32B] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n \t\t.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,\n-\t\t.slices          = 1,\n+\t\t.slices          = 2,\n \t},\n \t[TF_TBL_TYPE_ACT_MODIFY_64B] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n@@ -152,32 +152,32 @@ struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {\n \t[TF_TBL_TYPE_ACT_SP_SMAC] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,\n-\t\t.slices          = 1,\n+\t\t.slices          = 8,\n \t},\n \t[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n \t\t.parent_subtype  = TF_TBL_TYPE_ACT_SP_SMAC,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,\n-\t\t.slices          = 1,\n+\t\t.slices          = 4,\n \t},\n \t[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,\n \t\t.parent_subtype  = TF_TBL_TYPE_ACT_SP_SMAC,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,\n-\t\t.slices          = 1,\n+\t\t.slices          = 2,\n \t},\n \t/* Policy - Stats in bank 3 */\n \t[TF_TBL_TYPE_ACT_STATS_64] = {\n \t\t.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,\n \t\t.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_3,\n-\t\t.slices          = 1,\n+\t\t.slices          = 8,\n \t},\n };\n \n struct tf_rm_element_cfg tf_em_int_p58[TF_EM_TBL_TYPE_MAX] = {\n \t[TF_EM_TBL_TYPE_EM_RECORD] = {\n \t\tTF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P58_EM_REC,\n-\t\t0, 0, 0\n+\t\t0, 0\n \t},\n };\n \ndiff --git a/drivers/net/bnxt/tf_core/tf_msg.c b/drivers/net/bnxt/tf_core/tf_msg.c\nindex e07d9168be..0fbb2fe837 100644\n--- a/drivers/net/bnxt/tf_core/tf_msg.c\n+++ b/drivers/net/bnxt/tf_core/tf_msg.c\n@@ -2231,7 +2231,7 @@ tf_msg_get_if_tbl_entry(struct tf *tfp,\n \tif (rc != 0)\n \t\treturn rc;\n \n-\ttfp_memcpy(params->data, resp.data, req.size);\n+\ttfp_memcpy(&params->data[0], resp.data, req.size);\n \n \treturn 0;\n }\ndiff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c\nindex 0a46e2a343..03c958a7d6 100644\n--- a/drivers/net/bnxt/tf_core/tf_rm.c\n+++ b/drivers/net/bnxt/tf_core/tf_rm.c\n@@ -34,6 +34,12 @@ struct 
tf_rm_element {\n \t */\n \tuint16_t hcapi_type;\n \n+\t/**\n+\t * Resource slices.  How many slices will fit in the\n+\t * resource pool chunk size.\n+\t */\n+\tuint8_t slices;\n+\n \t/**\n \t * HCAPI RM allocated range information for the element.\n \t */\n@@ -356,12 +362,15 @@ tf_rm_check_residuals(struct tf_rm_new_db *rm_db,\n  *     -          - Failure if negative\n  */\n static int\n-tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,\n+tf_rm_update_parent_reservations(struct tf *tfp,\n+\t\t\t\t struct tf_dev_info *dev,\n+\t\t\t\t struct tf_rm_element_cfg *cfg,\n \t\t\t\t uint16_t *alloc_cnt,\n \t\t\t\t uint16_t num_elements,\n \t\t\t\t uint16_t *req_cnt)\n {\n \tint parent, child;\n+\tconst char *type_str;\n \n \t/* Search through all the elements */\n \tfor (parent = 0; parent < num_elements; parent++) {\n@@ -377,15 +386,25 @@ tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,\n \t\t\tif (alloc_cnt[parent] % cfg[parent].slices)\n \t\t\t\tcombined_cnt++;\n \n+\t\t\tif (alloc_cnt[parent]) {\n+\t\t\t\tdev->ops->tf_dev_get_resource_str(tfp,\n+\t\t\t\t\t\t\t cfg[parent].hcapi_type,\n+\t\t\t\t\t\t\t &type_str);\n+\t\t\t}\n+\n \t\t\t/* Search again through all the elements */\n \t\t\tfor (child = 0; child < num_elements; child++) {\n \t\t\t\t/* If this is one of my children */\n \t\t\t\tif (cfg[child].cfg_type ==\n \t\t\t\t    TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&\n-\t\t\t\t    cfg[child].parent_subtype == parent) {\n+\t\t\t\t    cfg[child].parent_subtype == parent &&\n+\t\t\t\t    alloc_cnt[child]) {\n \t\t\t\t\tuint16_t cnt = 0;\n \t\t\t\t\tRTE_ASSERT(cfg[child].slices);\n \n+\t\t\t\t\tdev->ops->tf_dev_get_resource_str(tfp,\n+\t\t\t\t\t\t\t  cfg[child].hcapi_type,\n+\t\t\t\t\t\t\t   &type_str);\n \t\t\t\t\t/* Increment the parents combined count\n \t\t\t\t\t * with each child's count adjusted for\n \t\t\t\t\t * number of slices per RM allocated item.\n@@ -479,7 +498,7 @@ tf_rm_create_db(struct tf *tfp,\n \n \t/* Update the req_cnt based upon the element configuration\n \t */\n-\ttf_rm_update_parent_reservations(parms->cfg,\n+\ttf_rm_update_parent_reservations(tfp, dev, parms->cfg,\n \t\t\t\t\t parms->alloc_cnt,\n \t\t\t\t\t parms->num_elements,\n \t\t\t\t\t req_cnt);\n@@ -594,6 +613,7 @@ tf_rm_create_db(struct tf *tfp,\n \n \t\tdb[i].cfg_type = cfg->cfg_type;\n \t\tdb[i].hcapi_type = cfg->hcapi_type;\n+\t\tdb[i].slices = cfg->slices;\n \n \t\t/* Save the parent subtype for later use to find the pool\n \t\t */\n@@ -1271,6 +1291,26 @@ tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)\n \n \treturn 0;\n }\n+int\n+tf_rm_get_slices(struct tf_rm_get_slices_parms *parms)\n+{\n+\tstruct tf_rm_new_db *rm_db;\n+\tenum tf_rm_elem_cfg_type cfg_type;\n+\n+\tTF_CHECK_PARMS2(parms, parms->rm_db);\n+\trm_db = (struct tf_rm_new_db *)parms->rm_db;\n+\tTF_CHECK_PARMS1(rm_db->db);\n+\n+\tcfg_type = rm_db->db[parms->subtype].cfg_type;\n+\n+\t/* Bail out if not controlled by HCAPI */\n+\tif (cfg_type == TF_RM_ELEM_CFG_NULL)\n+\t\treturn -ENOTSUP;\n+\n+\t*parms->slices = rm_db->db[parms->subtype].slices;\n+\n+\treturn 0;\n+}\n \n int\n tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)\ndiff --git a/drivers/net/bnxt/tf_core/tf_rm.h b/drivers/net/bnxt/tf_core/tf_rm.h\nindex 8b984112e8..da7d0c7211 100644\n--- a/drivers/net/bnxt/tf_core/tf_rm.h\n+++ b/drivers/net/bnxt/tf_core/tf_rm.h\n@@ -43,16 +43,6 @@ struct tf;\n  * support module, not called directly.\n  */\n \n-/**\n- * Resource reservation single entry result. 
Used when accessing HCAPI\n- * RM on the firmware.\n- */\n-struct tf_rm_new_entry {\n-\t/** Starting index of the allocated resource */\n-\tuint16_t start;\n-\t/** Number of allocated elements */\n-\tuint16_t stride;\n-};\n \n /**\n  * RM Element configuration enumeration. Used by the Device to\n@@ -114,10 +104,6 @@ struct tf_rm_element_cfg {\n \t */\n \tenum tf_rm_elem_cfg_type cfg_type;\n \n-\t/* If a HCAPI to TF type conversion is required then TF type\n-\t * can be added here.\n-\t */\n-\n \t/**\n \t * HCAPI RM Type for the element. Used for TF to HCAPI type\n \t * conversion.\n@@ -125,28 +111,19 @@ struct tf_rm_element_cfg {\n \tuint16_t hcapi_type;\n \n \t/**\n-\t * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD\n+\t * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD/PARENT\n \t *\n \t * Parent Truflow module subtype associated with this resource type.\n \t */\n \tuint16_t parent_subtype;\n \n \t/**\n-\t * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD\n+\t * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD/PARENT\n \t *\n \t * Resource slices.  How many slices will fit in the\n \t * resource pool chunk size.\n \t */\n \tuint8_t slices;\n-\n-\t/**\n-\t * Pool element divider count\n-\t * If 0 or 1, there is 1:1 correspondence between the RM\n-\t * BA pool resource element and the HCAPI RM firmware\n-\t * resource.  If > 1, the RM BA pool element has a 1:n\n-\t * correspondence to the HCAPI RM firmware resource.\n-\t */\n-\tuint8_t divider;\n };\n \n /**\n@@ -160,7 +137,7 @@ struct tf_rm_alloc_info {\n \t * In case of dynamic allocation support this would have\n \t * to be changed to linked list of tf_rm_entry instead.\n \t */\n-\tstruct tf_rm_new_entry entry;\n+\tstruct tf_resource_info entry;\n };\n \n /**\n@@ -331,6 +308,25 @@ struct tf_rm_get_hcapi_parms {\n \t */\n \tuint16_t *hcapi_type;\n };\n+/**\n+ * Get Slices parameters for a single element\n+ */\n+struct tf_rm_get_slices_parms {\n+\t/**\n+\t * [in] RM DB Handle\n+\t */\n+\tvoid *rm_db;\n+\t/**\n+\t * [in] TF subtype indicates which DB entry to perform the\n+\t * action on. (e.g. 
TF_TBL_TYPE_FULL_ACTION subtype of module\n+\t * TF_MODULE_TYPE_TABLE)\n+\t */\n+\tuint16_t subtype;\n+\t/**\n+\t * [in/out] Pointer to number of slices for the given type\n+\t */\n+\tuint16_t *slices;\n+};\n \n /**\n  * Get InUse count parameters for single element\n@@ -394,6 +390,8 @@ struct tf_rm_check_indexes_in_range_parms {\n  * @ref tf_rm_get_hcapi_type\n  *\n  * @ref tf_rm_get_inuse_count\n+ *\n+ * @ref tf_rm_get_slice_size\n  */\n \n /**\n@@ -571,5 +569,17 @@ int tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms);\n int\n tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms);\n \n+/**\n+ * Get the number of slices per resource bit allocator for the resource type\n+ *\n+ * [in] parms\n+ *   Pointer to get inuse parameters\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+int\n+tf_rm_get_slices(struct tf_rm_get_slices_parms *parms);\n \n #endif /* TF_RM_NEW_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/tf_session.c b/drivers/net/bnxt/tf_core/tf_session.c\nindex 90b65c59e6..3e6664e9f2 100644\n--- a/drivers/net/bnxt/tf_core/tf_session.c\n+++ b/drivers/net/bnxt/tf_core/tf_session.c\n@@ -202,6 +202,7 @@ tf_session_create(struct tf *tfp,\n \t\t\t parms->open_cfg->device_type,\n \t\t\t session->shadow_copy,\n \t\t\t &parms->open_cfg->resources,\n+\t\t\t parms->open_cfg->wc_num_slices,\n \t\t\t &session->dev);\n \n \t/* Logging handled by dev_bind */\n@@ -705,6 +706,22 @@ tf_session_get_session(struct tf *tfp,\n \treturn rc;\n }\n \n+int tf_session_get(struct tf *tfp,\n+\t\t   struct tf_session **tfs,\n+\t\t   struct tf_dev_info **tfd)\n+{\n+\tint rc;\n+\trc = tf_session_get_session_internal(tfp, tfs);\n+\n+\t/* Logging done by tf_session_get_session_internal */\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = tf_session_get_device(*tfs, tfd);\n+\n+\treturn rc;\n+}\n+\n struct tf_session_client *\n tf_session_get_session_client(struct tf_session *tfs,\n \t\t\t      union tf_session_client_id session_client_id)\n@@ -1012,4 +1029,43 @@ tf_session_set_tcam_shared_db(struct tf *tfp,\n \ttfs->tcam_shared_db_handle = tcam_shared_db_handle;\n \treturn rc;\n }\n+\n+int\n+tf_session_get_sram_db(struct tf *tfp,\n+\t\t       void **sram_handle)\n+{\n+\tstruct tf_session *tfs = NULL;\n+\tint rc = 0;\n+\n+\t*sram_handle = NULL;\n+\n+\tif (tfp == NULL)\n+\t\treturn (-EINVAL);\n+\n+\trc = tf_session_get_session_internal(tfp, &tfs);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\t*sram_handle = tfs->sram_handle;\n+\treturn rc;\n+}\n+\n+int\n+tf_session_set_sram_db(struct tf *tfp,\n+\t\t       void *sram_handle)\n+{\n+\tstruct tf_session *tfs = NULL;\n+\tint rc = 0;\n+\n+\tif (tfp == NULL)\n+\t\treturn (-EINVAL);\n+\n+\trc = tf_session_get_session_internal(tfp, &tfs);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\ttfs->sram_handle = sram_handle;\n+\treturn rc;\n+}\n+\n #endif /* TF_TCAM_SHARED */\ndiff --git a/drivers/net/bnxt/tf_core/tf_session.h b/drivers/net/bnxt/tf_core/tf_session.h\nindex d68421cd13..c1d7f70060 100644\n--- a/drivers/net/bnxt/tf_core/tf_session.h\n+++ b/drivers/net/bnxt/tf_core/tf_session.h\n@@ -166,6 +166,10 @@ struct tf_session {\n \t */\n \tvoid *tcam_shared_db_handle;\n #endif /* TF_TCAM_SHARED */\n+\t/**\n+\t * SRAM db reference for the session\n+\t */\n+\tvoid *sram_handle;\n };\n \n /**\n@@ -278,6 +282,10 @@ struct tf_session_close_session_parms {\n  *\n  * @ref tf_session_set_tcam_shared_db\n  * #endif\n+ *\n+ * @ref tf_session_get_sram_db\n+ *\n+ * @ref tf_session_set_sram_db\n  */\n \n /**\n@@ -435,11 +443,11 @@ 
tf_session_find_session_client_by_fid(struct tf_session *tfs,\n /**\n  * Looks up the device information from the TF Session.\n  *\n- * [in] tfp\n- *   Pointer to TF handle\n+ * [in] tfs\n+ *   Pointer to session handle\n  *\n  * [out] tfd\n- *   Pointer pointer to the device\n+ *   Pointer to the device\n  *\n  * Returns\n  *   - (0) if successful.\n@@ -448,6 +456,26 @@ tf_session_find_session_client_by_fid(struct tf_session *tfs,\n int tf_session_get_device(struct tf_session *tfs,\n \t\t\t  struct tf_dev_info **tfd);\n \n+/**\n+ * Returns the session and the device from the tfp.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle\n+ *\n+ * [out] tfs\n+ *   Pointer to the session\n+ *\n+ * [out] tfd\n+ *   Pointer to the device\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+int tf_session_get(struct tf *tfp,\n+\t\t   struct tf_session **tfs,\n+\t\t   struct tf_dev_info **tfd);\n+\n /**\n  * Looks up the FW Session id the requested TF handle.\n  *\n@@ -614,4 +642,28 @@ int\n tf_session_get_tcam_shared_db(struct tf *tfp,\n \t\t\t      void **tcam_shared_db_handle);\n \n+/**\n+ * Set the pointer to the SRAM database\n+ *\n+ * [in] tfp, pointer to the TF handle\n+ *\n+ * Returns:\n+ *   - (0) if successful, (-EINVAL) on failure\n+ */\n+int\n+tf_session_set_sram_db(struct tf *tfp,\n+\t\t       void *sram_handle);\n+\n+/**\n+ * Get the pointer to the SRAM database\n+ *\n+ * [in] tfp, pointer to the TF handle\n+ *\n+ * Returns:\n+ *   - (0) if successful, (-EINVAL) on failure\n+ */\n+int\n+tf_session_get_sram_db(struct tf *tfp,\n+\t\t       void **sram_handle);\n+\n #endif /* _TF_SESSION_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/tf_sram_mgr.c b/drivers/net/bnxt/tf_core/tf_sram_mgr.c\nnew file mode 100644\nindex 0000000000..f633a78b25\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/tf_sram_mgr.c\n@@ -0,0 +1,971 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2021 Broadcom\n+ * All rights reserved.\n+ */\n+#include <stdlib.h>\n+#include <stdio.h>\n+#include \"tf_sram_mgr.h\"\n+#include \"tf_core.h\"\n+#include \"tf_rm.h\"\n+#include \"tf_common.h\"\n+#include \"assert.h\"\n+#include \"tf_util.h\"\n+#include \"tfp.h\"\n+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)\n+#include \"tf_msg.h\"\n+#endif\n+/***************************\n+ * Internal Data Structures\n+ ***************************/\n+\n+/**\n+ * TF SRAM block info\n+ *\n+ * Contains all the information about a particular 64B SRAM\n+ * block and the slices within it.\n+ */\n+struct tf_sram_block {\n+\t/* Previous block\n+\t */\n+\tstruct tf_sram_block *prev;\n+\t/* Next block\n+\t */\n+\tstruct tf_sram_block *next;\n+\n+\t/** Bitmap indicating which slices are in use\n+\t *  If a bit is set, it indicates the slice\n+\t *  in the row is in use.\n+\t */\n+\tuint8_t in_use_mask;\n+\n+\t/** Block id - this is a 64B offset\n+\t */\n+\tuint16_t block_id;\n+};\n+\n+/**\n+ * TF SRAM block list\n+ *\n+ * List of 64B SRAM blocks used for fixed size slices (8, 16, 32, 64B)\n+ */\n+struct tf_sram_slice_list {\n+\t/** Pointer to head of linked list of blocks.\n+\t */\n+\tstruct tf_sram_block *head;\n+\n+\t/** Pointer to tail of linked list of blocks.\n+\t */\n+\tstruct tf_sram_block *tail;\n+\n+\t/** Total count of blocks\n+\t */\n+\tuint32_t cnt;\n+\n+\t/** First non-full block in the list\n+\t */\n+\tstruct tf_sram_block *first_not_full_block;\n+\n+\t/** Entry slice size for this list\n+\t */\n+\tenum tf_sram_slice_size size;\n+};\n+\n+\n+/**\n+ * TF SRAM bank info consists of lists of different slice sizes 
per bank\n+ */\n+struct tf_sram_bank_info {\n+\tstruct tf_sram_slice_list slice[TF_SRAM_SLICE_SIZE_MAX];\n+};\n+\n+/**\n+ * SRAM banks consist of SRAM bank information\n+ */\n+struct tf_sram_bank {\n+\tstruct tf_sram_bank_info bank[TF_SRAM_BANK_ID_MAX];\n+};\n+\n+/**\n+ * SRAM banks consist of SRAM bank information\n+ */\n+struct tf_sram {\n+\tstruct tf_sram_bank dir[TF_DIR_MAX];\n+};\n+\n+/**********************\n+ * Internal functions\n+ **********************/\n+\n+/**\n+ * Get slice size in string format\n+ */\n+const char\n+*tf_sram_slice_2_str(enum tf_sram_slice_size slice_size)\n+{\n+\tswitch (slice_size) {\n+\tcase TF_SRAM_SLICE_SIZE_8B:\n+\t\treturn \"8B slice\";\n+\tcase TF_SRAM_SLICE_SIZE_16B:\n+\t\treturn \"16B slice\";\n+\tcase TF_SRAM_SLICE_SIZE_32B:\n+\t\treturn \"32B slice\";\n+\tcase TF_SRAM_SLICE_SIZE_64B:\n+\t\treturn \"64B slice\";\n+\tdefault:\n+\t\treturn \"Invalid slice size\";\n+\t}\n+}\n+\n+/**\n+ * Get bank in string format\n+ */\n+const char\n+*tf_sram_bank_2_str(enum tf_sram_bank_id bank_id)\n+{\n+\tswitch (bank_id) {\n+\tcase TF_SRAM_BANK_ID_0:\n+\t\treturn \"bank_0\";\n+\tcase TF_SRAM_BANK_ID_1:\n+\t\treturn \"bank_1\";\n+\tcase TF_SRAM_BANK_ID_2:\n+\t\treturn \"bank_2\";\n+\tcase TF_SRAM_BANK_ID_3:\n+\t\treturn \"bank_3\";\n+\tdefault:\n+\t\treturn \"Invalid bank_id\";\n+\t}\n+}\n+\n+/**\n+ * TF SRAM get slice list\n+ */\n+static int\n+tf_sram_get_slice_list(struct tf_sram *sram,\n+\t\t       struct tf_sram_slice_list **slice_list,\n+\t\t       enum tf_sram_slice_size slice_size,\n+\t\t       enum tf_dir dir,\n+\t\t       enum tf_sram_bank_id bank_id)\n+{\n+\tint rc = 0;\n+\n+\tTF_CHECK_PARMS2(sram, slice_list);\n+\n+\t*slice_list = &sram->dir[dir].bank[bank_id].slice[slice_size];\n+\n+\treturn rc;\n+}\n+\n+uint16_t tf_sram_bank_2_base_offset[TF_SRAM_BANK_ID_MAX] = {\n+\t0,\n+\t2048,\n+\t4096,\n+\t6144\n+};\n+\n+/**\n+ * Translate a block id and bank_id to an 8B offset\n+ */\n+static void\n+tf_sram_block_id_2_offset(enum tf_sram_bank_id bank_id, uint16_t block_id,\n+\t\t\t  uint16_t *offset)\n+{\n+\t*offset = (block_id + tf_sram_bank_2_base_offset[bank_id]) << 3;\n+}\n+\n+/**\n+ * Translates an 8B offset and bank_id to a block_id\n+ */\n+static void\n+tf_sram_offset_2_block_id(enum tf_sram_bank_id bank_id, uint16_t offset,\n+\t\t\t  uint16_t *block_id, uint16_t *slice_offset)\n+{\n+\t*slice_offset = offset & 0x7;\n+\t*block_id = ((offset & ~0x7) >> 3) -\n+\t\t    tf_sram_bank_2_base_offset[bank_id];\n+}\n+\n+/**\n+ * Find a matching block_id within the slice list\n+ */\n+static struct tf_sram_block\n+*tf_sram_find_block(uint16_t block_id, struct tf_sram_slice_list *slice_list)\n+{\n+\tuint32_t cnt;\n+\tstruct tf_sram_block *block;\n+\n+\tcnt = slice_list->cnt;\n+\tblock = slice_list->head;\n+\n+\twhile (cnt > 0 && block) {\n+\t\tif (block->block_id == block_id)\n+\t\t\treturn block;\n+\t\tblock = block->next;\n+\t\tcnt--;\n+\t}\n+\treturn NULL;\n+}\n+\n+/**\n+ * Given the current block get the next block within the slice list\n+ *\n+ * List is not changed.\n+ */\n+static struct tf_sram_block\n+*tf_sram_get_next_block(struct tf_sram_block *block)\n+{\n+\tstruct tf_sram_block *nblock;\n+\n+\tif (block != NULL)\n+\t\tnblock = block->next;\n+\telse\n+\t\tnblock = NULL;\n+\treturn nblock;\n+}\n+\n+/**\n+ * Free an allocated slice from a block and if the block is empty,\n+ * return an indication so that the block can be freed.\n+ */\n+static int\n+tf_sram_free_slice(enum tf_sram_slice_size slice_size,\n+\t\t   uint16_t slice_offset, struct tf_sram_block 
*block,\n+\t\t   bool *block_is_empty)\n+{\n+\tint rc = 0;\n+\tuint8_t shift;\n+\tuint8_t slice_mask = 0;\n+\n+\tTF_CHECK_PARMS2(block, block_is_empty);\n+\n+\tswitch (slice_size) {\n+\tcase TF_SRAM_SLICE_SIZE_8B:\n+\t\tshift = slice_offset >> 0;\n+\t\tassert(shift < 8);\n+\t\tslice_mask = 1 << shift;\n+\t\tbreak;\n+\n+\tcase TF_SRAM_SLICE_SIZE_16B:\n+\t\tshift = slice_offset >> 1;\n+\t\tassert(shift < 4);\n+\t\tslice_mask = 1 << shift;\n+\t\tbreak;\n+\n+\tcase TF_SRAM_SLICE_SIZE_32B:\n+\t\tshift = slice_offset >> 2;\n+\t\tassert(shift < 2);\n+\t\tslice_mask = 1 << shift;\n+\t\tbreak;\n+\n+\tcase TF_SRAM_SLICE_SIZE_64B:\n+\tdefault:\n+\t\tshift = slice_offset >> 0;\n+\t\tassert(shift < 1);\n+\t\tslice_mask = 1 << shift;\n+\t\tbreak;\n+\t}\n+\n+\tif ((block->in_use_mask & slice_mask) == 0) {\n+\t\trc = -EINVAL;\n+\t\tTFP_DRV_LOG(ERR, \"block_id(0x%x) slice(%d) was not allocated\\n\",\n+\t\t\t    block->block_id, slice_offset);\n+\t\treturn rc;\n+\t}\n+\n+\tblock->in_use_mask &= ~slice_mask;\n+\n+\tif (block->in_use_mask == 0)\n+\t\t*block_is_empty = true;\n+\telse\n+\t\t*block_is_empty = false;\n+\n+\treturn rc;\n+}\n+\n+/**\n+ * TF SRAM get next slice\n+ *\n+ * Gets the next slice_offset available in the block\n+ * and updates the in_use_mask.\n+ */\n+static int\n+tf_sram_get_next_slice_in_block(struct tf_sram_block *block,\n+\t\t\t\tenum tf_sram_slice_size slice_size,\n+\t\t\t\tuint16_t *slice_offset,\n+\t\t\t\tbool *block_is_full)\n+{\n+\tint rc, free_id = -1;\n+\tuint8_t shift, max_slices, mask, i, full_mask;\n+\n+\tTF_CHECK_PARMS3(block, slice_offset, block_is_full);\n+\n+\tswitch (slice_size) {\n+\tcase TF_SRAM_SLICE_SIZE_8B:\n+\t\tshift      = 0;\n+\t\tmax_slices = 8;\n+\t\tfull_mask  = 0xff;\n+\t\tbreak;\n+\tcase TF_SRAM_SLICE_SIZE_16B:\n+\t\tshift      = 1;\n+\t\tmax_slices = 4;\n+\t\tfull_mask  = 0xf;\n+\t\tbreak;\n+\tcase TF_SRAM_SLICE_SIZE_32B:\n+\t\tshift      = 2;\n+\t\tmax_slices = 2;\n+\t\tfull_mask  = 0x3;\n+\t\tbreak;\n+\tcase TF_SRAM_SLICE_SIZE_64B:\n+\tdefault:\n+\t\tshift      = 0;\n+\t\tmax_slices = 1;\n+\t\tfull_mask  = 1;\n+\t\tbreak;\n+\t}\n+\n+\tmask = block->in_use_mask;\n+\n+\tfor (i = 0; i < max_slices; i++) {\n+\t\tif ((mask & 1) == 0) {\n+\t\t\tfree_id = i;\n+\t\t\tblock->in_use_mask |= 1 << free_id;\n+\t\t\tbreak;\n+\t\t}\n+\t\tmask = mask >> 1;\n+\t}\n+\n+\tif (block->in_use_mask == full_mask)\n+\t\t*block_is_full = true;\n+\telse\n+\t\t*block_is_full = false;\n+\n+\n+\tif (free_id >= 0) {\n+\t\t*slice_offset = free_id << shift;\n+\t\trc = 0;\n+\t} else {\n+\t\t*slice_offset = 0;\n+\t\trc = -ENOMEM;\n+\t}\n+\n+\treturn rc;\n+}\n+\n+/**\n+ * TF SRAM get indication as to whether the slice offset is\n+ * allocated in the block.\n+ *\n+ */\n+static int\n+tf_sram_is_slice_allocated_in_block(struct tf_sram_block *block,\n+\t\t\t\t    enum tf_sram_slice_size slice_size,\n+\t\t\t\t    uint16_t slice_offset,\n+\t\t\t\t    bool *is_allocated)\n+{\n+\tint rc = 0;\n+\tuint8_t shift;\n+\tuint8_t slice_mask = 0;\n+\n+\tTF_CHECK_PARMS2(block, is_allocated);\n+\n+\t*is_allocated = false;\n+\n+\tswitch (slice_size) {\n+\tcase TF_SRAM_SLICE_SIZE_8B:\n+\t\tshift = slice_offset >> 0;\n+\t\tassert(shift < 8);\n+\t\tslice_mask = 1 << shift;\n+\t\tbreak;\n+\n+\tcase TF_SRAM_SLICE_SIZE_16B:\n+\t\tshift = slice_offset >> 1;\n+\t\tassert(shift < 4);\n+\t\tslice_mask = 1 << shift;\n+\t\tbreak;\n+\n+\tcase TF_SRAM_SLICE_SIZE_32B:\n+\t\tshift = slice_offset >> 2;\n+\t\tassert(shift < 2);\n+\t\tslice_mask = 1 << shift;\n+\t\tbreak;\n+\n+\tcase 
TF_SRAM_SLICE_SIZE_64B:\n+\tdefault:\n+\t\tshift = slice_offset >> 0;\n+\t\tassert(shift < 1);\n+\t\tslice_mask = 1 << shift;\n+\t\tbreak;\n+\t}\n+\n+\tif ((block->in_use_mask & slice_mask) == 0) {\n+\t\tTFP_DRV_LOG(ERR, \"block_id(0x%x) slice(%d) was not allocated\\n\",\n+\t\t\t    block->block_id, slice_offset);\n+\t\t*is_allocated = false;\n+\t} else {\n+\t\t*is_allocated = true;\n+\t}\n+\n+\treturn rc;\n+}\n+\n+/**\n+ * Initialize slice list\n+ */\n+static void\n+tf_sram_init_slice_list(struct tf_sram_slice_list *slice_list,\n+\t\t\tenum tf_sram_slice_size slice_size)\n+{\n+\tslice_list->head = NULL;\n+\tslice_list->tail = NULL;\n+\tslice_list->cnt = 0;\n+\tslice_list->size = slice_size;\n+}\n+\n+/**\n+ * Get the block count\n+ */\n+static uint32_t\n+tf_sram_get_block_cnt(struct tf_sram_slice_list *slice_list)\n+{\n+\treturn slice_list->cnt;\n+}\n+\n+\n+/**\n+ * Free a block data structure - does not free to the RM\n+ */\n+static void\n+tf_sram_free_block(struct tf_sram_slice_list *slice_list,\n+\t\t   struct tf_sram_block *block)\n+{\n+\tif (slice_list->head == block && slice_list->tail == block) {\n+\t\tslice_list->head = NULL;\n+\t\tslice_list->tail = NULL;\n+\t} else if (slice_list->head == block) {\n+\t\tslice_list->head = block->next;\n+\t\tslice_list->head->prev = NULL;\n+\t} else if (slice_list->tail == block) {\n+\t\tslice_list->tail = block->prev;\n+\t\tslice_list->tail->next = NULL;\n+\t} else {\n+\t\tblock->prev->next = block->next;\n+\t\tblock->next->prev = block->prev;\n+\t}\n+\ttfp_free(block);\n+\tslice_list->cnt--;\n+}\n+/**\n+ * Free the entire slice_list\n+ */\n+static void\n+tf_sram_free_slice_list(struct tf_sram_slice_list *slice_list)\n+{\n+\tuint32_t i, block_cnt;\n+\tstruct tf_sram_block *nblock, *block;\n+\n+\tblock_cnt = tf_sram_get_block_cnt(slice_list);\n+\tblock = slice_list->head;\n+\n+\tfor (i = 0; i < block_cnt; i++) {\n+\t\tnblock = block->next;\n+\t\ttf_sram_free_block(slice_list, block);\n+\t\tblock = nblock;\n+\t}\n+}\n+\n+/**\n+ * Allocate a single SRAM block from memory and add it to the slice list\n+ */\n+static struct tf_sram_block\n+*tf_sram_alloc_block(struct tf_sram_slice_list *slice_list,\n+\t\t     uint16_t block_id)\n+{\n+\tstruct tf_sram_block *block;\n+\tstruct tfp_calloc_parms cparms;\n+\tint rc;\n+\n+\tcparms.nitems = 1;\n+\tcparms.size = sizeof(struct tf_sram_block);\n+\tcparms.alignment = 0;\n+\trc = tfp_calloc(&cparms);\n+\tif (rc) {\n+\t\t/* Log error */\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to allocate block, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn NULL;\n+\t}\n+\tblock = (struct tf_sram_block *)cparms.mem_va;\n+\tblock->block_id = block_id;\n+\n+\tif (slice_list->head == NULL) {\n+\t\tslice_list->head = block;\n+\t\tslice_list->tail = block;\n+\t\tblock->next = NULL;\n+\t\tblock->prev = NULL;\n+\t} else {\n+\t\tblock->next = slice_list->head;\n+\t\tblock->prev = NULL;\n+\t\tblock->next->prev = block;\n+\t\tslice_list->head = block->next->prev;\n+\t}\n+\tslice_list->cnt++;\n+\treturn block;\n+}\n+\n+/**\n+ * Find the first not full block in the slice list\n+ */\n+static void\n+tf_sram_find_first_not_full_block(struct tf_sram_slice_list *slice_list,\n+\t\t\t\t  enum tf_sram_slice_size slice_size,\n+\t\t\t\t  struct tf_sram_block **first_not_full_block)\n+{\n+\tstruct tf_sram_block *block = slice_list->head;\n+\tuint8_t slice_mask, mask;\n+\n+\tswitch (slice_size) {\n+\tcase TF_SRAM_SLICE_SIZE_8B:\n+\t\tslice_mask = 0xff;\n+\t\tbreak;\n+\n+\tcase TF_SRAM_SLICE_SIZE_16B:\n+\t\tslice_mask = 
0xf;\n+\t\tbreak;\n+\n+\tcase TF_SRAM_SLICE_SIZE_32B:\n+\t\tslice_mask = 0x3;\n+\t\tbreak;\n+\n+\tcase TF_SRAM_SLICE_SIZE_64B:\n+\tdefault:\n+\t\tslice_mask = 0x1;\n+\t\tbreak;\n+\t}\n+\n+\t*first_not_full_block = NULL;\n+\n+\twhile (block) {\n+\t\tmask = block->in_use_mask & slice_mask;\n+\t\tif (mask != slice_mask) {\n+\t\t\t*first_not_full_block = block;\n+\t\t\tbreak;\n+\t\t}\n+\t\tblock = block->next;\n+\t}\n+}\n+static void\n+tf_sram_dump_block(struct tf_sram_block *block)\n+{\n+\tTFP_DRV_LOG(INFO, \"block_id(0x%x) in_use_mask(0x%02x)\\n\",\n+\t\t    block->block_id,\n+\t\t    block->in_use_mask);\n+}\n+\n+/**********************\n+ * External functions\n+ **********************/\n+int\n+tf_sram_mgr_bind(void **sram_handle)\n+{\n+\tint rc = 0;\n+\tenum tf_sram_bank_id bank_id;\n+\tenum tf_sram_slice_size slice_size;\n+\tstruct tf_sram *sram;\n+\tstruct tf_sram_slice_list *slice_list;\n+\tenum tf_dir dir;\n+\tstruct tfp_calloc_parms cparms;\n+\n+\tTF_CHECK_PARMS1(sram_handle);\n+\n+\tcparms.nitems = 1;\n+\tcparms.size = sizeof(struct tf_sram);\n+\tcparms.alignment = 0;\n+\trc = tfp_calloc(&cparms);\n+\tif (rc) {\n+\t\t/* Log error */\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to allocate SRAM mgmt data, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\tsram = (struct tf_sram *)cparms.mem_va;\n+\n+\t/* For each direction\n+\t */\n+\tfor (dir = 0; dir < TF_DIR_MAX; dir++) {\n+\t\t/* For each bank\n+\t\t */\n+\t\tfor (bank_id = TF_SRAM_BANK_ID_0;\n+\t\t     bank_id < TF_SRAM_BANK_ID_MAX;\n+\t\t     bank_id++) {\n+\t\t\t/* Create each sized slice empty list\n+\t\t\t */\n+\t\t\tfor (slice_size = TF_SRAM_SLICE_SIZE_8B;\n+\t\t\t     slice_size < TF_SRAM_SLICE_SIZE_MAX;\n+\t\t\t     slice_size++) {\n+\t\t\t\trc = tf_sram_get_slice_list(sram, &slice_list,\n+\t\t\t\t\t\t\t    slice_size, dir,\n+\t\t\t\t\t\t\t    bank_id);\n+\t\t\t\tif (rc) {\n+\t\t\t\t\t/* Log error */\n+\t\t\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t\t\t  \"No SRAM slice list, rc:%s\\n\",\n+\t\t\t\t\t\t  strerror(-rc));\n+\t\t\t\t\treturn rc;\n+\t\t\t\t}\n+\t\t\t\ttf_sram_init_slice_list(slice_list, slice_size);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t*sram_handle = sram;\n+\n+\treturn rc;\n+}\n+\n+int\n+tf_sram_mgr_unbind(void *sram_handle)\n+{\n+\tint rc = 0;\n+\tstruct tf_sram *sram;\n+\tenum tf_sram_bank_id bank_id;\n+\tenum tf_sram_slice_size slice_size;\n+\tenum tf_dir dir;\n+\tstruct tf_sram_slice_list *slice_list;\n+\n+\tTF_CHECK_PARMS1(sram_handle);\n+\n+\tsram = (struct tf_sram *)sram_handle;\n+\n+\tfor (dir = 0; dir < TF_DIR_MAX; dir++) {\n+\t\t/* For each bank\n+\t\t */\n+\t\tfor (bank_id = TF_SRAM_BANK_ID_0;\n+\t\t     bank_id < TF_SRAM_BANK_ID_MAX;\n+\t\t     bank_id++) {\n+\t\t\t/* For each slice size\n+\t\t\t */\n+\t\t\tfor (slice_size = TF_SRAM_SLICE_SIZE_8B;\n+\t\t\t     slice_size < TF_SRAM_SLICE_SIZE_MAX;\n+\t\t\t     slice_size++) {\n+\t\t\t\trc = tf_sram_get_slice_list(sram, &slice_list,\n+\t\t\t\t\t\t\t    slice_size, dir,\n+\t\t\t\t\t\t\t    bank_id);\n+\t\t\t\tif (rc) {\n+\t\t\t\t\t/* Log error */\n+\t\t\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t\t\t  \"No SRAM slice list, rc:%s\\n\",\n+\t\t\t\t\t\t  strerror(-rc));\n+\t\t\t\t\treturn rc;\n+\t\t\t\t}\n+\t\t\t\tif (tf_sram_get_block_cnt(slice_list))\n+\t\t\t\t\ttf_sram_free_slice_list(slice_list);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\ttfp_free(sram);\n+\tsram_handle = NULL;\n+\n+\t/* Freeing of the RM resources is handled by the table manager */\n+\treturn rc;\n+}\n+\n+int tf_sram_mgr_alloc(void *sram_handle,\n+\t\t      struct tf_sram_mgr_alloc_parms *parms)\n+{\n+\tint 
rc = 0;\n+\tstruct tf_sram *sram;\n+\tstruct tf_sram_slice_list *slice_list;\n+\tuint16_t block_id, slice_offset = 0;\n+\tuint32_t index;\n+\tstruct tf_sram_block *block;\n+\tstruct tf_rm_allocate_parms aparms = { 0 };\n+\tbool block_is_full;\n+\tuint16_t block_offset;\n+\n+\tTF_CHECK_PARMS3(sram_handle, parms, parms->sram_offset);\n+\n+\tsram = (struct tf_sram *)sram_handle;\n+\n+\t/* Check the current slice list\n+\t */\n+\trc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,\n+\t\t\t\t    parms->dir, parms->bank_id);\n+\tif (rc) {\n+\t\t/* Log error */\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"No SRAM slice list, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\t/* If the list is empty or all entries are full allocate a new block\n+\t */\n+\tif (!slice_list->first_not_full_block) {\n+\t\t/* Allocate and insert a new block\n+\t\t */\n+\t\taparms.index = &index;\n+\t\taparms.subtype = parms->tbl_type;\n+\t\taparms.rm_db = parms->rm_db;\n+\t\trc = tf_rm_allocate(&aparms);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n+\t\tblock_id = index;\n+\t\tblock = tf_sram_alloc_block(slice_list, block_id);\n+\t} else {\n+\t\t/* Block exists\n+\t\t */\n+\t\tblock =\n+\t\t (struct tf_sram_block *)(slice_list->first_not_full_block);\n+\t}\n+\trc = tf_sram_get_next_slice_in_block(block,\n+\t\t\t\t\t     parms->slice_size,\n+\t\t\t\t\t     &slice_offset,\n+\t\t\t\t\t     &block_is_full);\n+\n+\t/* Find the new first non-full block in the list\n+\t */\n+\ttf_sram_find_first_not_full_block(slice_list,\n+\t\t\t\t\t  parms->slice_size,\n+\t\t\t\t\t  &slice_list->first_not_full_block);\n+\n+\ttf_sram_block_id_2_offset(parms->bank_id, block->block_id,\n+\t\t\t\t  &block_offset);\n+\n+\t*parms->sram_offset = block_offset + slice_offset;\n+\treturn rc;\n+}\n+\n+int\n+tf_sram_mgr_free(void *sram_handle,\n+\t\t struct tf_sram_mgr_free_parms *parms)\n+{\n+\tint rc = 0;\n+\tstruct tf_sram *sram;\n+\tstruct tf_sram_slice_list *slice_list;\n+\tuint16_t block_id, slice_offset;\n+\tstruct tf_sram_block *block;\n+\tbool block_is_empty;\n+\tstruct tf_rm_free_parms fparms = { 0 };\n+\n+\tTF_CHECK_PARMS2(sram_handle, parms);\n+\n+\tsram = (struct tf_sram *)sram_handle;\n+\n+\t/* Check the current slice list\n+\t */\n+\trc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,\n+\t\t\t\t    parms->dir, parms->bank_id);\n+\tif (rc) {\n+\t\t/* Log error */\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"No SRAM slice list, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\t/* Determine the block id and slice offset from the SRAM offset\n+\t */\n+\ttf_sram_offset_2_block_id(parms->bank_id, parms->sram_offset, &block_id,\n+\t\t\t\t  &slice_offset);\n+\n+\t/* Search the list of blocks for the matching block id\n+\t */\n+\tblock = tf_sram_find_block(block_id, slice_list);\n+\tif (block == NULL) {\n+\t\tTFP_DRV_LOG(ERR, \"block not found 0x%x\\n\", block_id);\n+\t\treturn rc;\n+\t}\n+\n+\t/* If found, search for the matching SRAM slice in use.\n+\t */\n+\trc = tf_sram_free_slice(parms->slice_size, slice_offset,\n+\t\t\t\tblock, &block_is_empty);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR, \"Error freeing slice (%s)\\n\", strerror(-rc));\n+\t\treturn rc;\n+\t}\n+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)\n+\t/* If this is a counter, clear it.  
In the future we need to switch to\n+\t * using the special access registers on Thor to automatically clear on\n+\t * read.\n+\t */\n+\t/* If this is counter table, clear the entry on free */\n+\tif (parms->tbl_type == TF_TBL_TYPE_ACT_STATS_64) {\n+\t\tuint8_t data[8] = { 0 };\n+\t\tuint16_t hcapi_type = 0;\n+\t\tstruct tf_rm_get_hcapi_parms hparms = { 0 };\n+\n+\t\t/* Get the hcapi type */\n+\t\thparms.rm_db = parms->rm_db;\n+\t\thparms.subtype = parms->tbl_type;\n+\t\thparms.hcapi_type = &hcapi_type;\n+\t\trc = tf_rm_get_hcapi_type(&hparms);\n+\t\tif (rc) {\n+\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t    \"%s, Failed type lookup, type:%s, rc:%s\\n\",\n+\t\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t\t    tf_tbl_type_2_str(parms->tbl_type),\n+\t\t\t\t    strerror(-rc));\n+\t\t\treturn rc;\n+\t\t}\n+\t\t/* Clear the counter\n+\t\t */\n+\t\trc = tf_msg_set_tbl_entry(parms->tfp,\n+\t\t\t\t\t  parms->dir,\n+\t\t\t\t\t  hcapi_type,\n+\t\t\t\t\t  sizeof(data),\n+\t\t\t\t\t  data,\n+\t\t\t\t\t  parms->sram_offset);\n+\t\tif (rc) {\n+\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t    \"%s, Set failed, type:%s, rc:%s\\n\",\n+\t\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t\t    tf_tbl_type_2_str(parms->tbl_type),\n+\t\t\t\t    strerror(-rc));\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+#endif\n+\t/* If the block is empty, free the block to the RM\n+\t */\n+\tif (block_is_empty) {\n+\t\tfparms.rm_db = parms->rm_db;\n+\t\tfparms.subtype = parms->tbl_type;\n+\t\tfparms.index = block_id;\n+\t\trc = tf_rm_free(&fparms);\n+\n+\t\tif (rc) {\n+\t\t\tTFP_DRV_LOG(ERR, \"Free block_id(%d) failed error(%s)\\n\",\n+\t\t\t\t    block_id, strerror(-rc));\n+\t\t}\n+\t\t/* Free local entry regardless\n+\t\t */\n+\t\ttf_sram_free_block(slice_list, block);\n+\n+\t\t/* Find the next non-full block in the list\n+\t\t */\n+\t\ttf_sram_find_first_not_full_block(slice_list,\n+\t\t\t\t\t     parms->slice_size,\n+\t\t\t\t\t     &slice_list->first_not_full_block);\n+\t}\n+\n+\treturn rc;\n+}\n+\n+int\n+tf_sram_mgr_dump(void *sram_handle,\n+\t\t struct tf_sram_mgr_dump_parms *parms)\n+{\n+\tint rc = 0;\n+\tstruct tf_sram *sram;\n+\tstruct tf_sram_slice_list *slice_list;\n+\tuint32_t block_cnt, i;\n+\tstruct tf_sram_block *block;\n+\n+\tTF_CHECK_PARMS2(sram_handle, parms);\n+\n+\tsram = (struct tf_sram *)sram_handle;\n+\n+\trc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,\n+\t\t\t\t    parms->dir, parms->bank_id);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (slice_list->cnt || slice_list->first_not_full_block) {\n+\t\tTFP_DRV_LOG(INFO, \"\\n********** %s: %s: %s ***********\\n\",\n+\t\t\t    tf_sram_bank_2_str(parms->bank_id),\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_sram_slice_2_str(parms->slice_size));\n+\n+\t\tblock_cnt = tf_sram_get_block_cnt(slice_list);\n+\t\tTFP_DRV_LOG(INFO, \"block_cnt(%d)\\n\", block_cnt);\n+\t\tif (slice_list->first_not_full_block)\n+\t\t\tTFP_DRV_LOG(INFO, \"first_not_full_block(0x%x)\\n\",\n+\t\t\t    slice_list->first_not_full_block->block_id);\n+\t\tblock = slice_list->head;\n+\t\tfor (i = 0; i < block_cnt; i++) {\n+\t\t\ttf_sram_dump_block(block);\n+\t\t\tblock = tf_sram_get_next_block(block);\n+\t\t}\n+\t\tTFP_DRV_LOG(INFO, \"*********************************\\n\");\n+\t}\n+\treturn rc;\n+}\n+/**\n+ * Validate an SRAM Slice is allocated\n+ *\n+ * Validate whether the SRAM slice is allocated\n+ *\n+ * [in] sram_handle\n+ *   Pointer to SRAM handle\n+ *\n+ * [in] parms\n+ *   Pointer to the SRAM alloc parameters\n+ *\n+ * Returns\n+ *   - (0) if successful\n+ *   - (-EINVAL) on failure\n+ *\n+ */\n+int 
tf_sram_mgr_is_allocated(void *sram_handle,\n+\t\t\t     struct tf_sram_mgr_is_allocated_parms *parms)\n+{\n+\tint rc = 0;\n+\tstruct tf_sram *sram;\n+\tstruct tf_sram_slice_list *slice_list;\n+\tuint16_t block_id, slice_offset;\n+\tstruct tf_sram_block *block;\n+\n+\tTF_CHECK_PARMS3(sram_handle, parms, parms->is_allocated);\n+\n+\tsram = (struct tf_sram *)sram_handle;\n+\n+\t/* Check the current slice list\n+\t */\n+\trc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size,\n+\t\t\t\t    parms->dir, parms->bank_id);\n+\tif (rc) {\n+\t\t/* Log error */\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"No SRAM slice list, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\t/* If the list is empty, then it cannot be allocated\n+\t */\n+\tif (!slice_list->cnt) {\n+\t\tTFP_DRV_LOG(ERR, \"List is empty for %s:%s:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_sram_slice_2_str(parms->slice_size),\n+\t\t\t    tf_sram_bank_2_str(parms->bank_id));\n+\n+\t\tparms->is_allocated = false;\n+\t\tgoto done;\n+\t}\n+\n+\t/* Determine the block id and slice offset from the SRAM offset\n+\t */\n+\ttf_sram_offset_2_block_id(parms->bank_id, parms->sram_offset, &block_id,\n+\t\t\t\t  &slice_offset);\n+\n+\t/* Search the list of blocks for the matching block id\n+\t */\n+\tblock = tf_sram_find_block(block_id, slice_list);\n+\tif (block == NULL) {\n+\t\tTFP_DRV_LOG(ERR, \"block not found in list 0x%x\\n\",\n+\t\t\t    parms->sram_offset);\n+\t\tparms->is_allocated = false;\n+\t\tgoto done;\n+\t}\n+\n+\trc = tf_sram_is_slice_allocated_in_block(block,\n+\t\t\t\t\t\t parms->slice_size,\n+\t\t\t\t\t\t slice_offset,\n+\t\t\t\t\t\t parms->is_allocated);\n+done:\n+\treturn rc;\n+}\ndiff --git a/drivers/net/bnxt/tf_core/tf_sram_mgr.h b/drivers/net/bnxt/tf_core/tf_sram_mgr.h\nnew file mode 100644\nindex 0000000000..4abe3fb468\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/tf_sram_mgr.h\n@@ -0,0 +1,317 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2021 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#ifndef _TF_SRAM_MGR_H_\n+#define _TF_SRAM_MGR_H_\n+\n+#include <string.h>\n+#include <stdint.h>\n+#include <stdio.h>\n+#include <unistd.h>\n+#include <stdlib.h>\n+#include <ctype.h>\n+#include <limits.h>\n+#include <errno.h>\n+#include \"tf_core.h\"\n+#include \"tf_rm.h\"\n+\n+/* When special access registers are used to access the SRAM, stats can be\n+ * automatically cleared on read by the hardware.  
This requires additional\n+ * support to be added in the firmware to use these registers for statistics.\n+ * The support entails using the special access registers to read the stats.\n+ * These are stored in bank 3 currently but may move depending upon the\n+ * policy defined in tf_device_p58.h\n+ */\n+#define STATS_CLEAR_ON_READ_SUPPORT 0\n+\n+#define TF_SRAM_MGR_BLOCK_SZ_BYTES 64\n+#define TF_SRAM_MGR_MIN_SLICE_BYTES 8\n+/**\n+ * Bank identifier\n+ */\n+enum tf_sram_bank_id {\n+\tTF_SRAM_BANK_ID_0,\t\t/**< SRAM Bank 0 id */\n+\tTF_SRAM_BANK_ID_1,\t\t/**< SRAM Bank 1 id */\n+\tTF_SRAM_BANK_ID_2,\t\t/**< SRAM Bank 2 id */\n+\tTF_SRAM_BANK_ID_3,\t\t/**< SRAM Bank 3 id */\n+\tTF_SRAM_BANK_ID_MAX\t\t/**< SRAM Bank index limit */\n+};\n+\n+/**\n+ * TF slice size.\n+ *\n+ * A slice is part of a 64B row\n+ *\n+ * Each slice is a multiple of 8B\n+ */\n+enum tf_sram_slice_size {\n+\tTF_SRAM_SLICE_SIZE_8B,\t/**< 8 byte SRAM slice */\n+\tTF_SRAM_SLICE_SIZE_16B,\t/**< 16 byte SRAM slice */\n+\tTF_SRAM_SLICE_SIZE_32B,\t/**< 32 byte SRAM slice */\n+\tTF_SRAM_SLICE_SIZE_64B,\t/**< 64 byte SRAM slice */\n+\tTF_SRAM_SLICE_SIZE_MAX  /**< slice limit */\n+};\n+\n+\n+/** Initialize the SRAM slice manager\n+ *\n+ *  The SRAM slice manager manages slices within 64B rows. Slices are of size\n+ *  tf_sram_slice_size.  This function provides a handle to the SRAM manager\n+ *  data.\n+ *\n+ *  SRAM manager data may dynamically allocate data upon initialization if\n+ *  running on the host.\n+ *\n+ * [in/out] sram_handle\n+ *   Pointer to SRAM handle\n+ *\n+ * Returns\n+ *   - (0) if successful\n+ *   - (-EINVAL) on failure\n+ *\n+ * Returns the handle for the SRAM slice manager\n+ */\n+int tf_sram_mgr_bind(void **sram_handle);\n+\n+/** Uninitialize the SRAM slice manager\n+ *\n+ * Frees any dynamically allocated data structures for SRAM slice management.\n+ *\n+ * [in] sram_handle\n+ *   Pointer to SRAM handle\n+ *\n+ * Returns\n+ *   - (0) if successful\n+ *   - (-EINVAL) on failure\n+ */\n+int tf_sram_mgr_unbind(void *sram_handle);\n+\n+/**\n+ * tf_sram_mgr_alloc_parms parameter definition\n+ */\n+struct tf_sram_mgr_alloc_parms {\n+\t/**\n+\t * [in] dir\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] bank\n+\t *\n+\t *  the SRAM bank to allocate from\n+\t */\n+\tenum tf_sram_bank_id bank_id;\n+\t/**\n+\t * [in] slice_size\n+\t *\n+\t *  the slice size to allocate\n+\t */\n+\tenum tf_sram_slice_size slice_size;\n+\t/**\n+\t * [in/out] sram_slice\n+\t *\n+\t *  A pointer to be filled with an 8B sram slice offset\n+\t */\n+\tuint16_t *sram_offset;\n+\t/**\n+\t * [in] RM DB Handle required for RM allocation\n+\t */\n+\tvoid *rm_db;\n+\t/**\n+\t * [in] tf table type\n+\t */\n+\tenum tf_tbl_type tbl_type;\n+};\n+\n+/**\n+ * Allocate an SRAM Slice\n+ *\n+ * Allocate an SRAM slice from the indicated bank.  If successful an 8B SRAM\n+ * offset will be returned.  Slices are variable sized.  
This may result in\n+ * a row being allocated from the RM SRAM bank pool if required.\n+ *\n+ * [in] sram_handle\n+ *   Pointer to SRAM handle\n+ *\n+ * [in] parms\n+ *   Pointer to the SRAM alloc parameters\n+ *\n+ * Returns\n+ *   - (0) if successful\n+ *   - (-EINVAL) on failure\n+ *\n+ */\n+int tf_sram_mgr_alloc(void *sram_handle,\n+\t\t      struct tf_sram_mgr_alloc_parms *parms);\n+/**\n+ * tf_sram_mgr_free_parms parameter definition\n+ */\n+struct tf_sram_mgr_free_parms {\n+\t/**\n+\t * [in] dir\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] bank\n+\t *\n+\t *  the SRAM bank to free to\n+\t */\n+\tenum tf_sram_bank_id bank_id;\n+\t/**\n+\t * [in] slice_size\n+\t *\n+\t *  the slice size to be returned\n+\t */\n+\tenum tf_sram_slice_size slice_size;\n+\t/**\n+\t * [in] sram_offset\n+\t *\n+\t *  the SRAM slice offset (8B) to be returned\n+\t */\n+\tuint16_t sram_offset;\n+\t/**\n+\t * [in] RM DB Handle required for RM free\n+\t */\n+\tvoid *rm_db;\n+\t/**\n+\t * [in] tf table type\n+\t */\n+\tenum tf_tbl_type tbl_type;\n+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)\n+\t/**\n+\t * [in] tfp\n+\t *\n+\t * A pointer to the tf handle\n+\t */\n+\tvoid *tfp;\n+#endif\n+};\n+\n+/**\n+ * Free an SRAM Slice\n+ *\n+ * Free an SRAM slice to the indicated bank.  This may result in a 64B row\n+ * being returned to the RM SRAM bank pool.\n+ *\n+ * [in] sram_handle\n+ *   Pointer to SRAM handle\n+ *\n+ * [in] parms\n+ *   Pointer to the SRAM free parameters\n+ *\n+ * Returns\n+ *   - (0) if successful\n+ *   - (-EINVAL) on failure\n+ *\n+ */\n+int tf_sram_mgr_free(void *sram_handle,\n+\t\t     struct tf_sram_mgr_free_parms *parms);\n+\n+/**\n+ * tf_sram_mgr_dump_parms parameter definition\n+ */\n+struct tf_sram_mgr_dump_parms {\n+\t/**\n+\t * [in] dir\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] bank\n+\t *\n+\t *  the SRAM bank to dump\n+\t */\n+\tenum tf_sram_bank_id bank_id;\n+\t/**\n+\t * [in] slice_size\n+\t *\n+\t *  the slice size list to be dumped\n+\t */\n+\tenum tf_sram_slice_size slice_size;\n+};\n+\n+/**\n+ * Dump a slice list\n+ *\n+ * Dump the slice list given the SRAM bank and the slice size\n+ *\n+ * [in] sram_handle\n+ *   Pointer to SRAM handle\n+ *\n+ * [in] parms\n+ *   Pointer to the SRAM free parameters\n+ *\n+ * Returns\n+ *   - (0) if successful\n+ *   - (-EINVAL) on failure\n+ *\n+ */\n+int tf_sram_mgr_dump(void *sram_handle,\n+\t\t     struct tf_sram_mgr_dump_parms *parms);\n+\n+/**\n+ * tf_sram_mgr_is_allocated_parms parameter definition\n+ */\n+struct tf_sram_mgr_is_allocated_parms {\n+\t/**\n+\t * [in] dir\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] bank\n+\t *\n+\t *  the SRAM bank to allocate from\n+\t */\n+\tenum tf_sram_bank_id bank_id;\n+\t/**\n+\t * [in] slice_size\n+\t *\n+\t *  the slice size which was allocated\n+\t */\n+\tenum tf_sram_slice_size slice_size;\n+\t/**\n+\t * [in] sram_offset\n+\t *\n+\t *  The sram slice offset to validate\n+\t */\n+\tuint16_t sram_offset;\n+\t/**\n+\t * [in/out] is_allocated\n+\t *\n+\t *  Pointer passed in to be filled with indication of allocation\n+\t */\n+\tbool *is_allocated;\n+};\n+\n+/**\n+ * Validate an SRAM Slice is allocated\n+ *\n+ * Validate whether the SRAM slice is allocated\n+ *\n+ * [in] sram_handle\n+ *   Pointer to SRAM handle\n+ *\n+ * [in] parms\n+ *   Pointer to the SRAM alloc parameters\n+ *\n+ * Returns\n+ *   - (0) if successful\n+ *   - (-EINVAL) on failure\n+ *\n+ */\n+int tf_sram_mgr_is_allocated(void *sram_handle,\n+\t\t\t     struct tf_sram_mgr_is_allocated_parms *parms);\n+\n+/**\n+ * Given the 
slice size, return a char string\n+ */\n+const char\n+*tf_sram_slice_2_str(enum tf_sram_slice_size slice_size);\n+\n+/**\n+ * Given the bank_id, return a char string\n+ */\n+const char\n+*tf_sram_bank_2_str(enum tf_sram_bank_id bank_id);\n+\n+#endif /* _TF_SRAM_MGR_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/tf_tbl.c b/drivers/net/bnxt/tf_core/tf_tbl.c\nindex 7011edcd78..0a8720e7b6 100644\n--- a/drivers/net/bnxt/tf_core/tf_tbl.c\n+++ b/drivers/net/bnxt/tf_core/tf_tbl.c\n@@ -16,20 +16,11 @@\n #include \"tf_session.h\"\n #include \"tf_device.h\"\n \n-#define TF_TBL_RM_TO_PTR(new_idx, idx, base, shift) {\t\t\\\n-\t\t*(new_idx) = (((idx) + (base)) << (shift));\t\\\n-}\n-\n-#define TF_TBL_PTR_TO_RM(new_idx, idx, base, shift) {\t\t\\\n-\t\t*(new_idx) = (((idx) >> (shift)) - (base));\t\\\n-}\n-\n struct tf;\n \n-/**\n- * Shadow init flag, set on bind and cleared on unbind\n- */\n-static uint8_t shadow_init;\n+#define TF_TBL_RM_TO_PTR(new_idx, idx, base, shift) {          \\\n+\t\t*(new_idx) = (((idx) + (base)) << (shift));    \\\n+}\n \n int\n tf_tbl_bind(struct tf *tfp,\n@@ -121,8 +112,6 @@ tf_tbl_unbind(struct tf *tfp)\n \t\ttbl_db->tbl_db[i] = NULL;\n \t}\n \n-\tshadow_init = 0;\n-\n \treturn 0;\n }\n \n@@ -135,7 +124,6 @@ tf_tbl_alloc(struct tf *tfp __rte_unused,\n \tstruct tf_rm_allocate_parms aparms = { 0 };\n \tstruct tf_session *tfs;\n \tstruct tf_dev_info *dev;\n-\tuint16_t base = 0, shift = 0;\n \tstruct tbl_rm_db *tbl_db;\n \tvoid *tbl_db_ptr = NULL;\n \n@@ -154,28 +142,12 @@ tf_tbl_alloc(struct tf *tfp __rte_unused,\n \trc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"Failed to get em_ext_db from session, rc:%s\\n\",\n+\t\t\t    \"Failed to get tbl_db from session, rc:%s\\n\",\n \t\t\t    strerror(-rc));\n \t\treturn rc;\n \t}\n \ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n \n-\t/* Only get table info if required for the device */\n-\tif (dev->ops->tf_dev_get_tbl_info) {\n-\t\trc = dev->ops->tf_dev_get_tbl_info(tfp,\n-\t\t\t\t\t\t   tbl_db->tbl_db[parms->dir],\n-\t\t\t\t\t\t   parms->type,\n-\t\t\t\t\t\t   &base,\n-\t\t\t\t\t\t   &shift);\n-\t\tif (rc) {\n-\t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"%s: Failed to get table info:%d\\n\",\n-\t\t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t\t    parms->type);\n-\t\t\treturn rc;\n-\t\t}\n-\t}\n-\n \t/* Allocate requested element */\n \taparms.rm_db = tbl_db->tbl_db[parms->dir];\n \taparms.subtype = parms->type;\n@@ -183,13 +155,12 @@ tf_tbl_alloc(struct tf *tfp __rte_unused,\n \trc = tf_rm_allocate(&aparms);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s: Failed allocate, type:%d\\n\",\n+\t\t\t    \"%s: Failed allocate, type:%s\\n\",\n \t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    parms->type);\n+\t\t\t    tf_tbl_type_2_str(parms->type));\n \t\treturn rc;\n \t}\n \n-\tTF_TBL_RM_TO_PTR(&idx, idx, base, shift);\n \t*parms->idx = idx;\n \n \treturn 0;\n@@ -205,7 +176,6 @@ tf_tbl_free(struct tf *tfp __rte_unused,\n \tint allocated = 0;\n \tstruct tf_session *tfs;\n \tstruct tf_dev_info *dev;\n-\tuint16_t base = 0, shift = 0;\n \tstruct tbl_rm_db *tbl_db;\n \tvoid *tbl_db_ptr = NULL;\n \n@@ -230,28 +200,10 @@ tf_tbl_free(struct tf *tfp __rte_unused,\n \t}\n \ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n \n-\t/* Only get table info if required for the device */\n-\tif (dev->ops->tf_dev_get_tbl_info) {\n-\t\trc = dev->ops->tf_dev_get_tbl_info(tfp,\n-\t\t\t\t\t\t   tbl_db->tbl_db[parms->dir],\n-\t\t\t\t\t\t   parms->type,\n-\t\t\t\t\t\t   &base,\n-\t\t\t\t\t\t   &shift);\n-\t\tif 
(rc) {\n-\t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"%s: Failed to get table info:%d\\n\",\n-\t\t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t\t    parms->type);\n-\t\t\treturn rc;\n-\t\t}\n-\t}\n-\n \t/* Check if element is in use */\n \taparms.rm_db = tbl_db->tbl_db[parms->dir];\n \taparms.subtype = parms->type;\n-\n-\tTF_TBL_PTR_TO_RM(&aparms.index, parms->idx, base, shift);\n-\n+\taparms.index = parms->idx;\n \taparms.allocated = &allocated;\n \trc = tf_rm_is_allocated(&aparms);\n \tif (rc)\n@@ -259,9 +211,9 @@ tf_tbl_free(struct tf *tfp __rte_unused,\n \n \tif (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s: Entry already free, type:%d, index:%d\\n\",\n+\t\t\t    \"%s: Entry already free, type:%s, index:%d\\n\",\n \t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    parms->type,\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t    parms->idx);\n \t\treturn -EINVAL;\n \t}\n@@ -279,9 +231,9 @@ tf_tbl_free(struct tf *tfp __rte_unused,\n \t\trc = tf_rm_get_hcapi_type(&hparms);\n \t\tif (rc) {\n \t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"%s, Failed type lookup, type:%d, rc:%s\\n\",\n+\t\t\t\t    \"%s, Failed type lookup, type:%s, rc:%s\\n\",\n \t\t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t\t    parms->type,\n+\t\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t\t    strerror(-rc));\n \t\t\treturn rc;\n \t\t}\n@@ -295,9 +247,9 @@ tf_tbl_free(struct tf *tfp __rte_unused,\n \t\t\t\t\t  parms->idx);\n \t\tif (rc) {\n \t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"%s, Set failed, type:%d, rc:%s\\n\",\n+\t\t\t\t    \"%s, Set failed, type:%s, rc:%s\\n\",\n \t\t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t\t    parms->type,\n+\t\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t\t    strerror(-rc));\n \t\t\treturn rc;\n \t\t}\n@@ -306,15 +258,13 @@ tf_tbl_free(struct tf *tfp __rte_unused,\n \t/* Free requested element */\n \tfparms.rm_db = tbl_db->tbl_db[parms->dir];\n \tfparms.subtype = parms->type;\n-\n-\tTF_TBL_PTR_TO_RM(&fparms.index, parms->idx, base, shift);\n-\n+\tfparms.index = parms->idx;\n \trc = tf_rm_free(&fparms);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s: Free failed, type:%d, index:%d\\n\",\n+\t\t\t    \"%s: Free failed, type:%s, index:%d\\n\",\n \t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    parms->type,\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t    parms->idx);\n \t\treturn rc;\n \t}\n@@ -333,7 +283,6 @@ tf_tbl_set(struct tf *tfp,\n \tstruct tf_rm_get_hcapi_parms hparms = { 0 };\n \tstruct tf_session *tfs;\n \tstruct tf_dev_info *dev;\n-\tuint16_t base = 0, shift = 0;\n \tstruct tbl_rm_db *tbl_db;\n \tvoid *tbl_db_ptr = NULL;\n \n@@ -358,21 +307,6 @@ tf_tbl_set(struct tf *tfp,\n \t}\n \ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n \n-\t/* Only get table info if required for the device */\n-\tif (dev->ops->tf_dev_get_tbl_info) {\n-\t\trc = dev->ops->tf_dev_get_tbl_info(tfp,\n-\t\t\t\t\t\t   tbl_db->tbl_db[parms->dir],\n-\t\t\t\t\t\t   parms->type,\n-\t\t\t\t\t\t   &base,\n-\t\t\t\t\t\t   &shift);\n-\t\tif (rc) {\n-\t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"%s: Failed to get table info:%d\\n\",\n-\t\t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t\t    parms->type);\n-\t\t\treturn rc;\n-\t\t}\n-\t}\n \n \t/* Do not check meter drop counter because it is not allocated\n \t * resources\n@@ -381,19 +315,18 @@ tf_tbl_set(struct tf *tfp,\n \t\t/* Verify that the entry has been previously allocated */\n \t\taparms.rm_db = tbl_db->tbl_db[parms->dir];\n \t\taparms.subtype = parms->type;\n-\t\tTF_TBL_PTR_TO_RM(&aparms.index, parms->idx, base, shift);\n-\n 
\t\taparms.allocated = &allocated;\n+\t\taparms.index = parms->idx;\n \t\trc = tf_rm_is_allocated(&aparms);\n \t\tif (rc)\n \t\t\treturn rc;\n \n \t\tif (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {\n \t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t   \"%s, Invalid or not allocated index, type:%d, idx:%d\\n\",\n-\t\t\t   tf_dir_2_str(parms->dir),\n-\t\t\t   parms->type,\n-\t\t\t   parms->idx);\n+\t\t\t      \"%s, Invalid or not allocated, type:%s, idx:%d\\n\",\n+\t\t\t      tf_dir_2_str(parms->dir),\n+\t\t\t      tf_tbl_type_2_str(parms->type),\n+\t\t\t      parms->idx);\n \t\t\treturn -EINVAL;\n \t\t}\n \t}\n@@ -405,9 +338,9 @@ tf_tbl_set(struct tf *tfp,\n \trc = tf_rm_get_hcapi_type(&hparms);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s, Failed type lookup, type:%d, rc:%s\\n\",\n+\t\t\t    \"%s, Failed type lookup, type:%s, rc:%s\\n\",\n \t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    parms->type,\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t    strerror(-rc));\n \t\treturn rc;\n \t}\n@@ -420,9 +353,9 @@ tf_tbl_set(struct tf *tfp,\n \t\t\t\t  parms->idx);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s, Set failed, type:%d, rc:%s\\n\",\n+\t\t\t    \"%s, Set failed, type:%s, rc:%s\\n\",\n \t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    parms->type,\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t    strerror(-rc));\n \t\treturn rc;\n \t}\n@@ -441,7 +374,6 @@ tf_tbl_get(struct tf *tfp,\n \tstruct tf_rm_get_hcapi_parms hparms = { 0 };\n \tstruct tf_session *tfs;\n \tstruct tf_dev_info *dev;\n-\tuint16_t base = 0, shift = 0;\n \tstruct tbl_rm_db *tbl_db;\n \tvoid *tbl_db_ptr = NULL;\n \n@@ -466,22 +398,6 @@ tf_tbl_get(struct tf *tfp,\n \t}\n \ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n \n-\t/* Only get table info if required for the device */\n-\tif (dev->ops->tf_dev_get_tbl_info) {\n-\t\trc = dev->ops->tf_dev_get_tbl_info(tfp,\n-\t\t\t\t\t\t   tbl_db->tbl_db[parms->dir],\n-\t\t\t\t\t\t   parms->type,\n-\t\t\t\t\t\t   &base,\n-\t\t\t\t\t\t   &shift);\n-\t\tif (rc) {\n-\t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"%s: Failed to get table info:%d\\n\",\n-\t\t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t\t    parms->type);\n-\t\t\treturn rc;\n-\t\t}\n-\t}\n-\n \t/* Do not check meter drop counter because it is not allocated\n \t * resources.\n \t */\n@@ -489,8 +405,7 @@ tf_tbl_get(struct tf *tfp,\n \t\t/* Verify that the entry has been previously allocated */\n \t\taparms.rm_db = tbl_db->tbl_db[parms->dir];\n \t\taparms.subtype = parms->type;\n-\t\tTF_TBL_PTR_TO_RM(&aparms.index, parms->idx, base, shift);\n-\n+\t\taparms.index = parms->idx;\n \t\taparms.allocated = &allocated;\n \t\trc = tf_rm_is_allocated(&aparms);\n \t\tif (rc)\n@@ -498,9 +413,9 @@ tf_tbl_get(struct tf *tfp,\n \n \t\tif (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) {\n \t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t   \"%s, Invalid or not allocated index, type:%d, idx:%d\\n\",\n+\t\t\t   \"%s, Invalid or not allocated index, type:%s, idx:%d\\n\",\n \t\t\t   tf_dir_2_str(parms->dir),\n-\t\t\t   parms->type,\n+\t\t\t   tf_tbl_type_2_str(parms->type),\n \t\t\t   parms->idx);\n \t\t\treturn -EINVAL;\n \t\t}\n@@ -513,9 +428,9 @@ tf_tbl_get(struct tf *tfp,\n \trc = tf_rm_get_hcapi_type(&hparms);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s, Failed type lookup, type:%d, rc:%s\\n\",\n+\t\t\t    \"%s, Failed type lookup, type:%s, rc:%s\\n\",\n \t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    parms->type,\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t    strerror(-rc));\n \t\treturn rc;\n \t}\n@@ -529,9 +444,9 @@ tf_tbl_get(struct tf 
*tfp,\n \t\t\t\t  parms->idx);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s, Get failed, type:%d, rc:%s\\n\",\n+\t\t\t    \"%s, Get failed, type:%s, rc:%s\\n\",\n \t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    parms->type,\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t    strerror(-rc));\n \t\treturn rc;\n \t}\n@@ -549,7 +464,6 @@ tf_tbl_bulk_get(struct tf *tfp,\n \tstruct tf_rm_check_indexes_in_range_parms cparms = { 0 };\n \tstruct tf_session *tfs;\n \tstruct tf_dev_info *dev;\n-\tuint16_t base = 0, shift = 0;\n \tstruct tbl_rm_db *tbl_db;\n \tvoid *tbl_db_ptr = NULL;\n \n@@ -574,40 +488,21 @@ tf_tbl_bulk_get(struct tf *tfp,\n \t}\n \ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n \n-\t/* Only get table info if required for the device */\n-\tif (dev->ops->tf_dev_get_tbl_info) {\n-\t\trc = dev->ops->tf_dev_get_tbl_info(tfp,\n-\t\t\t\t\t\t   tbl_db->tbl_db[parms->dir],\n-\t\t\t\t\t\t   parms->type,\n-\t\t\t\t\t\t   &base,\n-\t\t\t\t\t\t   &shift);\n-\t\tif (rc) {\n-\t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"%s: Failed to get table info:%d\\n\",\n-\t\t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t\t    parms->type);\n-\t\t\treturn rc;\n-\t\t}\n-\t}\n-\n \t/* Verify that the entries are in the range of reserved resources. */\n \tcparms.rm_db = tbl_db->tbl_db[parms->dir];\n \tcparms.subtype = parms->type;\n-\n-\tTF_TBL_PTR_TO_RM(&cparms.starting_index, parms->starting_idx,\n-\t\t\t base, shift);\n-\n \tcparms.num_entries = parms->num_entries;\n+\tcparms.starting_index = parms->starting_idx;\n \n \trc = tf_rm_check_indexes_in_range(&cparms);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n \t\t\t    \"%s, Invalid or %d index starting from %d\"\n-\t\t\t    \" not in range, type:%d\",\n+\t\t\t    \" not in range, type:%s\",\n \t\t\t    tf_dir_2_str(parms->dir),\n \t\t\t    parms->starting_idx,\n \t\t\t    parms->num_entries,\n-\t\t\t    parms->type);\n+\t\t\t    tf_tbl_type_2_str(parms->type));\n \t\treturn rc;\n \t}\n \n@@ -617,9 +512,9 @@ tf_tbl_bulk_get(struct tf *tfp,\n \trc = tf_rm_get_hcapi_type(&hparms);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s, Failed type lookup, type:%d, rc:%s\\n\",\n+\t\t\t    \"%s, Failed type lookup, type:%s, rc:%s\\n\",\n \t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    parms->type,\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t    strerror(-rc));\n \t\treturn rc;\n \t}\n@@ -634,9 +529,9 @@ tf_tbl_bulk_get(struct tf *tfp,\n \t\t\t\t       parms->physical_mem_addr);\n \tif (rc) {\n \t\tTFP_DRV_LOG(ERR,\n-\t\t\t    \"%s, Bulk get failed, type:%d, rc:%s\\n\",\n+\t\t\t    \"%s, Bulk get failed, type:%s, rc:%s\\n\",\n \t\t\t    tf_dir_2_str(parms->dir),\n-\t\t\t    parms->type,\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n \t\t\t    strerror(-rc));\n \t}\n \n@@ -653,9 +548,9 @@ tf_tbl_get_resc_info(struct tf *tfp,\n \tstruct tf_rm_get_alloc_info_parms ainfo;\n \tvoid *tbl_db_ptr = NULL;\n \tstruct tbl_rm_db *tbl_db;\n-\tuint16_t base = 0, shift = 0;\n \tstruct tf_dev_info *dev;\n \tstruct tf_session *tfs;\n+\tuint16_t base = 0, shift = 0;\n \n \tTF_CHECK_PARMS2(tfp, tbl);\n \n@@ -677,7 +572,6 @@ tf_tbl_get_resc_info(struct tf *tfp,\n \n \ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n \n-\t/* check if reserved resource for WC is multiple of num_slices */\n \tfor (d = 0; d < TF_DIR_MAX; d++) {\n \t\tainfo.rm_db = tbl_db->tbl_db[d];\n \t\tdinfo = tbl[d].info;\ndiff --git a/drivers/net/bnxt/tf_core/tf_tbl.h b/drivers/net/bnxt/tf_core/tf_tbl.h\nindex 7e1107ffe7..2483718e5d 100644\n--- a/drivers/net/bnxt/tf_core/tf_tbl.h\n+++ b/drivers/net/bnxt/tf_core/tf_tbl.h\n@@ 
-28,14 +28,6 @@ struct tf_tbl_cfg_parms {\n \t * Table Type element configuration array\n \t */\n \tstruct tf_rm_element_cfg *cfg;\n-\t/**\n-\t * Shadow table type configuration array\n-\t */\n-\tstruct tf_shadow_tbl_cfg *shadow_cfg;\n-\t/**\n-\t * Boolean controlling the request shadow copy.\n-\t */\n-\tbool shadow_copy;\n \t/**\n \t * Session resource allocations\n \t */\n@@ -197,8 +189,6 @@ struct tbl_rm_db {\n  *\n  * @ref tf_tbl_free\n  *\n- * @ref tf_tbl_alloc_search\n- *\n  * @ref tf_tbl_set\n  *\n  * @ref tf_tbl_get\n@@ -255,10 +245,7 @@ int tf_tbl_alloc(struct tf *tfp,\n \t\t struct tf_tbl_alloc_parms *parms);\n \n /**\n- * Free's the requested table type and returns it to the DB. If shadow\n- * DB is enabled its searched first and if found the element refcount\n- * is decremented. If refcount goes to 0 then its returned to the\n- * table type DB.\n+ * Frees the requested table type and returns it to the DB.\n  *\n  * [in] tfp\n  *   Pointer to TF handle, used for HCAPI communication\ndiff --git a/drivers/net/bnxt/tf_core/tf_tbl_sram.c b/drivers/net/bnxt/tf_core/tf_tbl_sram.c\nnew file mode 100644\nindex 0000000000..ea10afecb6\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/tf_tbl_sram.c\n@@ -0,0 +1,713 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2021 Broadcom\n+ * All rights reserved.\n+ */\n+\n+/* Truflow Table APIs and supporting code */\n+\n+#include <rte_common.h>\n+\n+#include \"tf_tbl.h\"\n+#include \"tf_tbl_sram.h\"\n+#include \"tf_sram_mgr.h\"\n+#include \"tf_common.h\"\n+#include \"tf_rm.h\"\n+#include \"tf_util.h\"\n+#include \"tf_msg.h\"\n+#include \"tfp.h\"\n+#include \"tf_session.h\"\n+#include \"tf_device.h\"\n+#include \"cfa_resource_types.h\"\n+\n+#define DBG_SRAM 0\n+\n+/**\n+ * tf_tbl_sram_get_info_parms parameter definition\n+ */\n+struct tf_tbl_sram_get_info_parms {\n+\t/**\n+\t * [in] table RM database\n+\t */\n+\tvoid *rm_db;\n+\t/**\n+\t * [in] Receive or transmit direction\n+\t */\n+\tenum tf_dir dir;\n+\t/**\n+\t * [in] table_type\n+\t *\n+\t *  the TF index table type\n+\t */\n+\tenum tf_tbl_type tbl_type;\n+\t/**\n+\t * [out] bank\n+\t *\n+\t *  The SRAM bank associated with the type\n+\t */\n+\tenum tf_sram_bank_id bank_id;\n+\t/**\n+\t * [out] slice_size\n+\t *\n+\t *  the slice size for the indicated table type\n+\t */\n+\tenum tf_sram_slice_size slice_size;\n+};\n+\n+/**\n+ * Translate HCAPI type to SRAM Manager bank\n+ */\n+const uint16_t tf_tbl_sram_hcapi_2_bank[CFA_RESOURCE_TYPE_P58_LAST] = {\n+\t[CFA_RESOURCE_TYPE_P58_SRAM_BANK_0] = TF_SRAM_BANK_ID_0,\n+\t[CFA_RESOURCE_TYPE_P58_SRAM_BANK_1] = TF_SRAM_BANK_ID_1,\n+\t[CFA_RESOURCE_TYPE_P58_SRAM_BANK_2] = TF_SRAM_BANK_ID_2,\n+\t[CFA_RESOURCE_TYPE_P58_SRAM_BANK_3] = TF_SRAM_BANK_ID_3\n+};\n+\n+#define TF_TBL_SRAM_SLICES_MAX  \\\n+\t(TF_SRAM_MGR_BLOCK_SZ_BYTES / TF_SRAM_MGR_MIN_SLICE_BYTES)\n+/**\n+ * Translate the number of slices per 64B block to the SRAM Manager slice size\n+ */\n+const uint8_t tf_tbl_sram_slices_2_size[TF_TBL_SRAM_SLICES_MAX + 1] = {\n+\t[0] = TF_SRAM_SLICE_SIZE_64B, /* if 0 slices assume 1 64B block */\n+\t[1] = TF_SRAM_SLICE_SIZE_64B, /* 1 slice  per 64B block */\n+\t[2] = TF_SRAM_SLICE_SIZE_32B, /* 2 slices per 64B block */\n+\t[4] = TF_SRAM_SLICE_SIZE_16B, /* 4 slices per 64B block */\n+\t[8] = TF_SRAM_SLICE_SIZE_8B   /* 8 slices per 64B block */\n+};\n+\n+/**\n+ * Get SRAM Table Information for a given index table type\n+ *\n+ *\n+ * [in] sram_handle\n+ *   Pointer to SRAM handle\n+ *\n+ * [in] parms\n+ *   Pointer to the SRAM get info parameters\n+ *\n+ * Returns\n+ *   - (0) if 
successful\n+ *   - (-EINVAL) on failure\n+ *\n+ */\n+static int tf_tbl_sram_get_info(struct tf_tbl_sram_get_info_parms *parms)\n+{\n+\tint rc = 0;\n+\tuint16_t hcapi_type;\n+\tuint16_t slices;\n+\tstruct tf_rm_get_hcapi_parms hparms;\n+\tstruct tf_rm_get_slices_parms sparms;\n+\n+\thparms.rm_db = parms->rm_db;\n+\thparms.subtype = parms->tbl_type;\n+\thparms.hcapi_type = &hcapi_type;\n+\n+\trc = tf_rm_get_hcapi_type(&hparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Failed to get hcapi_type %s, rc:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->tbl_type),\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\tparms->bank_id = tf_tbl_sram_hcapi_2_bank[hcapi_type];\n+\n+\tsparms.rm_db = parms->rm_db;\n+\tsparms.subtype = parms->tbl_type;\n+\tsparms.slices = &slices;\n+\n+\trc = tf_rm_get_slices(&sparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Failed to get slice cnt %s, rc:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->tbl_type),\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\tif (slices)\n+\t\tparms->slice_size = tf_tbl_sram_slices_2_size[slices];\n+\n+\tTFP_DRV_LOG(INFO,\n+\t\t    \"(%s) bank(%s) slice_size(%s)\\n\",\n+\t\t    tf_tbl_type_2_str(parms->tbl_type),\n+\t\t    tf_sram_bank_2_str(parms->bank_id),\n+\t\t    tf_sram_slice_2_str(parms->slice_size));\n+\treturn rc;\n+}\n+\n+int\n+tf_tbl_sram_bind(struct tf *tfp __rte_unused)\n+{\n+\tint rc = 0;\n+\tvoid *sram_handle = NULL;\n+\n+\tTF_CHECK_PARMS1(tfp);\n+\n+\trc = tf_sram_mgr_bind(&sram_handle);\n+\n+\ttf_session_set_sram_db(tfp, sram_handle);\n+\n+\tTFP_DRV_LOG(INFO,\n+\t\t    \"SRAM Table - initialized\\n\");\n+\n+\treturn rc;\n+}\n+\n+int\n+tf_tbl_sram_unbind(struct tf *tfp __rte_unused)\n+{\n+\tint rc = 0;\n+\tvoid *sram_handle = NULL;\n+\n+\tTF_CHECK_PARMS1(tfp);\n+\n+\trc = tf_session_get_sram_db(tfp, &sram_handle);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get sram_handle from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\tif (sram_handle)\n+\t\trc = tf_sram_mgr_unbind(sram_handle);\n+\n+\tTFP_DRV_LOG(INFO,\n+\t\t    \"SRAM Table - deinitialized\\n\");\n+\treturn rc;\n+}\n+\n+int\n+tf_tbl_sram_alloc(struct tf *tfp,\n+\t\t  struct tf_tbl_alloc_parms *parms)\n+{\n+\tint rc;\n+\tuint16_t idx;\n+\tstruct tf_session *tfs;\n+\tstruct tf_dev_info *dev;\n+\tstruct tf_tbl_sram_get_info_parms iparms = { 0 };\n+\tstruct tf_sram_mgr_alloc_parms aparms = { 0 };\n+\tstruct tbl_rm_db *tbl_db;\n+\tvoid *tbl_db_ptr = NULL;\n+\tvoid *sram_handle = NULL;\n+\n+\tTF_CHECK_PARMS2(tfp, parms);\n+\n+\t/* Retrieve the session information */\n+\trc = tf_session_get(tfp, &tfs, &dev);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get tbl_db from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n+\n+\trc = tf_session_get_sram_db(tfp, &sram_handle);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get sram_handle from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\tiparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\tiparms.dir = parms->dir;\n+\tiparms.tbl_type = parms->type;\n+\n+\trc = tf_tbl_sram_get_info(&iparms);\n+\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Failed to get SRAM info %s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    
tf_tbl_type_2_str(parms->type));\n+\t\treturn rc;\n+\t}\n+\n+\taparms.dir = parms->dir;\n+\taparms.bank_id = iparms.bank_id;\n+\taparms.slice_size = iparms.slice_size;\n+\taparms.sram_offset = &idx;\n+\taparms.tbl_type = parms->type;\n+\taparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\n+\trc = tf_sram_mgr_alloc(sram_handle, &aparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Failed to allocate SRAM table:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type));\n+\t\treturn rc;\n+\t}\n+\t*parms->idx = idx;\n+\n+#if (DBG_SRAM == 1)\n+\t{\n+\t\tstruct tf_sram_mgr_dump_parms dparms;\n+\n+\t\tdparms.dir = parms->dir;\n+\t\tdparms.bank_id = iparms.bank_id;\n+\t\tdparms.slice_size = iparms.slice_size;\n+\n+\t\trc = tf_sram_mgr_dump(sram_handle, &dparms);\n+\t}\n+#endif\n+\n+\treturn rc;\n+}\n+\n+int\n+tf_tbl_sram_free(struct tf *tfp __rte_unused,\n+\t\t struct tf_tbl_free_parms *parms)\n+{\n+\tint rc;\n+\tstruct tf_session *tfs;\n+\tstruct tf_dev_info *dev;\n+\tstruct tbl_rm_db *tbl_db;\n+\tvoid *tbl_db_ptr = NULL;\n+\tstruct tf_tbl_sram_get_info_parms iparms = { 0 };\n+\tstruct tf_sram_mgr_free_parms fparms = { 0 };\n+\tstruct tf_sram_mgr_is_allocated_parms aparms = { 0 };\n+\tbool allocated = false;\n+\tvoid *sram_handle = NULL;\n+\n+\tTF_CHECK_PARMS2(tfp, parms);\n+\n+\t/* Retrieve the session information */\n+\trc = tf_session_get(tfp, &tfs, &dev);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get em_ext_db from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n+\n+\trc = tf_session_get_sram_db(tfp, &sram_handle);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get sram_handle from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\tiparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\tiparms.dir = parms->dir;\n+\tiparms.tbl_type = parms->type;\n+\n+\trc = tf_tbl_sram_get_info(&iparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Failed to get table info:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type));\n+\t\treturn rc;\n+\t}\n+\n+#if (DBG_SRAM == 1)\n+\t{\n+\t\tstruct tf_sram_mgr_dump_parms dparms;\n+\n+\t\tprintf(\"%s: %s: %s\\n\", tf_dir_2_str(parms->dir),\n+\t\t       tf_sram_slice_2_str(iparms.slice_size),\n+\t\t       tf_sram_bank_2_str(iparms.bank_id));\n+\n+\t\tdparms.dir = parms->dir;\n+\t\tdparms.bank_id = iparms.bank_id;\n+\t\tdparms.slice_size = iparms.slice_size;\n+\n+\t\trc = tf_sram_mgr_dump(sram_handle, &dparms);\n+\t}\n+#endif\n+\n+\taparms.sram_offset = parms->idx;\n+\taparms.slice_size = iparms.slice_size;\n+\taparms.bank_id = iparms.bank_id;\n+\taparms.dir = parms->dir;\n+\taparms.is_allocated = &allocated;\n+\n+\trc = tf_sram_mgr_is_allocated(sram_handle, &aparms);\n+\tif (rc || !allocated) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Free of invalid entry:%s idx(%d):(%s)\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    parms->idx,\n+\t\t\t    strerror(-rc));\n+\t\trc = -ENOMEM;\n+\t\treturn rc;\n+\t}\n+\n+\tfparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\tfparms.tbl_type = parms->type;\n+\tfparms.sram_offset = parms->idx;\n+\tfparms.slice_size = iparms.slice_size;\n+\tfparms.bank_id = iparms.bank_id;\n+\tfparms.dir = parms->dir;\n+#if (STATS_CLEAR_ON_READ_SUPPORT == 0)\n+\tfparms.tfp = tfp;\n+#endif\n+\trc = 
tf_sram_mgr_free(sram_handle, &fparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Failed to free entry:%s idx(%d)\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    parms->idx);\n+\t\treturn rc;\n+\t}\n+\n+\n+#if (DBG_SRAM == 1)\n+\t{\n+\t\tstruct tf_sram_mgr_dump_parms dparms;\n+\n+\t\tprintf(\"%s: %s: %s\\n\", tf_dir_2_str(parms->dir),\n+\t\t       tf_sram_slice_2_str(iparms.slice_size),\n+\t\t       tf_sram_bank_2_str(iparms.bank_id));\n+\n+\t\tdparms.dir = parms->dir;\n+\t\tdparms.bank_id = iparms.bank_id;\n+\t\tdparms.slice_size = iparms.slice_size;\n+\n+\t\trc = tf_sram_mgr_dump(sram_handle, &dparms);\n+\t}\n+#endif\n+\treturn rc;\n+}\n+\n+int\n+tf_tbl_sram_set(struct tf *tfp,\n+\t\tstruct tf_tbl_set_parms *parms)\n+{\n+\tint rc;\n+\tbool allocated = 0;\n+\tuint16_t hcapi_type;\n+\tstruct tf_rm_get_hcapi_parms hparms = { 0 };\n+\tstruct tf_session *tfs;\n+\tstruct tf_dev_info *dev;\n+\tstruct tbl_rm_db *tbl_db;\n+\tvoid *tbl_db_ptr = NULL;\n+\tstruct tf_tbl_sram_get_info_parms iparms = { 0 };\n+\tstruct tf_sram_mgr_is_allocated_parms aparms = { 0 };\n+\tvoid *sram_handle = NULL;\n+\n+\n+\tTF_CHECK_PARMS3(tfp, parms, parms->data);\n+\n+\t/* Retrieve the session information */\n+\trc = tf_session_get(tfp, &tfs, &dev);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get em_ext_db from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n+\n+\trc = tf_session_get_sram_db(tfp, &sram_handle);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get sram_handle from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\tiparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\tiparms.dir = parms->dir;\n+\tiparms.tbl_type = parms->type;\n+\n+\trc = tf_tbl_sram_get_info(&iparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Failed to get table info:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type));\n+\t\treturn rc;\n+\t}\n+\n+\taparms.sram_offset = parms->idx;\n+\taparms.slice_size = iparms.slice_size;\n+\taparms.bank_id = iparms.bank_id;\n+\taparms.dir = parms->dir;\n+\taparms.is_allocated = &allocated;\n+\trc = tf_sram_mgr_is_allocated(sram_handle, &aparms);\n+\tif (rc || !allocated) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Entry not allocated:%s idx(%d):(%s)\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    parms->idx,\n+\t\t\t    strerror(-rc));\n+\t\trc = -ENOMEM;\n+\t\treturn rc;\n+\t}\n+\n+\t/* Set the entry */\n+\thparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\thparms.subtype = parms->type;\n+\thparms.hcapi_type = &hcapi_type;\n+\trc = tf_rm_get_hcapi_type(&hparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s, Failed type lookup, type:%s, rc:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\trc = tf_msg_set_tbl_entry(tfp,\n+\t\t\t\t  parms->dir,\n+\t\t\t\t  hcapi_type,\n+\t\t\t\t  parms->data_sz_in_bytes,\n+\t\t\t\t  parms->data,\n+\t\t\t\t  parms->idx);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s, Set failed, type:%s, rc:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\treturn rc;\n+}\n+\n+int\n+tf_tbl_sram_get(struct 
tf *tfp,\n+\t\tstruct tf_tbl_get_parms *parms)\n+{\n+\tint rc;\n+\tuint16_t hcapi_type;\n+\tbool allocated = 0;\n+\tstruct tf_rm_get_hcapi_parms hparms = { 0 };\n+\tstruct tf_session *tfs;\n+\tstruct tf_dev_info *dev;\n+\tstruct tbl_rm_db *tbl_db;\n+\tvoid *tbl_db_ptr = NULL;\n+\tstruct tf_tbl_sram_get_info_parms iparms = { 0 };\n+\tstruct tf_sram_mgr_is_allocated_parms aparms = { 0 };\n+\tvoid *sram_handle = NULL;\n+\n+\tTF_CHECK_PARMS3(tfp, parms, parms->data);\n+\n+\t/* Retrieve the session information */\n+\trc = tf_session_get(tfp, &tfs, &dev);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get em_ext_db from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n+\n+\trc = tf_session_get_sram_db(tfp, &sram_handle);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get sram_handle from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\tiparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\tiparms.dir = parms->dir;\n+\tiparms.tbl_type = parms->type;\n+\n+\trc = tf_tbl_sram_get_info(&iparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Failed to get table info:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type));\n+\t\treturn rc;\n+\t}\n+\n+\taparms.sram_offset = parms->idx;\n+\taparms.slice_size = iparms.slice_size;\n+\taparms.bank_id = iparms.bank_id;\n+\taparms.dir = parms->dir;\n+\taparms.is_allocated = &allocated;\n+\n+\trc = tf_sram_mgr_is_allocated(sram_handle, &aparms);\n+\tif (rc || !allocated) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Entry not allocated:%s idx(%d):(%s)\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    parms->idx,\n+\t\t\t    strerror(-rc));\n+\t\trc = -ENOMEM;\n+\t\treturn rc;\n+\t}\n+\n+\t/* Get the entry */\n+\thparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\thparms.subtype = parms->type;\n+\thparms.hcapi_type = &hcapi_type;\n+\trc = tf_rm_get_hcapi_type(&hparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s, Failed type lookup, type:%s, rc:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\t/* Get the entry */\n+\trc = tf_msg_get_tbl_entry(tfp,\n+\t\t\t\t  parms->dir,\n+\t\t\t\t  hcapi_type,\n+\t\t\t\t  parms->data_sz_in_bytes,\n+\t\t\t\t  parms->data,\n+\t\t\t\t  parms->idx);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s, Get failed, type:%s, rc:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\treturn rc;\n+}\n+\n+int\n+tf_tbl_sram_bulk_get(struct tf *tfp,\n+\t\t     struct tf_tbl_get_bulk_parms *parms)\n+{\n+\tint rc;\n+\tuint16_t hcapi_type;\n+\tstruct tf_rm_get_hcapi_parms hparms = { 0 };\n+\tstruct tf_tbl_sram_get_info_parms iparms = { 0 };\n+\tstruct tf_session *tfs;\n+\tstruct tf_dev_info *dev;\n+\tstruct tbl_rm_db *tbl_db;\n+\tvoid *tbl_db_ptr = NULL;\n+\tuint16_t idx;\n+\tstruct tf_sram_mgr_is_allocated_parms aparms = { 0 };\n+\tbool allocated = false;\n+\tvoid *sram_handle = NULL;\n+\n+\tTF_CHECK_PARMS2(tfp, parms);\n+\n+\t/* Retrieve the session information */\n+\trc = tf_session_get(tfp, &tfs, &dev);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);\n+\tif (rc) 
{\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get em_ext_db from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\ttbl_db = (struct tbl_rm_db *)tbl_db_ptr;\n+\n+\trc = tf_session_get_sram_db(tfp, &sram_handle);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"Failed to get sram_handle from session, rc:%s\\n\",\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\tiparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\tiparms.dir = parms->dir;\n+\tiparms.tbl_type = parms->type;\n+\n+\trc = tf_tbl_sram_get_info(&iparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Failed to get table info:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type));\n+\t\treturn rc;\n+\t}\n+\n+\t/* Validate that both the start offset and the end offset are\n+\t * allocated.  This API is only used for statistics; the 8 byte\n+\t * entry allocation is used for the verification.\n+\t */\n+\taparms.sram_offset = parms->starting_idx;\n+\taparms.slice_size = iparms.slice_size;\n+\taparms.bank_id = iparms.bank_id;\n+\taparms.dir = parms->dir;\n+\taparms.is_allocated = &allocated;\n+\trc = tf_sram_mgr_is_allocated(sram_handle, &aparms);\n+\tif (rc || !allocated) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Entry not allocated:%s starting_idx(%d):(%s)\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    parms->starting_idx,\n+\t\t\t    strerror(-rc));\n+\t\trc = -ENOMEM;\n+\t\treturn rc;\n+\t}\n+\tidx = parms->starting_idx + parms->num_entries - 1;\n+\taparms.sram_offset = idx;\n+\trc = tf_sram_mgr_is_allocated(sram_handle, &aparms);\n+\tif (rc || !allocated) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s: Entry not allocated:%s last_idx(%d):(%s)\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    idx,\n+\t\t\t    strerror(-rc));\n+\t\trc = -ENOMEM;\n+\t\treturn rc;\n+\t}\n+\n+\thparms.rm_db = tbl_db->tbl_db[parms->dir];\n+\thparms.subtype = parms->type;\n+\thparms.hcapi_type = &hcapi_type;\n+\trc = tf_rm_get_hcapi_type(&hparms);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s, Failed type lookup, type:%s, rc:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    strerror(-rc));\n+\t\treturn rc;\n+\t}\n+\n+\t/* Get the entries */\n+\trc = tf_msg_bulk_get_tbl_entry(tfp,\n+\t\t\t\t       parms->dir,\n+\t\t\t\t       hcapi_type,\n+\t\t\t\t       parms->starting_idx,\n+\t\t\t\t       parms->num_entries,\n+\t\t\t\t       parms->entry_sz_in_bytes,\n+\t\t\t\t       parms->physical_mem_addr);\n+\tif (rc) {\n+\t\tTFP_DRV_LOG(ERR,\n+\t\t\t    \"%s, Bulk get failed, type:%s, rc:%s\\n\",\n+\t\t\t    tf_dir_2_str(parms->dir),\n+\t\t\t    tf_tbl_type_2_str(parms->type),\n+\t\t\t    strerror(-rc));\n+\t}\n+\treturn rc;\n+}\ndiff --git a/drivers/net/bnxt/tf_core/tf_tbl_sram.h b/drivers/net/bnxt/tf_core/tf_tbl_sram.h\nnew file mode 100644\nindex 0000000000..32001e34a9\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/tf_tbl_sram.h\n@@ -0,0 +1,154 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2021 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#ifndef TF_TBL_SRAM_H_\n+#define TF_TBL_SRAM_H_\n+\n+#include \"tf_core.h\"\n+#include \"stack.h\"\n+\n+\n+/**\n+ * The SRAM Table module provides processing of managed SRAM types.\n+ */\n+\n+\n+/**\n+ * @page  tblsram SRAM Table\n+ *\n+ * @ref tf_tbl_sram_bind\n+ *\n+ * @ref tf_tbl_sram_unbind\n+ *\n+ * @ref tf_tbl_sram_alloc\n+ *\n+ * @ref tf_tbl_sram_free\n+ *\n+ * @ref tf_tbl_sram_set\n+ *\n+ * 
@ref tf_tbl_sram_get\n+ *\n+ * @ref tf_tbl_sram_bulk_get\n+ */\n+\n+/**\n+ * Initializes the Table module with the requested DBs. Must be\n+ * invoked as the first thing before any of the access functions.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle, used for HCAPI communication\n+ *\n+ * [in] parms\n+ *   Pointer to Table configuration parameters\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+int tf_tbl_sram_bind(struct tf *tfp);\n+\n+/**\n+ * Cleans up the private DBs and releases all the data.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle, used for HCAPI communication\n+ *\n+ * [in] parms\n+ *   Pointer to parameters\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+int tf_tbl_sram_unbind(struct tf *tfp);\n+\n+/**\n+ * Allocates the requested table type from the internal RM DB.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle, used for HCAPI communication\n+ *\n+ * [in] parms\n+ *   Pointer to Table allocation parameters\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+int tf_tbl_sram_alloc(struct tf *tfp,\n+\t\t      struct tf_tbl_alloc_parms *parms);\n+\n+/**\n+ * Frees the requested table type and returns it to the DB. If shadow\n+ * DB is enabled it is searched first and, if found, the element refcount\n+ * is decremented. If the refcount goes to 0 then it is returned to the\n+ * table type DB.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle, used for HCAPI communication\n+ *\n+ * [in] parms\n+ *   Pointer to Table free parameters\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+int tf_tbl_sram_free(struct tf *tfp,\n+\t\t     struct tf_tbl_free_parms *parms);\n+\n+\n+/**\n+ * Configures the requested element by sending a firmware request which\n+ * then installs it into the device internal structures.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle, used for HCAPI communication\n+ *\n+ * [in] parms\n+ *   Pointer to Table set parameters\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+int tf_tbl_sram_set(struct tf *tfp,\n+\t\t    struct tf_tbl_set_parms *parms);\n+\n+/**\n+ * Retrieves the requested element by sending a firmware request to get\n+ * the element.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle, used for HCAPI communication\n+ *\n+ * [in] parms\n+ *   Pointer to Table get parameters\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+int tf_tbl_sram_get(struct tf *tfp,\n+\t\t    struct tf_tbl_get_parms *parms);\n+\n+/**\n+ * Retrieves a bulk block of elements by sending a firmware request to\n+ * get the elements.\n+ *\n+ * [in] tfp\n+ *   Pointer to TF handle, used for HCAPI communication\n+ *\n+ * [in] parms\n+ *   Pointer to Table get bulk parameters\n+ *\n+ * Returns\n+ *   - (0) if successful.\n+ *   - (-EINVAL) on failure.\n+ */\n+int tf_tbl_sram_bulk_get(struct tf *tfp,\n+\t\t\t struct tf_tbl_get_bulk_parms *parms);\n+\n+#endif /* TF_TBL_SRAM_H */\ndiff --git a/drivers/net/bnxt/tf_core/tf_tcam.c b/drivers/net/bnxt/tf_core/tf_tcam.c\nindex 45206c5992..806af3070a 100644\n--- a/drivers/net/bnxt/tf_core/tf_tcam.c\n+++ b/drivers/net/bnxt/tf_core/tf_tcam.c\n@@ -43,7 +43,7 @@ tf_tcam_bind(struct tf *tfp,\n \tstruct tf_shadow_tcam_free_db_parms fshadow;\n \tstruct tf_shadow_tcam_cfg_parms shadow_cfg;\n \tstruct tf_shadow_tcam_create_db_parms shadow_cdb;\n-\tuint16_t num_slices = 1;\n+\tuint16_t num_slices = parms->wc_num_slices;\n \tstruct tf_session *tfs;\n \tstruct 
tf_dev_info *dev;\n \tstruct tcam_rm_db *tcam_db;\n@@ -61,7 +61,7 @@ tf_tcam_bind(struct tf *tfp,\n \tif (rc)\n \t\treturn rc;\n \n-\tif (dev->ops->tf_dev_get_tcam_slice_info == NULL) {\n+\tif (dev->ops->tf_dev_set_tcam_slice_info == NULL) {\n \t\trc = -EOPNOTSUPP;\n \t\tTFP_DRV_LOG(ERR,\n \t\t\t    \"Operation not supported, rc:%s\\n\",\n@@ -69,10 +69,8 @@ tf_tcam_bind(struct tf *tfp,\n \t\treturn rc;\n \t}\n \n-\trc = dev->ops->tf_dev_get_tcam_slice_info(tfp,\n-\t\t\t\t\t\t  TF_TCAM_TBL_TYPE_WC_TCAM,\n-\t\t\t\t\t\t  0,\n-\t\t\t\t\t\t  &num_slices);\n+\trc = dev->ops->tf_dev_set_tcam_slice_info(tfp,\n+\t\t\t\t\t\t  num_slices);\n \tif (rc)\n \t\treturn rc;\n \ndiff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h\nindex bed17af6ae..b1e7a92b0b 100644\n--- a/drivers/net/bnxt/tf_core/tf_tcam.h\n+++ b/drivers/net/bnxt/tf_core/tf_tcam.h\n@@ -12,6 +12,9 @@\n  * The TCAM module provides processing of Internal TCAM types.\n  */\n \n+/* Number of slices per row for WC TCAM */\n+extern uint16_t g_wc_num_slices_per_row;\n+\n /**\n  * TCAM configuration parameters\n  */\n@@ -36,6 +39,10 @@ struct tf_tcam_cfg_parms {\n \t * Session resource allocations\n \t */\n \tstruct tf_session_resources *resources;\n+\t/**\n+\t * WC number of slices per row.\n+\t */\n+\tenum tf_wc_num_slice wc_num_slices;\n };\n \n /**\ndiff --git a/drivers/net/bnxt/tf_core/tf_tcam_shared.c b/drivers/net/bnxt/tf_core/tf_tcam_shared.c\nindex 83b6fbd5fb..c120c6f577 100644\n--- a/drivers/net/bnxt/tf_core/tf_tcam_shared.c\n+++ b/drivers/net/bnxt/tf_core/tf_tcam_shared.c\n@@ -279,18 +279,6 @@ tf_tcam_shared_bind(struct tf *tfp,\n \t\tif (rc)\n \t\t\treturn rc;\n \n-\t\trc = tf_tcam_shared_get_slices(tfp,\n-\t\t\t\t\t       dev,\n-\t\t\t\t\t       &num_slices);\n-\t\tif (rc)\n-\t\t\treturn rc;\n-\n-\t\tif (num_slices > 1) {\n-\t\t\tTFP_DRV_LOG(ERR,\n-\t\t\t\t    \"Only single slice supported\\n\");\n-\t\t\treturn -EOPNOTSUPP;\n-\t\t}\n-\n \t\ttf_tcam_shared_create_db(&tcam_shared_wc);\n \n \n@@ -330,6 +318,18 @@ tf_tcam_shared_bind(struct tf *tfp,\n \n \t\t\ttf_session_set_tcam_shared_db(tfp, (void *)tcam_shared_wc);\n \t\t}\n+\n+\t\trc = tf_tcam_shared_get_slices(tfp,\n+\t\t\t\t\t       dev,\n+\t\t\t\t\t       &num_slices);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n+\t\tif (num_slices > 1) {\n+\t\t\tTFP_DRV_LOG(ERR,\n+\t\t\t\t    \"Only single slice supported\\n\");\n+\t\t\treturn -EOPNOTSUPP;\n+\t\t}\n \t}\n done:\n \treturn rc;\n@@ -972,9 +972,9 @@ tf_tcam_shared_move_entry(struct tf *tfp,\n \tsparms.idx = dphy_idx;\n \tsparms.key = gparms.key;\n \tsparms.mask = gparms.mask;\n-\tsparms.key_size = gparms.key_size;\n+\tsparms.key_size = key_sz_bytes;\n \tsparms.result = gparms.result;\n-\tsparms.result_size = gparms.result_size;\n+\tsparms.result_size = remap_sz_bytes;\n \n \trc = tf_msg_tcam_entry_set(tfp, dev, &sparms);\n \tif (rc) {\ndiff --git a/drivers/net/bnxt/tf_core/tf_util.c b/drivers/net/bnxt/tf_core/tf_util.c\nindex d100399d0a..c1b9be0755 100644\n--- a/drivers/net/bnxt/tf_core/tf_util.c\n+++ b/drivers/net/bnxt/tf_core/tf_util.c\n@@ -76,6 +76,8 @@ tf_tbl_type_2_str(enum tf_tbl_type tbl_type)\n \tswitch (tbl_type) {\n \tcase TF_TBL_TYPE_FULL_ACT_RECORD:\n \t\treturn \"Full Action record\";\n+\tcase TF_TBL_TYPE_COMPACT_ACT_RECORD:\n+\t\treturn \"Compact Action record\";\n \tcase TF_TBL_TYPE_MCAST_GROUPS:\n \t\treturn \"Multicast Groups\";\n \tcase TF_TBL_TYPE_ACT_ENCAP_8B:\n@@ -96,6 +98,14 @@ tf_tbl_type_2_str(enum tf_tbl_type tbl_type)\n \t\treturn \"Stats 64B\";\n \tcase 
TF_TBL_TYPE_ACT_MODIFY_IPV4:\n \t\treturn \"Modify IPv4\";\n+\tcase TF_TBL_TYPE_ACT_MODIFY_8B:\n+\t\treturn \"Modify 8B\";\n+\tcase TF_TBL_TYPE_ACT_MODIFY_16B:\n+\t\treturn \"Modify 16B\";\n+\tcase TF_TBL_TYPE_ACT_MODIFY_32B:\n+\t\treturn \"Modify 32B\";\n+\tcase TF_TBL_TYPE_ACT_MODIFY_64B:\n+\t\treturn \"Modify 64B\";\n \tcase TF_TBL_TYPE_METER_PROF:\n \t\treturn \"Meter Profile\";\n \tcase TF_TBL_TYPE_METER_INST:\ndiff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c\nindex dbf85e4eda..183bae66c5 100644\n--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c\n+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c\n@@ -384,6 +384,7 @@ ulp_ctx_shared_session_open(struct bnxt *bp,\n \tsize_t copy_nbytes;\n \tuint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;\n \tint32_t\trc = 0;\n+\tuint8_t app_id;\n \n \t/* only perform this if shared session is enabled. */\n \tif (!bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx))\n@@ -422,6 +423,12 @@ ulp_ctx_shared_session_open(struct bnxt *bp,\n \tif (rc)\n \t\treturn rc;\n \n+\trc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);\n+\tif (rc) {\n+\t\tBNXT_TF_DBG(ERR, \"Unable to get the app id from ulp.\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n \trc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);\n \tif (rc) {\n \t\tBNXT_TF_DBG(ERR, \"Unable to get device id from ulp.\\n\");\n@@ -445,6 +452,10 @@ ulp_ctx_shared_session_open(struct bnxt *bp,\n \n \tparms.shadow_copy = true;\n \tparms.bp = bp;\n+\tif (app_id == 0 || app_id == 3)\n+\t\tparms.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;\n+\telse\n+\t\tparms.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;\n \n \t/*\n \t * Open the session here, but the collect the resources during the\n@@ -516,6 +527,7 @@ ulp_ctx_session_open(struct bnxt *bp,\n \tstruct tf_open_session_parms\tparams;\n \tstruct tf_session_resources\t*resources;\n \tuint32_t ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST;\n+\tuint8_t app_id;\n \n \tmemset(&params, 0, sizeof(params));\n \n@@ -529,6 +541,12 @@ ulp_ctx_session_open(struct bnxt *bp,\n \n \tparams.shadow_copy = true;\n \n+\trc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id);\n+\tif (rc) {\n+\t\tBNXT_TF_DBG(ERR, \"Unable to get the app id from ulp.\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n \trc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id);\n \tif (rc) {\n \t\tBNXT_TF_DBG(ERR, \"Unable to get device id from ulp.\\n\");\n@@ -556,6 +574,11 @@ ulp_ctx_session_open(struct bnxt *bp,\n \t\treturn rc;\n \n \tparams.bp = bp;\n+\tif (app_id == 0 || app_id == 3)\n+\t\tparams.wc_num_slices = TF_WC_TCAM_2_SLICE_PER_ROW;\n+\telse\n+\t\tparams.wc_num_slices = TF_WC_TCAM_1_SLICE_PER_ROW;\n+\n \trc = tf_open_session(&bp->tfp, &params);\n \tif (rc) {\n \t\tBNXT_TF_DBG(ERR, \"Failed to open TF session - %s, rc = %d\\n\",\ndiff --git a/meson_options.txt b/meson_options.txt\nindex 0e92734c49..f686e6d92a 100644\n--- a/meson_options.txt\n+++ b/meson_options.txt\n@@ -46,3 +46,5 @@ option('tests', type: 'boolean', value: true, description:\n        'build unit tests')\n option('use_hpet', type: 'boolean', value: false, description:\n        'use HPET timer in EAL')\n+option('bnxt_tf_wc_slices', type: 'integer', min: 1, max: 4, value: 2,\n+\tdescription: 'Number of slices per WC TCAM entry')\n",
    "prefixes": [
        "04/14"
    ]
}