get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/68394/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 68394,
    "url": "http://patches.dpdk.org/api/patches/68394/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1586852011-37536-9-git-send-email-venkatkumar.duvvuru@broadcom.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1586852011-37536-9-git-send-email-venkatkumar.duvvuru@broadcom.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1586852011-37536-9-git-send-email-venkatkumar.duvvuru@broadcom.com",
    "date": "2020-04-14T08:13:05",
    "name": "[v3,08/34] net/bnxt: add resource manager functionality",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "0d064d522a6be41f28bd82a4bebb74da482e00a7",
    "submitter": {
        "id": 1635,
        "url": "http://patches.dpdk.org/api/people/1635/?format=api",
        "name": "Venkat Duvvuru",
        "email": "venkatkumar.duvvuru@broadcom.com"
    },
    "delegate": {
        "id": 1766,
        "url": "http://patches.dpdk.org/api/users/1766/?format=api",
        "username": "ajitkhaparde",
        "first_name": "Ajit",
        "last_name": "Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1586852011-37536-9-git-send-email-venkatkumar.duvvuru@broadcom.com/mbox/",
    "series": [
        {
            "id": 9362,
            "url": "http://patches.dpdk.org/api/series/9362/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=9362",
            "date": "2020-04-14T08:12:57",
            "name": "add support for host based flow table management",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/9362/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/68394/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/68394/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BC502A0577;\n\tTue, 14 Apr 2020 10:15:52 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 452301C209;\n\tTue, 14 Apr 2020 10:14:10 +0200 (CEST)",
            "from mail-pg1-f176.google.com (mail-pg1-f176.google.com\n [209.85.215.176]) by dpdk.org (Postfix) with ESMTP id 79C1E1C202\n for <dev@dpdk.org>; Tue, 14 Apr 2020 10:14:08 +0200 (CEST)",
            "by mail-pg1-f176.google.com with SMTP id h69so3132425pgc.8\n for <dev@dpdk.org>; Tue, 14 Apr 2020 01:14:08 -0700 (PDT)",
            "from S60.dhcp.broadcom.net ([192.19.234.250])\n by smtp.gmail.com with ESMTPSA id e196sm671176pfh.43.2020.04.14.01.14.03\n (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128);\n Tue, 14 Apr 2020 01:14:04 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=broadcom.com;\n s=google;\n h=from:to:cc:subject:date:message-id:in-reply-to:references;\n bh=1PWwSmghdxwLvXwGN4zdejonawKJGz+3yTLd3hrGrgM=;\n b=PpzjA6YU7scTRmreHXl+utjUSO5SA+ibelxsF9vBgYADngVffY7gwh7kF4eJQ9k2it\n OnSSD+mZzWPSMT/wRiqebD+DEoC/foBXe/MckcBrs8WzaFcb5wo92i5K0JXTL2Y2ZQvf\n YwJ8vEapBTFXPW+Jgvc4r5/p6c8ew+XZ1Cijo=",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references;\n bh=1PWwSmghdxwLvXwGN4zdejonawKJGz+3yTLd3hrGrgM=;\n b=GSZUgIvOPUPC7nQ+i/m/RdyHKEIhOzbyKC3uMabm09OBQaOm1TLvCgGkgyVChwWyvW\n EtXVe5gp2kgYqydB7K0ufQnRbQ08QwRvqRpP3i4KOJ0GOA/H3qk1R9YsJuGdXdHlSIGx\n RVQKaWeZ0AVWAx7BDJYWPlF9bUkt2mpTm3sHtl8n/tWBhA99bRfUNgjMawEow3MhvskF\n zwnozKO68py1WHag+Rj1JVMN3lWtdYkDktXg2sFttnL77LWh/CssAYFnpFsn7iYz76dp\n p2hMWbTB9QnU3Bfe8VWx2XiRIJAGFkendzyrnSMGFkJmdiJJNqPFXya5xcgDufl2jbi4\n 0UXg==",
        "X-Gm-Message-State": "AGi0PuZDqte+ZH4LJhUZmMG8MuyN0Bew5snJp/+btz4mmPqQnyZwr+wl\n pZ1eH2KJwzbW5z1ym0D7ViC+s8tmq8hmaYIw77bZaW+c8hzoEdbl9sg/E3VQPM0itk+Ni2SFpo4\n 7I2wbVcUaXE2pA0pJx0d1A3D0jIK7N0iRgNhuNZyB3P9sSg9y3v5/eWkcss25UZwiDCIJ",
        "X-Google-Smtp-Source": "\n APiQypKZ/TKX42bdCnv5CaIEPcLY6EGY8MUwtHZgrybCuZ4CJ2FwinvnoWAgTo5VTxaWh4a7ujPDbQ==",
        "X-Received": "by 2002:a62:2a8d:: with SMTP id\n q135mr22975209pfq.220.1586852045861;\n Tue, 14 Apr 2020 01:14:05 -0700 (PDT)",
        "From": "Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>",
        "To": "dev@dpdk.org",
        "Cc": "Michael Wildt <michael.wildt@broadcom.com>",
        "Date": "Tue, 14 Apr 2020 13:43:05 +0530",
        "Message-Id": "\n <1586852011-37536-9-git-send-email-venkatkumar.duvvuru@broadcom.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "\n <1586852011-37536-1-git-send-email-venkatkumar.duvvuru@broadcom.com>",
        "References": "\n <1586806811-21736-1-git-send-email-venkatkumar.duvvuru@broadcom.com>\n <1586852011-37536-1-git-send-email-venkatkumar.duvvuru@broadcom.com>",
        "Subject": "[dpdk-dev] [PATCH v3 08/34] net/bnxt: add resource manager\n\tfunctionality",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Michael Wildt <michael.wildt@broadcom.com>\n\n- Add TruFlow RM functionality for resource handling\n- Update the TruFlow Resource Manager (RM) with resource\n  support functions for debugging as well as resource cleanup.\n- Add support for Internal and external pools.\n\nSigned-off-by: Michael Wildt <michael.wildt@broadcom.com>\nReviewed-by: Randy Schacher <stuart.schacher@broadcom.com>\nReviewed-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>\n---\n drivers/net/bnxt/tf_core/tf_core.c    |   14 +\n drivers/net/bnxt/tf_core/tf_core.h    |   26 +\n drivers/net/bnxt/tf_core/tf_rm.c      | 1718 +++++++++++++++++++++++++++++++--\n drivers/net/bnxt/tf_core/tf_session.h |   10 +\n drivers/net/bnxt/tf_core/tf_tbl.h     |   43 +\n 5 files changed, 1735 insertions(+), 76 deletions(-)\n create mode 100644 drivers/net/bnxt/tf_core/tf_tbl.h",
    "diff": "diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c\nindex 7d76efa..bb6d38b 100644\n--- a/drivers/net/bnxt/tf_core/tf_core.c\n+++ b/drivers/net/bnxt/tf_core/tf_core.c\n@@ -149,6 +149,20 @@ tf_open_session(struct tf                    *tfp,\n \t\tgoto cleanup_close;\n \t}\n \n+\t/* Shadow DB configuration */\n+\tif (parms->shadow_copy) {\n+\t\t/* Ignore shadow_copy setting */\n+\t\tsession->shadow_copy = 0;/* parms->shadow_copy; */\n+#if (TF_SHADOW == 1)\n+\t\trc = tf_rm_shadow_db_init(tfs);\n+\t\tif (rc)\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"Shadow DB Initialization failed\\n, rc:%d\",\n+\t\t\t\t    rc);\n+\t\t/* Add additional processing */\n+#endif /* TF_SHADOW */\n+\t}\n+\n \t/* Adjust the Session with what firmware allowed us to get */\n \trc = tf_rm_allocate_validate(tfp);\n \tif (rc) {\ndiff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h\nindex 3455d8f..16c8251 100644\n--- a/drivers/net/bnxt/tf_core/tf_core.h\n+++ b/drivers/net/bnxt/tf_core/tf_core.h\n@@ -30,6 +30,32 @@ enum tf_dir {\n \tTF_DIR_MAX\n };\n \n+/**\n+ * External pool size\n+ *\n+ * Defines a single pool of external action records of\n+ * fixed size.  
Currently, this is an index.\n+ */\n+#define TF_EXT_POOL_ENTRY_SZ_BYTES 1\n+\n+/**\n+ *  External pool entry count\n+ *\n+ *  Defines the number of entries in the external action pool\n+ */\n+#define TF_EXT_POOL_ENTRY_CNT (1 * 1024)\n+\n+/**\n+ * Number of external pools\n+ */\n+#define TF_EXT_POOL_CNT_MAX 1\n+\n+/**\n+ * External pool Id\n+ */\n+#define TF_EXT_POOL_0      0 /**< matches TF_TBL_TYPE_EXT   */\n+#define TF_EXT_POOL_1      1 /**< matches TF_TBL_TYPE_EXT_0 */\n+\n /********** BEGIN API FUNCTION PROTOTYPES/PARAMETERS **********/\n \n /**\ndiff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c\nindex 56767e7..a5e96f29 100644\n--- a/drivers/net/bnxt/tf_core/tf_rm.c\n+++ b/drivers/net/bnxt/tf_core/tf_rm.c\n@@ -104,9 +104,82 @@ const char\n \tcase TF_IDENT_TYPE_L2_FUNC:\n \t\treturn \"l2_func\";\n \tdefault:\n-\t\tbreak;\n+\t\treturn \"Invalid identifier\";\n+\t}\n+}\n+\n+const char\n+*tf_tcam_tbl_2_str(enum tf_tcam_tbl_type tcam_type)\n+{\n+\tswitch (tcam_type) {\n+\tcase TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:\n+\t\treturn \"l2_ctxt_tcam\";\n+\tcase TF_TCAM_TBL_TYPE_PROF_TCAM:\n+\t\treturn \"prof_tcam\";\n+\tcase TF_TCAM_TBL_TYPE_WC_TCAM:\n+\t\treturn \"wc_tcam\";\n+\tcase TF_TCAM_TBL_TYPE_VEB_TCAM:\n+\t\treturn \"veb_tcam\";\n+\tcase TF_TCAM_TBL_TYPE_SP_TCAM:\n+\t\treturn \"sp_tcam\";\n+\tcase TF_TCAM_TBL_TYPE_CT_RULE_TCAM:\n+\t\treturn \"ct_rule_tcam\";\n+\tdefault:\n+\t\treturn \"Invalid tcam table type\";\n+\t}\n+}\n+\n+const char\n+*tf_hcapi_hw_2_str(enum tf_resource_type_hw hw_type)\n+{\n+\tswitch (hw_type) {\n+\tcase TF_RESC_TYPE_HW_L2_CTXT_TCAM:\n+\t\treturn \"L2 ctxt tcam\";\n+\tcase TF_RESC_TYPE_HW_PROF_FUNC:\n+\t\treturn \"Profile Func\";\n+\tcase TF_RESC_TYPE_HW_PROF_TCAM:\n+\t\treturn \"Profile tcam\";\n+\tcase TF_RESC_TYPE_HW_EM_PROF_ID:\n+\t\treturn \"EM profile id\";\n+\tcase TF_RESC_TYPE_HW_EM_REC:\n+\t\treturn \"EM record\";\n+\tcase TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:\n+\t\treturn \"WC tcam profile id\";\n+\tcase 
TF_RESC_TYPE_HW_WC_TCAM:\n+\t\treturn \"WC tcam\";\n+\tcase TF_RESC_TYPE_HW_METER_PROF:\n+\t\treturn \"Meter profile\";\n+\tcase TF_RESC_TYPE_HW_METER_INST:\n+\t\treturn \"Meter instance\";\n+\tcase TF_RESC_TYPE_HW_MIRROR:\n+\t\treturn \"Mirror\";\n+\tcase TF_RESC_TYPE_HW_UPAR:\n+\t\treturn \"UPAR\";\n+\tcase TF_RESC_TYPE_HW_SP_TCAM:\n+\t\treturn \"Source properties tcam\";\n+\tcase TF_RESC_TYPE_HW_L2_FUNC:\n+\t\treturn \"L2 Function\";\n+\tcase TF_RESC_TYPE_HW_FKB:\n+\t\treturn \"FKB\";\n+\tcase TF_RESC_TYPE_HW_TBL_SCOPE:\n+\t\treturn \"Table scope\";\n+\tcase TF_RESC_TYPE_HW_EPOCH0:\n+\t\treturn \"EPOCH0\";\n+\tcase TF_RESC_TYPE_HW_EPOCH1:\n+\t\treturn \"EPOCH1\";\n+\tcase TF_RESC_TYPE_HW_METADATA:\n+\t\treturn \"Metadata\";\n+\tcase TF_RESC_TYPE_HW_CT_STATE:\n+\t\treturn \"Connection tracking state\";\n+\tcase TF_RESC_TYPE_HW_RANGE_PROF:\n+\t\treturn \"Range profile\";\n+\tcase TF_RESC_TYPE_HW_RANGE_ENTRY:\n+\t\treturn \"Range entry\";\n+\tcase TF_RESC_TYPE_HW_LAG_ENTRY:\n+\t\treturn \"LAG\";\n+\tdefault:\n+\t\treturn \"Invalid identifier\";\n \t}\n-\treturn \"Invalid identifier\";\n }\n \n const char\n@@ -145,6 +218,93 @@ const char\n }\n \n /**\n+ * Helper function to perform a HW HCAPI resource type lookup against\n+ * the reserved value of the same static type.\n+ *\n+ * Returns:\n+ *   -EOPNOTSUPP - Reserved resource type not supported\n+ *   Value       - Integer value of the reserved value for the requested type\n+ */\n+static int\n+tf_rm_rsvd_hw_value(enum tf_dir dir, enum tf_resource_type_hw index)\n+{\n+\tuint32_t value = -EOPNOTSUPP;\n+\n+\tswitch (index) {\n+\tcase TF_RESC_TYPE_HW_L2_CTXT_TCAM:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_L2_CTXT_TCAM, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_PROF_FUNC:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_PROF_FUNC, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_PROF_TCAM:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_PROF_TCAM, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_EM_PROF_ID:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_EM_PROF_ID, 
value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_EM_REC:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_EM_REC, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_WC_TCAM_PROF_ID:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_WC_TCAM_PROF_ID, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_WC_TCAM:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_WC_TCAM, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_METER_PROF:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_METER_PROF, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_METER_INST:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_METER_INST, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_MIRROR:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_MIRROR, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_UPAR:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_UPAR, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_SP_TCAM:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_SP_TCAM, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_L2_FUNC:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_L2_FUNC, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_FKB:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_FKB, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_TBL_SCOPE:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_TBL_SCOPE, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_EPOCH0:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_EPOCH0, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_EPOCH1:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_EPOCH1, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_METADATA:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_METADATA, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_CT_STATE:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_CT_STATE, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_RANGE_PROF:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_RANGE_PROF, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_RANGE_ENTRY:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_RANGE_ENTRY, value);\n+\t\tbreak;\n+\tcase TF_RESC_TYPE_HW_LAG_ENTRY:\n+\t\tTF_RESC_RSVD(dir, TF_RSVD_LAG_ENTRY, value);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\treturn value;\n+}\n+\n+/**\n  * Helper function to perform a SRAM HCAPI resource type lookup\n  * against the reserved value of the same static type.\n  *\n@@ -205,6 +365,36 @@ 
tf_rm_rsvd_sram_value(enum tf_dir dir, enum tf_resource_type_sram index)\n }\n \n /**\n+ * Helper function to print all the HW resource qcaps errors reported\n+ * in the error_flag.\n+ *\n+ * [in] dir\n+ *   Receive or transmit direction\n+ *\n+ * [in] error_flag\n+ *   Pointer to the hw error flags created at time of the query check\n+ */\n+static void\n+tf_rm_print_hw_qcaps_error(enum tf_dir dir,\n+\t\t\t   struct tf_rm_hw_query *hw_query,\n+\t\t\t   uint32_t *error_flag)\n+{\n+\tint i;\n+\n+\tPMD_DRV_LOG(ERR, \"QCAPS errors HW\\n\");\n+\tPMD_DRV_LOG(ERR, \"  Direction: %s\\n\", tf_dir_2_str(dir));\n+\tPMD_DRV_LOG(ERR, \"  Elements:\\n\");\n+\n+\tfor (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {\n+\t\tif (*error_flag & 1 << i)\n+\t\t\tPMD_DRV_LOG(ERR, \"    %s, %d elem available, req:%d\\n\",\n+\t\t\t\t    tf_hcapi_hw_2_str(i),\n+\t\t\t\t    hw_query->hw_query[i].max,\n+\t\t\t\t    tf_rm_rsvd_hw_value(dir, i));\n+\t}\n+}\n+\n+/**\n  * Helper function to print all the SRAM resource qcaps errors\n  * reported in the error_flag.\n  *\n@@ -264,12 +454,139 @@ tf_rm_check_hw_qcaps_static(struct tf_rm_hw_query *query,\n \t\t\t    uint32_t *error_flag)\n {\n \t*error_flag = 0;\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_L2_CTXT_TCAM,\n+\t\t\t     TF_RSVD_L2_CTXT_TCAM,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_PROF_FUNC,\n+\t\t\t     TF_RSVD_PROF_FUNC,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_PROF_TCAM,\n+\t\t\t     TF_RSVD_PROF_TCAM,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_EM_PROF_ID,\n+\t\t\t     TF_RSVD_EM_PROF_ID,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_EM_REC,\n+\t\t\t     TF_RSVD_EM_REC,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     
dir,\n+\t\t\t     TF_RESC_TYPE_HW_WC_TCAM_PROF_ID,\n+\t\t\t     TF_RSVD_WC_TCAM_PROF_ID,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_WC_TCAM,\n+\t\t\t     TF_RSVD_WC_TCAM,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_METER_PROF,\n+\t\t\t     TF_RSVD_METER_PROF,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_METER_INST,\n+\t\t\t     TF_RSVD_METER_INST,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_MIRROR,\n+\t\t\t     TF_RSVD_MIRROR,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_UPAR,\n+\t\t\t     TF_RSVD_UPAR,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_SP_TCAM,\n+\t\t\t     TF_RSVD_SP_TCAM,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_L2_FUNC,\n+\t\t\t     TF_RSVD_L2_FUNC,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_FKB,\n+\t\t\t     TF_RSVD_FKB,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_TBL_SCOPE,\n+\t\t\t     TF_RSVD_TBL_SCOPE,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_EPOCH0,\n+\t\t\t     TF_RSVD_EPOCH0,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_EPOCH1,\n+\t\t\t     TF_RSVD_EPOCH1,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_METADATA,\n+\t\t\t     TF_RSVD_METADATA,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_CT_STATE,\n+\t\t\t     
TF_RSVD_CT_STATE,\n+\t\t\t     error_flag);\n+\n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_RANGE_PROF,\n+\t\t\t     TF_RSVD_RANGE_PROF,\n+\t\t\t     error_flag);\n+\n \tTF_RM_CHECK_HW_ALLOC(query,\n \t\t\t     dir,\n \t\t\t     TF_RESC_TYPE_HW_RANGE_ENTRY,\n \t\t\t     TF_RSVD_RANGE_ENTRY,\n \t\t\t     error_flag);\n \n+\tTF_RM_CHECK_HW_ALLOC(query,\n+\t\t\t     dir,\n+\t\t\t     TF_RESC_TYPE_HW_LAG_ENTRY,\n+\t\t\t     TF_RSVD_LAG_ENTRY,\n+\t\t\t     error_flag);\n+\n \tif (*error_flag != 0)\n \t\treturn -ENOMEM;\n \n@@ -434,26 +751,584 @@ tf_rm_reserve_range(uint32_t count,\n \t\t\tfor (i = 0; i < rsv_begin; i++)\n \t\t\t\tba_alloc_index(pool, i);\n \n-\t\t\t/* Skip and then do the remaining */\n-\t\t\tif (rsv_end < max - 1) {\n-\t\t\t\tfor (i = rsv_end; i < max; i++)\n-\t\t\t\t\tba_alloc_index(pool, i);\n-\t\t\t}\n-\t\t}\n-\t}\n+\t\t\t/* Skip and then do the remaining */\n+\t\t\tif (rsv_end < max - 1) {\n+\t\t\t\tfor (i = rsv_end; i < max; i++)\n+\t\t\t\t\tba_alloc_index(pool, i);\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * Internal function to mark all the l2 ctxt allocated that Truflow\n+ * does not own.\n+ */\n+static void\n+tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;\n+\tuint32_t end = 0;\n+\n+\t/* l2 ctxt rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_L2_CTXT_TCAM,\n+\t\t\t    tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);\n+\n+\t/* l2 ctxt tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    
end,\n+\t\t\t    TF_NUM_L2_CTXT_TCAM,\n+\t\t\t    tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the profile tcam and profile func\n+ * resources that Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_prof(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_PROF_FUNC;\n+\tuint32_t end = 0;\n+\n+\t/* profile func rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_PROF_FUNC,\n+\t\t\t    tfs->TF_PROF_FUNC_POOL_NAME_RX);\n+\n+\t/* profile func tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_PROF_FUNC,\n+\t\t\t    tfs->TF_PROF_FUNC_POOL_NAME_TX);\n+\n+\tindex = TF_RESC_TYPE_HW_PROF_TCAM;\n+\n+\t/* profile tcam rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_PROF_TCAM,\n+\t\t\t    tfs->TF_PROF_TCAM_POOL_NAME_RX);\n+\n+\t/* profile tcam tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_PROF_TCAM,\n+\t\t\t    tfs->TF_PROF_TCAM_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the 
em profile id allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_em_prof(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_EM_PROF_ID;\n+\tuint32_t end = 0;\n+\n+\t/* em prof id rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_EM_PROF_ID,\n+\t\t\t    tfs->TF_EM_PROF_ID_POOL_NAME_RX);\n+\n+\t/* em prof id tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_EM_PROF_ID,\n+\t\t\t    tfs->TF_EM_PROF_ID_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the wildcard tcam and profile id\n+ * resources that Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_wc(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_WC_TCAM_PROF_ID;\n+\tuint32_t end = 0;\n+\n+\t/* wc profile id rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_WC_PROF_ID,\n+\t\t\t    tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX);\n+\n+\t/* wc profile id tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_WC_PROF_ID,\n+\t\t\t    
tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX);\n+\n+\tindex = TF_RESC_TYPE_HW_WC_TCAM;\n+\n+\t/* wc tcam rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_WC_TCAM_ROW,\n+\t\t\t    tfs->TF_WC_TCAM_POOL_NAME_RX);\n+\n+\t/* wc tcam tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_WC_TCAM_ROW,\n+\t\t\t    tfs->TF_WC_TCAM_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the meter resources allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_meter(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_METER_PROF;\n+\tuint32_t end = 0;\n+\n+\t/* meter profiles rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_METER_PROF,\n+\t\t\t    tfs->TF_METER_PROF_POOL_NAME_RX);\n+\n+\t/* meter profiles tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_METER_PROF,\n+\t\t\t    tfs->TF_METER_PROF_POOL_NAME_TX);\n+\n+\tindex = TF_RESC_TYPE_HW_METER_INST;\n+\n+\t/* meter rx direction */\n+\tif 
(tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_METER,\n+\t\t\t    tfs->TF_METER_INST_POOL_NAME_RX);\n+\n+\t/* meter tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_METER,\n+\t\t\t    tfs->TF_METER_INST_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the mirror resources allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_mirror(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_MIRROR;\n+\tuint32_t end = 0;\n+\n+\t/* mirror rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_MIRROR,\n+\t\t\t    tfs->TF_MIRROR_POOL_NAME_RX);\n+\n+\t/* mirror tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_MIRROR,\n+\t\t\t    tfs->TF_MIRROR_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the upar resources allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_upar(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_UPAR;\n+\tuint32_t end = 0;\n+\n+\t/* upar rx direction 
*/\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_UPAR,\n+\t\t\t    tfs->TF_UPAR_POOL_NAME_RX);\n+\n+\t/* upar tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_UPAR,\n+\t\t\t    tfs->TF_UPAR_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the sp tcam resources allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_sp_tcam(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_SP_TCAM;\n+\tuint32_t end = 0;\n+\n+\t/* sp tcam rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_SP_TCAM,\n+\t\t\t    tfs->TF_SP_TCAM_POOL_NAME_RX);\n+\n+\t/* sp tcam tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_SP_TCAM,\n+\t\t\t    tfs->TF_SP_TCAM_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the l2 func resources allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_l2_func(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_L2_FUNC;\n+\tuint32_t end = 0;\n+\n+\t/* l2 func rx 
direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_L2_FUNC,\n+\t\t\t    tfs->TF_L2_FUNC_POOL_NAME_RX);\n+\n+\t/* l2 func tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_L2_FUNC,\n+\t\t\t    tfs->TF_L2_FUNC_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the fkb resources allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_fkb(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_FKB;\n+\tuint32_t end = 0;\n+\n+\t/* fkb rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_FKB,\n+\t\t\t    tfs->TF_FKB_POOL_NAME_RX);\n+\n+\t/* fkb tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_FKB,\n+\t\t\t    tfs->TF_FKB_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the tbld scope resources allocated\n+ * that Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_tbl_scope(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_TBL_SCOPE;\n+\tuint32_t end = 0;\n+\n+\t/* tbl scope rx 
direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_TBL_SCOPE,\n+\t\t\t    tfs->TF_TBL_SCOPE_POOL_NAME_RX);\n+\n+\t/* tbl scope tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_TBL_SCOPE,\n+\t\t\t    tfs->TF_TBL_SCOPE_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the l2 epoch resources allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_epoch(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_EPOCH0;\n+\tuint32_t end = 0;\n+\n+\t/* epoch0 rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_EPOCH0,\n+\t\t\t    tfs->TF_EPOCH0_POOL_NAME_RX);\n+\n+\t/* epoch0 tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_EPOCH0,\n+\t\t\t    tfs->TF_EPOCH0_POOL_NAME_TX);\n+\n+\tindex = TF_RESC_TYPE_HW_EPOCH1;\n+\n+\t/* epoch1 rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 
1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_EPOCH1,\n+\t\t\t    tfs->TF_EPOCH1_POOL_NAME_RX);\n+\n+\t/* epoch1 tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_EPOCH1,\n+\t\t\t    tfs->TF_EPOCH1_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the metadata resources allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_metadata(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_METADATA;\n+\tuint32_t end = 0;\n+\n+\t/* metadata rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_METADATA,\n+\t\t\t    tfs->TF_METADATA_POOL_NAME_RX);\n+\n+\t/* metadata tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_METADATA,\n+\t\t\t    tfs->TF_METADATA_POOL_NAME_TX);\n+}\n+\n+/**\n+ * Internal function to mark all the ct state resources allocated that\n+ * Truflow does not own.\n+ */\n+static void\n+tf_rm_rsvd_ct_state(struct tf_session *tfs)\n+{\n+\tuint32_t index = TF_RESC_TYPE_HW_CT_STATE;\n+\tuint32_t end = 0;\n+\n+\t/* ct state rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start 
+\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_CT_STATE,\n+\t\t\t    tfs->TF_CT_STATE_POOL_NAME_RX);\n+\n+\t/* ct state tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_CT_STATE,\n+\t\t\t    tfs->TF_CT_STATE_POOL_NAME_TX);\n }\n \n /**\n- * Internal function to mark all the l2 ctxt allocated that Truflow\n- * does not own.\n+ * Internal function to mark all the range resources allocated that\n+ * Truflow does not own.\n  */\n static void\n-tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)\n+tf_rm_rsvd_range(struct tf_session *tfs)\n {\n-\tuint32_t index = TF_RESC_TYPE_HW_L2_CTXT_TCAM;\n+\tuint32_t index = TF_RESC_TYPE_HW_RANGE_PROF;\n \tuint32_t end = 0;\n \n-\t/* l2 ctxt rx direction */\n+\t/* range profile rx direction */\n \tif (tfs->resc.rx.hw_entry[index].stride > 0)\n \t\tend = tfs->resc.rx.hw_entry[index].start +\n \t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n@@ -461,10 +1336,10 @@ tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)\n \ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n \t\t\t    tfs->resc.rx.hw_entry[index].start,\n \t\t\t    end,\n-\t\t\t    TF_NUM_L2_CTXT_TCAM,\n-\t\t\t    tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX);\n+\t\t\t    TF_NUM_RANGE_PROF,\n+\t\t\t    tfs->TF_RANGE_PROF_POOL_NAME_RX);\n \n-\t/* l2 ctxt tx direction */\n+\t/* range profile tx direction */\n \tif (tfs->resc.tx.hw_entry[index].stride > 0)\n \t\tend = tfs->resc.tx.hw_entry[index].start +\n \t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n@@ -472,21 +1347,45 @@ tf_rm_rsvd_l2_ctxt(struct tf_session *tfs)\n \ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n \t\t\t    
tfs->resc.tx.hw_entry[index].start,\n \t\t\t    end,\n-\t\t\t    TF_NUM_L2_CTXT_TCAM,\n-\t\t\t    tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX);\n+\t\t\t    TF_NUM_RANGE_PROF,\n+\t\t\t    tfs->TF_RANGE_PROF_POOL_NAME_TX);\n+\n+\tindex = TF_RESC_TYPE_HW_RANGE_ENTRY;\n+\n+\t/* range entry rx direction */\n+\tif (tfs->resc.rx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.rx.hw_entry[index].start +\n+\t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.rx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_RANGE_ENTRY,\n+\t\t\t    tfs->TF_RANGE_ENTRY_POOL_NAME_RX);\n+\n+\t/* range entry tx direction */\n+\tif (tfs->resc.tx.hw_entry[index].stride > 0)\n+\t\tend = tfs->resc.tx.hw_entry[index].start +\n+\t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n+\n+\ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n+\t\t\t    tfs->resc.tx.hw_entry[index].start,\n+\t\t\t    end,\n+\t\t\t    TF_NUM_RANGE_ENTRY,\n+\t\t\t    tfs->TF_RANGE_ENTRY_POOL_NAME_TX);\n }\n \n /**\n- * Internal function to mark all the l2 func resources allocated that\n+ * Internal function to mark all the lag resources allocated that\n  * Truflow does not own.\n  */\n static void\n-tf_rm_rsvd_l2_func(struct tf_session *tfs)\n+tf_rm_rsvd_lag_entry(struct tf_session *tfs)\n {\n-\tuint32_t index = TF_RESC_TYPE_HW_L2_FUNC;\n+\tuint32_t index = TF_RESC_TYPE_HW_LAG_ENTRY;\n \tuint32_t end = 0;\n \n-\t/* l2 func rx direction */\n+\t/* lag entry rx direction */\n \tif (tfs->resc.rx.hw_entry[index].stride > 0)\n \t\tend = tfs->resc.rx.hw_entry[index].start +\n \t\t\ttfs->resc.rx.hw_entry[index].stride - 1;\n@@ -494,10 +1393,10 @@ tf_rm_rsvd_l2_func(struct tf_session *tfs)\n \ttf_rm_reserve_range(tfs->resc.rx.hw_entry[index].stride,\n \t\t\t    tfs->resc.rx.hw_entry[index].start,\n \t\t\t    end,\n-\t\t\t    TF_NUM_L2_FUNC,\n-\t\t\t    tfs->TF_L2_FUNC_POOL_NAME_RX);\n+\t\t\t    TF_NUM_LAG_ENTRY,\n+\t\t\t    
tfs->TF_LAG_ENTRY_POOL_NAME_RX);\n \n-\t/* l2 func tx direction */\n+\t/* lag entry tx direction */\n \tif (tfs->resc.tx.hw_entry[index].stride > 0)\n \t\tend = tfs->resc.tx.hw_entry[index].start +\n \t\t\ttfs->resc.tx.hw_entry[index].stride - 1;\n@@ -505,8 +1404,8 @@ tf_rm_rsvd_l2_func(struct tf_session *tfs)\n \ttf_rm_reserve_range(tfs->resc.tx.hw_entry[index].stride,\n \t\t\t    tfs->resc.tx.hw_entry[index].start,\n \t\t\t    end,\n-\t\t\t    TF_NUM_L2_FUNC,\n-\t\t\t    tfs->TF_L2_FUNC_POOL_NAME_TX);\n+\t\t\t    TF_NUM_LAG_ENTRY,\n+\t\t\t    tfs->TF_LAG_ENTRY_POOL_NAME_TX);\n }\n \n /**\n@@ -909,7 +1808,21 @@ tf_rm_reserve_hw(struct tf *tfp)\n \t * used except the resources that Truflow took ownership off.\n \t */\n \ttf_rm_rsvd_l2_ctxt(tfs);\n+\ttf_rm_rsvd_prof(tfs);\n+\ttf_rm_rsvd_em_prof(tfs);\n+\ttf_rm_rsvd_wc(tfs);\n+\ttf_rm_rsvd_mirror(tfs);\n+\ttf_rm_rsvd_meter(tfs);\n+\ttf_rm_rsvd_upar(tfs);\n+\ttf_rm_rsvd_sp_tcam(tfs);\n \ttf_rm_rsvd_l2_func(tfs);\n+\ttf_rm_rsvd_fkb(tfs);\n+\ttf_rm_rsvd_tbl_scope(tfs);\n+\ttf_rm_rsvd_epoch(tfs);\n+\ttf_rm_rsvd_metadata(tfs);\n+\ttf_rm_rsvd_ct_state(tfs);\n+\ttf_rm_rsvd_range(tfs);\n+\ttf_rm_rsvd_lag_entry(tfs);\n }\n \n /**\n@@ -972,6 +1885,7 @@ tf_rm_allocate_validate_hw(struct tf *tfp,\n \t\t\t\"%s, HW QCAPS validation failed, error_flag:0x%x\\n\",\n \t\t\ttf_dir_2_str(dir),\n \t\t\terror_flag);\n+\t\ttf_rm_print_hw_qcaps_error(dir, &hw_query, &error_flag);\n \t\tgoto cleanup;\n \t}\n \n@@ -1032,65 +1946,388 @@ tf_rm_allocate_validate_sram(struct tf *tfp,\n \tstruct tf_rm_entry *sram_entries;\n \tuint32_t error_flag;\n \n-\tif (dir == TF_DIR_RX)\n-\t\tsram_entries = tfs->resc.rx.sram_entry;\n-\telse\n-\t\tsram_entries = tfs->resc.tx.sram_entry;\n+\tif (dir == TF_DIR_RX)\n+\t\tsram_entries = tfs->resc.rx.sram_entry;\n+\telse\n+\t\tsram_entries = tfs->resc.tx.sram_entry;\n+\n+\t/* Query for Session SRAM Resources */\n+\trc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);\n+\tif (rc) {\n+\t\t/* Log error 
*/\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"%s, SRAM qcaps message send failed\\n\",\n+\t\t\t    tf_dir_2_str(dir));\n+\t\tgoto cleanup;\n+\t}\n+\n+\trc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);\n+\tif (rc) {\n+\t\t/* Log error */\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"%s, SRAM QCAPS validation failed, error_flag:%x\\n\",\n+\t\t\ttf_dir_2_str(dir),\n+\t\t\terror_flag);\n+\t\ttf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);\n+\t\tgoto cleanup;\n+\t}\n+\n+\t/* Post process SRAM capability */\n+\tfor (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)\n+\t\tsram_alloc.sram_num[i] = sram_query.sram_query[i].max;\n+\n+\t/* Allocate Session SRAM Resources */\n+\trc = tf_msg_session_sram_resc_alloc(tfp,\n+\t\t\t\t\t    dir,\n+\t\t\t\t\t    &sram_alloc,\n+\t\t\t\t\t    sram_entries);\n+\tif (rc) {\n+\t\t/* Log error */\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"%s, SRAM alloc message send failed\\n\",\n+\t\t\t    tf_dir_2_str(dir));\n+\t\tgoto cleanup;\n+\t}\n+\n+\t/* Perform SRAM allocation validation as its possible the\n+\t * resource availability changed between qcaps and alloc\n+\t */\n+\trc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);\n+\tif (rc) {\n+\t\t/* Log error */\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"%s, SRAM Resource allocation validation failed\\n\",\n+\t\t\t    tf_dir_2_str(dir));\n+\t\tgoto cleanup;\n+\t}\n+\n+\treturn 0;\n+\n+ cleanup:\n+\treturn -1;\n+}\n+\n+/**\n+ * Helper function used to prune a HW resource array to only hold\n+ * elements that needs to be flushed.\n+ *\n+ * [in] tfs\n+ *   Session handle\n+ *\n+ * [in] dir\n+ *   Receive or transmit direction\n+ *\n+ * [in] hw_entries\n+ *   Master HW Resource database\n+ *\n+ * [in/out] flush_entries\n+ *   Pruned HW Resource database of entries to be flushed. This\n+ *   array should be passed in as a complete copy of the master HW\n+ *   Resource database. 
The outgoing result will be a pruned version\n+ *   based on the result of the requested checking\n+ *\n+ * Returns:\n+ *    0 - Success, no flush required\n+ *    1 - Success, flush required\n+ *   -1 - Internal error\n+ */\n+static int\n+tf_rm_hw_to_flush(struct tf_session *tfs,\n+\t\t  enum tf_dir dir,\n+\t\t  struct tf_rm_entry *hw_entries,\n+\t\t  struct tf_rm_entry *flush_entries)\n+{\n+\tint rc;\n+\tint flush_rc = 0;\n+\tint free_cnt;\n+\tstruct bitalloc *pool;\n+\n+\t/* Check all the hw resource pools and check for left over\n+\t * elements. Any found will result in the complete pool of a\n+\t * type to get invalidated.\n+\t */\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_L2_CTXT_TCAM_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_L2_CTXT_TCAM].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_PROF_FUNC_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_PROF_FUNC].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_PROF_FUNC].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_PROF_TCAM_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_PROF_TCAM].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_PROF_TCAM].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_EM_PROF_ID_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride) 
{\n+\t\tflush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_EM_PROF_ID].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tflush_entries[TF_RESC_TYPE_HW_EM_REC].start = 0;\n+\tflush_entries[TF_RESC_TYPE_HW_EM_REC].stride = 0;\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_WC_TCAM_PROF_ID_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_WC_TCAM_PROF_ID].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_WC_TCAM_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_WC_TCAM].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_WC_TCAM].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_WC_TCAM].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_METER_PROF_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_PROF].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_METER_PROF].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_METER_PROF].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_METER_INST_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_METER_INST].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_METER_INST].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_METER_INST].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_MIRROR_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_MIRROR].stride) 
{\n+\t\tflush_entries[TF_RESC_TYPE_HW_MIRROR].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_MIRROR].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_UPAR_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_UPAR].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_UPAR].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_UPAR].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_SP_TCAM_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_SP_TCAM].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_SP_TCAM].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_SP_TCAM].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_L2_FUNC_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_L2_FUNC].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_L2_FUNC].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_L2_FUNC].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_FKB_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_FKB].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_FKB].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_FKB].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n \n-\t/* Query for Session SRAM Resources */\n-\trc = tf_msg_session_sram_resc_qcaps(tfp, dir, &sram_query);\n-\tif (rc) {\n-\t\t/* Log error */\n-\t\tPMD_DRV_LOG(ERR,\n-\t\t\t    \"%s, SRAM qcaps message send failed\\n\",\n-\t\t\t    tf_dir_2_str(dir));\n-\t\tgoto cleanup;\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_TBL_SCOPE_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = 
ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride = 0;\n+\t} else {\n+\t\tPMD_DRV_LOG(ERR, \"%s: TBL_SCOPE free_cnt:%d, entries:%d\\n\",\n+\t\t\t    tf_dir_2_str(dir),\n+\t\t\t    free_cnt,\n+\t\t\t    hw_entries[TF_RESC_TYPE_HW_TBL_SCOPE].stride);\n+\t\tflush_rc = 1;\n \t}\n \n-\trc = tf_rm_check_sram_qcaps_static(&sram_query, dir, &error_flag);\n-\tif (rc) {\n-\t\t/* Log error */\n-\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\"%s, SRAM QCAPS validation failed, error_flag:%x\\n\",\n-\t\t\ttf_dir_2_str(dir),\n-\t\t\terror_flag);\n-\t\ttf_rm_print_sram_qcaps_error(dir, &sram_query, &error_flag);\n-\t\tgoto cleanup;\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_EPOCH0_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH0].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_EPOCH0].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_EPOCH0].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n \t}\n \n-\t/* Post process SRAM capability */\n-\tfor (i = 0; i < TF_RESC_TYPE_SRAM_MAX; i++)\n-\t\tsram_alloc.sram_num[i] = sram_query.sram_query[i].max;\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_EPOCH1_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_EPOCH1].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_EPOCH1].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_EPOCH1].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n \n-\t/* Allocate Session SRAM Resources */\n-\trc = tf_msg_session_sram_resc_alloc(tfp,\n-\t\t\t\t\t    dir,\n-\t\t\t\t\t    &sram_alloc,\n-\t\t\t\t\t    sram_entries);\n-\tif (rc) {\n-\t\t/* Log error */\n-\t\tPMD_DRV_LOG(ERR,\n-\t\t\t    \"%s, SRAM alloc message send failed\\n\",\n-\t\t\t    tf_dir_2_str(dir));\n-\t\tgoto cleanup;\n+\tTF_RM_GET_POOLS(tfs, dir, 
&pool,\n+\t\t\tTF_METADATA_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_METADATA].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_METADATA].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_METADATA].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n \t}\n \n-\t/* Perform SRAM allocation validation as its possible the\n-\t * resource availability changed between qcaps and alloc\n-\t */\n-\trc = tf_rm_sram_alloc_validate(dir, &sram_alloc, sram_entries);\n-\tif (rc) {\n-\t\t/* Log error */\n-\t\tPMD_DRV_LOG(ERR,\n-\t\t\t    \"%s, SRAM Resource allocation validation failed\\n\",\n-\t\t\t    tf_dir_2_str(dir));\n-\t\tgoto cleanup;\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_CT_STATE_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_CT_STATE].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_CT_STATE].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_CT_STATE].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n \t}\n \n-\treturn 0;\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_RANGE_PROF_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_RANGE_PROF].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_RANGE_PROF].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n \n- cleanup:\n-\treturn -1;\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_RANGE_ENTRY_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_RANGE_ENTRY].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\tTF_RM_GET_POOLS(tfs, dir, &pool,\n+\t\t\tTF_LAG_ENTRY_POOL_NAME,\n+\t\t\trc);\n+\tif (rc)\n+\t\treturn 
rc;\n+\tfree_cnt = ba_free_count(pool);\n+\tif (free_cnt == hw_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride) {\n+\t\tflush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].start = 0;\n+\t\tflush_entries[TF_RESC_TYPE_HW_LAG_ENTRY].stride = 0;\n+\t} else {\n+\t\tflush_rc = 1;\n+\t}\n+\n+\treturn flush_rc;\n }\n \n /**\n@@ -1335,6 +2572,32 @@ tf_rm_sram_to_flush(struct tf_session *tfs,\n }\n \n /**\n+ * Helper function used to generate an error log for the HW types that\n+ * needs to be flushed. The types should have been cleaned up ahead of\n+ * invoking tf_close_session.\n+ *\n+ * [in] hw_entries\n+ *   HW Resource database holding elements to be flushed\n+ */\n+static void\n+tf_rm_log_hw_flush(enum tf_dir dir,\n+\t\t   struct tf_rm_entry *hw_entries)\n+{\n+\tint i;\n+\n+\t/* Walk the hw flush array and log the types that wasn't\n+\t * cleaned up.\n+\t */\n+\tfor (i = 0; i < TF_RESC_TYPE_HW_MAX; i++) {\n+\t\tif (hw_entries[i].stride != 0)\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"%s: %s was not cleaned up\\n\",\n+\t\t\t\t    tf_dir_2_str(dir),\n+\t\t\t\t    tf_hcapi_hw_2_str(i));\n+\t}\n+}\n+\n+/**\n  * Helper function used to generate an error log for the SRAM types\n  * that needs to be flushed. 
The types should have been cleaned up\n  * ahead of invoking tf_close_session.\n@@ -1386,6 +2649,53 @@ tf_rm_init(struct tf *tfp __rte_unused)\n \t/* Initialization of HW Resource Pools */\n \tba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_RX, TF_NUM_L2_CTXT_TCAM);\n \tba_init(tfs->TF_L2_CTXT_TCAM_POOL_NAME_TX, TF_NUM_L2_CTXT_TCAM);\n+\tba_init(tfs->TF_PROF_FUNC_POOL_NAME_RX, TF_NUM_PROF_FUNC);\n+\tba_init(tfs->TF_PROF_FUNC_POOL_NAME_TX, TF_NUM_PROF_FUNC);\n+\tba_init(tfs->TF_PROF_TCAM_POOL_NAME_RX, TF_NUM_PROF_TCAM);\n+\tba_init(tfs->TF_PROF_TCAM_POOL_NAME_TX, TF_NUM_PROF_TCAM);\n+\tba_init(tfs->TF_EM_PROF_ID_POOL_NAME_RX, TF_NUM_EM_PROF_ID);\n+\tba_init(tfs->TF_EM_PROF_ID_POOL_NAME_TX, TF_NUM_EM_PROF_ID);\n+\n+\t/* TBD, how do we want to handle EM records ?*/\n+\t/* EM Records should not be controlled by way of a pool */\n+\n+\tba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_RX, TF_NUM_WC_PROF_ID);\n+\tba_init(tfs->TF_WC_TCAM_PROF_ID_POOL_NAME_TX, TF_NUM_WC_PROF_ID);\n+\tba_init(tfs->TF_WC_TCAM_POOL_NAME_RX, TF_NUM_WC_TCAM_ROW);\n+\tba_init(tfs->TF_WC_TCAM_POOL_NAME_TX, TF_NUM_WC_TCAM_ROW);\n+\tba_init(tfs->TF_METER_PROF_POOL_NAME_RX, TF_NUM_METER_PROF);\n+\tba_init(tfs->TF_METER_PROF_POOL_NAME_TX, TF_NUM_METER_PROF);\n+\tba_init(tfs->TF_METER_INST_POOL_NAME_RX, TF_NUM_METER);\n+\tba_init(tfs->TF_METER_INST_POOL_NAME_TX, TF_NUM_METER);\n+\tba_init(tfs->TF_MIRROR_POOL_NAME_RX, TF_NUM_MIRROR);\n+\tba_init(tfs->TF_MIRROR_POOL_NAME_TX, TF_NUM_MIRROR);\n+\tba_init(tfs->TF_UPAR_POOL_NAME_RX, TF_NUM_UPAR);\n+\tba_init(tfs->TF_UPAR_POOL_NAME_TX, TF_NUM_UPAR);\n+\n+\tba_init(tfs->TF_SP_TCAM_POOL_NAME_RX, TF_NUM_SP_TCAM);\n+\tba_init(tfs->TF_SP_TCAM_POOL_NAME_TX, TF_NUM_SP_TCAM);\n+\n+\tba_init(tfs->TF_FKB_POOL_NAME_RX, TF_NUM_FKB);\n+\tba_init(tfs->TF_FKB_POOL_NAME_TX, TF_NUM_FKB);\n+\n+\tba_init(tfs->TF_TBL_SCOPE_POOL_NAME_RX, TF_NUM_TBL_SCOPE);\n+\tba_init(tfs->TF_TBL_SCOPE_POOL_NAME_TX, TF_NUM_TBL_SCOPE);\n+\tba_init(tfs->TF_L2_FUNC_POOL_NAME_RX, 
TF_NUM_L2_FUNC);\n+\tba_init(tfs->TF_L2_FUNC_POOL_NAME_TX, TF_NUM_L2_FUNC);\n+\tba_init(tfs->TF_EPOCH0_POOL_NAME_RX, TF_NUM_EPOCH0);\n+\tba_init(tfs->TF_EPOCH0_POOL_NAME_TX, TF_NUM_EPOCH0);\n+\tba_init(tfs->TF_EPOCH1_POOL_NAME_RX, TF_NUM_EPOCH1);\n+\tba_init(tfs->TF_EPOCH1_POOL_NAME_TX, TF_NUM_EPOCH1);\n+\tba_init(tfs->TF_METADATA_POOL_NAME_RX, TF_NUM_METADATA);\n+\tba_init(tfs->TF_METADATA_POOL_NAME_TX, TF_NUM_METADATA);\n+\tba_init(tfs->TF_CT_STATE_POOL_NAME_RX, TF_NUM_CT_STATE);\n+\tba_init(tfs->TF_CT_STATE_POOL_NAME_TX, TF_NUM_CT_STATE);\n+\tba_init(tfs->TF_RANGE_PROF_POOL_NAME_RX, TF_NUM_RANGE_PROF);\n+\tba_init(tfs->TF_RANGE_PROF_POOL_NAME_TX, TF_NUM_RANGE_PROF);\n+\tba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_RX, TF_NUM_RANGE_ENTRY);\n+\tba_init(tfs->TF_RANGE_ENTRY_POOL_NAME_TX, TF_NUM_RANGE_ENTRY);\n+\tba_init(tfs->TF_LAG_ENTRY_POOL_NAME_RX, TF_NUM_LAG_ENTRY);\n+\tba_init(tfs->TF_LAG_ENTRY_POOL_NAME_TX, TF_NUM_LAG_ENTRY);\n \n \t/* Initialization of SRAM Resource Pools\n \t * These pools are set to the TFLIB defined MAX sizes not\n@@ -1476,6 +2786,7 @@ tf_rm_close(struct tf *tfp)\n \tint rc_close = 0;\n \tint i;\n \tstruct tf_rm_entry *hw_entries;\n+\tstruct tf_rm_entry *hw_flush_entries;\n \tstruct tf_rm_entry *sram_entries;\n \tstruct tf_rm_entry *sram_flush_entries;\n \tstruct tf_session *tfs __rte_unused =\n@@ -1501,14 +2812,41 @@ tf_rm_close(struct tf *tfp)\n \tfor (i = 0; i < TF_DIR_MAX; i++) {\n \t\tif (i == TF_DIR_RX) {\n \t\t\thw_entries = tfs->resc.rx.hw_entry;\n+\t\t\thw_flush_entries = flush_resc.rx.hw_entry;\n \t\t\tsram_entries = tfs->resc.rx.sram_entry;\n \t\t\tsram_flush_entries = flush_resc.rx.sram_entry;\n \t\t} else {\n \t\t\thw_entries = tfs->resc.tx.hw_entry;\n+\t\t\thw_flush_entries = flush_resc.tx.hw_entry;\n \t\t\tsram_entries = tfs->resc.tx.sram_entry;\n \t\t\tsram_flush_entries = flush_resc.tx.sram_entry;\n \t\t}\n \n+\t\t/* Check for any not previously freed HW resources and\n+\t\t * flush if required.\n+\t\t */\n+\t\trc = 
tf_rm_hw_to_flush(tfs, i, hw_entries, hw_flush_entries);\n+\t\tif (rc) {\n+\t\t\trc_close = -ENOTEMPTY;\n+\t\t\t/* Log error */\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"%s, lingering HW resources\\n\",\n+\t\t\t\t    tf_dir_2_str(i));\n+\n+\t\t\t/* Log the entries to be flushed */\n+\t\t\ttf_rm_log_hw_flush(i, hw_flush_entries);\n+\t\t\trc = tf_msg_session_hw_resc_flush(tfp,\n+\t\t\t\t\t\t\t  i,\n+\t\t\t\t\t\t\t  hw_flush_entries);\n+\t\t\tif (rc) {\n+\t\t\t\trc_close = rc;\n+\t\t\t\t/* Log error */\n+\t\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\t    \"%s, HW flush failed\\n\",\n+\t\t\t\t\t    tf_dir_2_str(i));\n+\t\t\t}\n+\t\t}\n+\n \t\t/* Check for any not previously freed SRAM resources\n \t\t * and flush if required.\n \t\t */\n@@ -1560,6 +2898,234 @@ tf_rm_close(struct tf *tfp)\n \treturn rc_close;\n }\n \n+#if (TF_SHADOW == 1)\n+int\n+tf_rm_shadow_db_init(struct tf_session *tfs)\n+{\n+\trc = 1;\n+\n+\treturn rc;\n+}\n+#endif /* TF_SHADOW */\n+\n+int\n+tf_rm_lookup_tcam_type_pool(struct tf_session *tfs,\n+\t\t\t    enum tf_dir dir,\n+\t\t\t    enum tf_tcam_tbl_type type,\n+\t\t\t    struct bitalloc **pool)\n+{\n+\tint rc = -EOPNOTSUPP;\n+\n+\t*pool = NULL;\n+\n+\tswitch (type) {\n+\tcase TF_TCAM_TBL_TYPE_L2_CTXT_TCAM:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_L2_CTXT_TCAM_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TCAM_TBL_TYPE_PROF_TCAM:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_PROF_TCAM_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TCAM_TBL_TYPE_WC_TCAM:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_WC_TCAM_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TCAM_TBL_TYPE_VEB_TCAM:\n+\tcase TF_TCAM_TBL_TYPE_SP_TCAM:\n+\tcase TF_TCAM_TBL_TYPE_CT_RULE_TCAM:\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tif (rc == -EOPNOTSUPP) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Tcam type not supported, type:%d\\n\",\n+\t\t\t    dir,\n+\t\t\t    type);\n+\t\treturn rc;\n+\t} else if (rc == -1) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"%s:, Tcam type 
lookup failed, type:%d\\n\",\n+\t\t\t    tf_dir_2_str(dir),\n+\t\t\t    type);\n+\t\treturn rc;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+tf_rm_lookup_tbl_type_pool(struct tf_session *tfs,\n+\t\t\t   enum tf_dir dir,\n+\t\t\t   enum tf_tbl_type type,\n+\t\t\t   struct bitalloc **pool)\n+{\n+\tint rc = -EOPNOTSUPP;\n+\n+\t*pool = NULL;\n+\n+\tswitch (type) {\n+\tcase TF_TBL_TYPE_FULL_ACT_RECORD:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_SRAM_FULL_ACTION_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_MCAST_GROUPS:\n+\t\t/* No pools for TX direction, so bail out */\n+\t\tif (dir == TF_DIR_TX)\n+\t\t\tbreak;\n+\t\tTF_RM_GET_POOLS_RX(tfs, pool,\n+\t\t\t\t   TF_SRAM_MCG_POOL_NAME);\n+\t\trc = 0;\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_ENCAP_8B:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_SRAM_ENCAP_8B_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_ENCAP_16B:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_SRAM_ENCAP_16B_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_ENCAP_64B:\n+\t\t/* No pools for RX direction, so bail out */\n+\t\tif (dir == TF_DIR_RX)\n+\t\t\tbreak;\n+\t\tTF_RM_GET_POOLS_TX(tfs, pool,\n+\t\t\t\t   TF_SRAM_ENCAP_64B_POOL_NAME);\n+\t\trc = 0;\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_SP_SMAC:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_SRAM_SP_SMAC_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_SP_SMAC_IPV4:\n+\t\t/* No pools for TX direction, so bail out */\n+\t\tif (dir == TF_DIR_RX)\n+\t\t\tbreak;\n+\t\tTF_RM_GET_POOLS_TX(tfs, pool,\n+\t\t\t\t   TF_SRAM_SP_SMAC_IPV4_POOL_NAME);\n+\t\trc = 0;\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_SP_SMAC_IPV6:\n+\t\t/* No pools for TX direction, so bail out */\n+\t\tif (dir == TF_DIR_RX)\n+\t\t\tbreak;\n+\t\tTF_RM_GET_POOLS_TX(tfs, pool,\n+\t\t\t\t   TF_SRAM_SP_SMAC_IPV6_POOL_NAME);\n+\t\trc = 0;\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_STATS_64:\n+\t\tTF_RM_GET_POOLS(tfs, dir, 
pool,\n+\t\t\t\tTF_SRAM_STATS_64B_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_MODIFY_SPORT:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_SRAM_NAT_SPORT_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_MODIFY_IPV4_SRC:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_SRAM_NAT_S_IPV4_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_ACT_MODIFY_IPV4_DEST:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_SRAM_NAT_D_IPV4_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_METER_PROF:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_METER_PROF_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_METER_INST:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_METER_INST_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_MIRROR_CONFIG:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_MIRROR_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_UPAR:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_UPAR_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_EPOCH0:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_EPOCH0_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_EPOCH1:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_EPOCH1_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_METADATA:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_METADATA_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_CT_STATE:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_CT_STATE_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_RANGE_PROF:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_RANGE_PROF_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_RANGE_ENTRY:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_RANGE_ENTRY_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\tcase TF_TBL_TYPE_LAG:\n+\t\tTF_RM_GET_POOLS(tfs, dir, pool,\n+\t\t\t\tTF_LAG_ENTRY_POOL_NAME,\n+\t\t\t\trc);\n+\t\tbreak;\n+\t/* Not yet supported */\n+\tcase 
TF_TBL_TYPE_ACT_ENCAP_32B:\n+\tcase TF_TBL_TYPE_ACT_MODIFY_IPV6_DEST:\n+\tcase TF_TBL_TYPE_ACT_MODIFY_IPV6_SRC:\n+\tcase TF_TBL_TYPE_VNIC_SVIF:\n+\t\tbreak;\n+\t/* No bitalloc pools for these types */\n+\tcase TF_TBL_TYPE_EXT:\n+\tcase TF_TBL_TYPE_EXT_0:\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\tif (rc == -EOPNOTSUPP) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Table type not supported, type:%d\\n\",\n+\t\t\t    dir,\n+\t\t\t    type);\n+\t\treturn rc;\n+\t} else if (rc == -1) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"dir:%d, Table type lookup failed, type:%d\\n\",\n+\t\t\t    dir,\n+\t\t\t    type);\n+\t\treturn rc;\n+\t}\n+\n+\treturn 0;\n+}\n+\n int\n tf_rm_convert_tbl_type(enum tf_tbl_type type,\n \t\t       uint32_t *hcapi_type)\ndiff --git a/drivers/net/bnxt/tf_core/tf_session.h b/drivers/net/bnxt/tf_core/tf_session.h\nindex 34b6c41..fed34f1 100644\n--- a/drivers/net/bnxt/tf_core/tf_session.h\n+++ b/drivers/net/bnxt/tf_core/tf_session.h\n@@ -12,6 +12,7 @@\n #include \"bitalloc.h\"\n #include \"tf_core.h\"\n #include \"tf_rm.h\"\n+#include \"tf_tbl.h\"\n \n /** Session defines\n  */\n@@ -285,6 +286,15 @@ struct tf_session {\n \n \t/** Lookup3 init values */\n \tuint32_t lkup_lkup3_init_cfg[TF_DIR_MAX];\n+\n+\t/** Table scope array */\n+\tstruct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];\n+\n+\t/** Each external pool is associated with a single table scope\n+\t *  For each external pool store the associated table scope in\n+\t *  this data structure\n+\t */\n+\tuint32_t ext_pool_2_scope[TF_DIR_MAX][TF_EXT_POOL_CNT_MAX];\n };\n \n #endif /* _TF_SESSION_H_ */\ndiff --git a/drivers/net/bnxt/tf_core/tf_tbl.h b/drivers/net/bnxt/tf_core/tf_tbl.h\nnew file mode 100644\nindex 0000000..5a5e72f\n--- /dev/null\n+++ b/drivers/net/bnxt/tf_core/tf_tbl.h\n@@ -0,0 +1,43 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Broadcom\n+ * All rights reserved.\n+ */\n+\n+#ifndef _TF_TBL_H_\n+#define _TF_TBL_H_\n+\n+#include <stdint.h>\n+\n+enum tf_pg_tbl_lvl 
{\n+\tPT_LVL_0,\n+\tPT_LVL_1,\n+\tPT_LVL_2,\n+\tPT_LVL_MAX\n+};\n+\n+/** Invalid table scope id */\n+#define TF_TBL_SCOPE_INVALID 0xffffffff\n+\n+/**\n+ * Table Scope Control Block\n+ *\n+ * Holds private data for a table scope. Only one instance of a table\n+ * scope with Internal EM is supported.\n+ */\n+struct tf_tbl_scope_cb {\n+\tuint32_t tbl_scope_id;\n+\tint index;\n+\tuint32_t              *ext_pool_mem[TF_DIR_MAX][TF_EXT_POOL_CNT_MAX];\n+};\n+\n+/**\n+ * Initialize table pool structure to indicate\n+ * no table scope has been associated with the\n+ * external pool of indexes.\n+ *\n+ * [in] session\n+ */\n+void\n+tf_init_tbl_pool(struct tf_session *session);\n+\n+#endif /* _TF_TBL_H_ */\n",
    "prefixes": [
        "v3",
        "08/34"
    ]
}