get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
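
As a quick illustration of the methods listed above, the sketch below reads the patch shown in the example response and submits a partial update. This is a minimal sketch, not taken from the Patchwork documentation: the use of the Python requests library, the "Authorization: Token ..." header, and the placeholder token value are assumptions, and the writable fields ("state", "archived") are inferred from the response body below.

import requests

BASE_URL = "http://patches.dpdk.org/api/patches/54126/"

# GET: show a patch (reads are anonymous).
patch = requests.get(BASE_URL).json()
print(patch["name"], patch["state"], patch["check"])

# PATCH: partially update a patch, e.g. change its state or archive it.
# Assumption: Patchwork-style token authentication; the token below is a
# placeholder, and the account must have maintainer rights on the project.
API_TOKEN = "0123456789abcdef0123456789abcdef01234567"
resp = requests.patch(
    BASE_URL,
    headers={"Authorization": "Token " + API_TOKEN},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()

# PUT behaves similarly but expects the full writable representation.
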

GET /api/patches/54126/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 54126,
    "url": "http://patches.dpdk.org/api/patches/54126/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190602174247.32368-10-lance.richardson@broadcom.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190602174247.32368-10-lance.richardson@broadcom.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190602174247.32368-10-lance.richardson@broadcom.com",
    "date": "2019-06-02T17:42:44",
    "name": "[09/11] net/bnxt: add support for thor controller",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "769014af7738d9b0fdc989d1a4c149d94a4b6069",
    "submitter": {
        "id": 1323,
        "url": "http://patches.dpdk.org/api/people/1323/?format=api",
        "name": "Lance Richardson",
        "email": "lance.richardson@broadcom.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190602174247.32368-10-lance.richardson@broadcom.com/mbox/",
    "series": [
        {
            "id": 4850,
            "url": "http://patches.dpdk.org/api/series/4850/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4850",
            "date": "2019-06-02T17:42:35",
            "name": "add support for BCM57508 controller",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/4850/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/54126/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/54126/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id DE11F1B9B6;\n\tSun,  2 Jun 2019 19:43:26 +0200 (CEST)",
            "from mail-yb1-f195.google.com (mail-yb1-f195.google.com\n\t[209.85.219.195]) by dpdk.org (Postfix) with ESMTP id 5B9D31B94B\n\tfor <dev@dpdk.org>; Sun,  2 Jun 2019 19:43:13 +0200 (CEST)",
            "by mail-yb1-f195.google.com with SMTP id d2so5732085ybh.8\n\tfor <dev@dpdk.org>; Sun, 02 Jun 2019 10:43:13 -0700 (PDT)",
            "from lrichardson-VirtualBox.attlocal.net\n\t(69-218-223-106.lightspeed.rlghnc.sbcglobal.net. [69.218.223.106])\n\tby smtp.gmail.com with ESMTPSA id\n\tn78sm1000819ywd.2.2019.06.02.10.43.10\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tSun, 02 Jun 2019 10:43:10 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=broadcom.com;\n\ts=google; \n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=0eLM4GerkKDSvom3yOSPJWXEh5lX9zidxfc3oltSHzI=;\n\tb=Y91+qasdNucoJILq+nduwmZaGO3IoCGsY4nq19AQmyz3PmTJEl6OH01gvDvKKUxjjn\n\tz+jSjpfrUTSVsEMJZlo42KhejmTKS5m+aMOXMtv/UxdcyfOdPEUOJWfhfk5Ed/R526e6\n\tHNvluAZ9GIaWFda/9tea1UGE7oNlP0LfnLM0A=",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=0eLM4GerkKDSvom3yOSPJWXEh5lX9zidxfc3oltSHzI=;\n\tb=RqppPuGLDXHmQERgDqg5YiRD0t3y8PivnIOMaOPB/W6IjIBIW5wcL+LFxktJxE2o0q\n\tx1LEFacZqSiWoXz2QRHLBxJ8tFgPgaAYkbJwYDZWXW8Zq0y0n5GLik+6URFooMFpWIkc\n\t3HivGATlp3DmbPH/kDlnJr6WAaaM6kbRYh87dpXXJ3OB+DdkNHzXeEh1iwNKi5bVZKTI\n\tlfOHh5ysQmn+SIQva6y6Q7aaUrMY2fLkfBPGRYMte/HxZF7O8dnkQ3nmhJuR1lXFdPOZ\n\toBlCG9Kwqdf5Vb+Zz/vCljf89oV7Ks6G7F79+4yZL9eiP/htwqmYIDOYMyipT7/dVrS5\n\tr67g==",
        "X-Gm-Message-State": "APjAAAVRlqRzcNvvnpmp/DOLdVAAS7Z2MUqZsHNycuEJVGvomxPG2TNC\n\tNzmuaddr18b7c8Jc+ytYXBY8zjJ+0csc/5tatrdIwgF1vWOBFadeYYVrsIR+1TlL2g1UwjOdpTm\n\tTuBuRlX9uxpikiGMJ5vdCHFyts7ktyr8A5TBO0/pmdUHvvz7JtH6dMPISPZaCJRSP",
        "X-Google-Smtp-Source": "APXvYqzaNPN0bzAwMqrzvcWjFYRdIPSoL567jcIIZDKaXnw3OB1IK3dl18hDEFt6gjwzGJlet8jdRw==",
        "X-Received": "by 2002:a25:3a44:: with SMTP id\n\th65mr10606355yba.449.1559497391240; \n\tSun, 02 Jun 2019 10:43:11 -0700 (PDT)",
        "From": "Lance Richardson <lance.richardson@broadcom.com>",
        "To": "dev@dpdk.org",
        "Cc": "ajit.khaparde@broadcom.com, ferruh.yigit@intel.com,\n\tLance Richardson <lance.richardson@broadcom.com>,\n\tKalesh AP <kalesh-anakkur.purayil@broadcom.com>",
        "Date": "Sun,  2 Jun 2019 13:42:44 -0400",
        "Message-Id": "<20190602174247.32368-10-lance.richardson@broadcom.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190602174247.32368-1-lance.richardson@broadcom.com>",
        "References": "<20190602174247.32368-1-lance.richardson@broadcom.com>",
        "Subject": "[dpdk-dev] [PATCH 09/11] net/bnxt: add support for thor controller",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This commit adds support to the bnxt PMD for devices\nbased on the BCM57508 \"thor\" Ethernet controller.\n\nSigned-off-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>\nSigned-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>\nSigned-off-by: Lance Richardson <lance.richardson@broadcom.com>\n---\n drivers/net/bnxt/bnxt.h        |  98 +++++++-\n drivers/net/bnxt/bnxt_cpr.h    |  12 +-\n drivers/net/bnxt/bnxt_ethdev.c | 323 +++++++++++++++++++++++---\n drivers/net/bnxt/bnxt_hwrm.c   | 404 ++++++++++++++++++++++++++++++---\n drivers/net/bnxt/bnxt_hwrm.h   |  10 +\n drivers/net/bnxt/bnxt_ring.c   | 183 ++++++++++++---\n drivers/net/bnxt/bnxt_ring.h   |  26 ++-\n drivers/net/bnxt/bnxt_rxq.c    |  16 +-\n drivers/net/bnxt/bnxt_rxq.h    |   1 +\n drivers/net/bnxt/bnxt_rxr.c    |  27 +++\n drivers/net/bnxt/bnxt_txq.c    |   2 +-\n drivers/net/bnxt/bnxt_txq.h    |   1 +\n drivers/net/bnxt/bnxt_txr.c    |  25 ++\n 13 files changed, 1028 insertions(+), 100 deletions(-)",
    "diff": "diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h\nindex 9bb8d825d..641790fef 100644\n--- a/drivers/net/bnxt/bnxt.h\n+++ b/drivers/net/bnxt/bnxt.h\n@@ -239,6 +239,93 @@ struct bnxt_coal {\n \tuint16_t\t\t\tcmpl_aggr_dma_tmr_during_int;\n };\n \n+/* 64-bit doorbell */\n+#define DBR_XID_SFT\t\t\t\t32\n+#define DBR_PATH_L2\t\t\t\t(0x1ULL << 56)\n+#define DBR_TYPE_SQ\t\t\t\t(0x0ULL << 60)\n+#define DBR_TYPE_SRQ\t\t\t\t(0x2ULL << 60)\n+#define DBR_TYPE_CQ\t\t\t\t(0x4ULL << 60)\n+#define DBR_TYPE_NQ\t\t\t\t(0xaULL << 60)\n+\n+#define BNXT_RSS_TBL_SIZE_THOR\t\t512\n+#define BNXT_RSS_ENTRIES_PER_CTX_THOR\t64\n+#define BNXT_MAX_RSS_CTXTS_THOR \\\n+\t(BNXT_RSS_TBL_SIZE_THOR / BNXT_RSS_ENTRIES_PER_CTX_THOR)\n+\n+#define BNXT_MAX_TC    8\n+#define BNXT_MAX_QUEUE 8\n+#define BNXT_MAX_TC_Q  (BNXT_MAX_TC + 1)\n+#define BNXT_MAX_Q     (bp->max_q + 1)\n+#define BNXT_PAGE_SHFT 12\n+#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHFT)\n+#define MAX_CTX_PAGES  (BNXT_PAGE_SIZE / 8)\n+\n+#define PTU_PTE_VALID             0x1UL\n+#define PTU_PTE_LAST              0x2UL\n+#define PTU_PTE_NEXT_TO_LAST      0x4UL\n+\n+struct bnxt_ring_mem_info {\n+\tint\t\t\t\tnr_pages;\n+\tint\t\t\t\tpage_size;\n+\tuint32_t\t\t\tflags;\n+#define BNXT_RMEM_VALID_PTE_FLAG\t1\n+#define BNXT_RMEM_RING_PTE_FLAG\t\t2\n+\n+\tvoid\t\t\t\t**pg_arr;\n+\trte_iova_t\t\t\t*dma_arr;\n+\tconst struct rte_memzone\t*mz;\n+\n+\tuint64_t\t\t\t*pg_tbl;\n+\trte_iova_t\t\t\tpg_tbl_map;\n+\tconst struct rte_memzone\t*pg_tbl_mz;\n+\n+\tint\t\t\t\tvmem_size;\n+\tvoid\t\t\t\t**vmem;\n+};\n+\n+struct bnxt_ctx_pg_info {\n+\tuint32_t\tentries;\n+\tvoid\t\t*ctx_pg_arr[MAX_CTX_PAGES];\n+\trte_iova_t\tctx_dma_arr[MAX_CTX_PAGES];\n+\tstruct bnxt_ring_mem_info ring_mem;\n+};\n+\n+struct bnxt_ctx_mem_info {\n+\tuint32_t        qp_max_entries;\n+\tuint16_t        qp_min_qp1_entries;\n+\tuint16_t        qp_max_l2_entries;\n+\tuint16_t        qp_entry_size;\n+\tuint16_t        srq_max_l2_entries;\n+\tuint32_t        srq_max_entries;\n+\tuint16_t        srq_entry_size;\n+\tuint16_t        cq_max_l2_entries;\n+\tuint32_t        cq_max_entries;\n+\tuint16_t        cq_entry_size;\n+\tuint16_t        vnic_max_vnic_entries;\n+\tuint16_t        vnic_max_ring_table_entries;\n+\tuint16_t        vnic_entry_size;\n+\tuint32_t        stat_max_entries;\n+\tuint16_t        stat_entry_size;\n+\tuint16_t        tqm_entry_size;\n+\tuint32_t        tqm_min_entries_per_ring;\n+\tuint32_t        tqm_max_entries_per_ring;\n+\tuint32_t        mrav_max_entries;\n+\tuint16_t        mrav_entry_size;\n+\tuint16_t        tim_entry_size;\n+\tuint32_t        tim_max_entries;\n+\tuint8_t         tqm_entries_multiple;\n+\n+\tuint32_t        flags;\n+#define BNXT_CTX_FLAG_INITED    0x01\n+\n+\tstruct bnxt_ctx_pg_info qp_mem;\n+\tstruct bnxt_ctx_pg_info srq_mem;\n+\tstruct bnxt_ctx_pg_info cq_mem;\n+\tstruct bnxt_ctx_pg_info vnic_mem;\n+\tstruct bnxt_ctx_pg_info stat_mem;\n+\tstruct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TC_Q];\n+};\n+\n #define BNXT_HWRM_SHORT_REQ_LEN\t\tsizeof(struct hwrm_short_input)\n struct bnxt {\n \tvoid\t\t\t\t*bar0;\n@@ -262,6 +349,7 @@ struct bnxt {\n #define BNXT_FLAG_KONG_MB_EN\t(1 << 10)\n #define BNXT_FLAG_TRUSTED_VF_EN\t(1 << 11)\n #define BNXT_FLAG_DFLT_VNIC_SET\t(1 << 12)\n+#define BNXT_FLAG_THOR_CHIP\t(1 << 13)\n #define BNXT_FLAG_NEW_RM\t(1 << 30)\n #define BNXT_FLAG_INIT_DONE\t(1U << 31)\n #define BNXT_PF(bp)\t\t(!((bp)->flags & BNXT_FLAG_VF))\n@@ -272,6 +360,9 @@ struct bnxt {\n #define BNXT_USE_CHIMP_MB\t0 //For non-CFA commands, everything uses 
Chimp.\n #define BNXT_USE_KONG(bp)\t((bp)->flags & BNXT_FLAG_KONG_MB_EN)\n #define BNXT_VF_IS_TRUSTED(bp)\t((bp)->flags & BNXT_FLAG_TRUSTED_VF_EN)\n+#define BNXT_CHIP_THOR(bp)\t((bp)->flags & BNXT_FLAG_THOR_CHIP)\n+#define BNXT_HAS_NQ(bp)\t\tBNXT_CHIP_THOR(bp)\n+#define BNXT_HAS_RING_GRPS(bp)\t(!BNXT_CHIP_THOR(bp))\n \n \tunsigned int\t\trx_nr_rings;\n \tunsigned int\t\trx_cp_nr_rings;\n@@ -325,6 +416,9 @@ struct bnxt {\n \tstruct bnxt_link_info\tlink_info;\n \tstruct bnxt_cos_queue_info\tcos_queue[BNXT_COS_QUEUE_COUNT];\n \tuint8_t\t\t\ttx_cosq_id;\n+\tuint8_t                 max_tc;\n+\tuint8_t                 max_lltc;\n+\tuint8_t                 max_q;\n \n \tuint16_t\t\tfw_fid;\n \tuint8_t\t\t\tdflt_mac_addr[RTE_ETHER_ADDR_LEN];\n@@ -332,11 +426,12 @@ struct bnxt {\n \tuint16_t\t\tmax_cp_rings;\n \tuint16_t\t\tmax_tx_rings;\n \tuint16_t\t\tmax_rx_rings;\n+\tuint16_t\t\tmax_nq_rings;\n \tuint16_t\t\tmax_l2_ctx;\n \tuint16_t\t\tmax_vnics;\n \tuint16_t\t\tmax_stat_ctx;\n \tuint16_t\t\tvlan;\n-\tstruct bnxt_pf_info\t\tpf;\n+\tstruct bnxt_pf_info\tpf;\n \tuint8_t\t\t\tport_partition_type;\n \tuint8_t\t\t\tdev_stopped;\n \tuint8_t\t\t\tvxlan_port_cnt;\n@@ -352,6 +447,7 @@ struct bnxt {\n \tuint8_t\t\t\tnum_leds;\n \tstruct bnxt_ptp_cfg     *ptp_cfg;\n \tuint16_t\t\tvf_resv_strategy;\n+\tstruct bnxt_ctx_mem_info        *ctx;\n };\n \n int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);\ndiff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h\nindex 77a22d241..8c6a34b61 100644\n--- a/drivers/net/bnxt/bnxt_cpr.h\n+++ b/drivers/net/bnxt/bnxt_cpr.h\n@@ -19,6 +19,10 @@ struct bnxt_db_info;\n \t(!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) &\t\\\n \t    CMPL_BASE_V) == !(v))\n \n+#define NQ_CMP_VALID(nqcmp, raw_cons, ring)\t\t\\\n+\t(!!((nqcmp)->v & rte_cpu_to_le_32(NQ_CN_V)) ==\t\\\n+\t !((raw_cons) & ((ring)->ring_size)))\n+\n #define CMP_TYPE(cmp)\t\t\t\t\t\t\\\n \t(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)\n \n@@ -70,8 +74,12 @@ struct bnxt_db_info;\n \t\t    ((cpr)->cp_db.doorbell))\n \n struct bnxt_db_info {\n-\tvoid\t\t*doorbell;\n-\tuint32_t\tdb_key32;\n+\tvoid                    *doorbell;\n+\tunion {\n+\t\tuint64_t        db_key64;\n+\t\tuint32_t        db_key32;\n+\t};\n+\tbool                    db_64;\n };\n \n struct bnxt_ring;\ndiff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c\nindex 070262468..d26066062 100644\n--- a/drivers/net/bnxt/bnxt_ethdev.c\n+++ b/drivers/net/bnxt/bnxt_ethdev.c\n@@ -71,6 +71,10 @@ int bnxt_logtype_driver;\n #define BROADCOM_DEV_ID_57407_MF 0x16ea\n #define BROADCOM_DEV_ID_57414_MF 0x16ec\n #define BROADCOM_DEV_ID_57416_MF 0x16ee\n+#define BROADCOM_DEV_ID_57508 0x1750\n+#define BROADCOM_DEV_ID_57504 0x1751\n+#define BROADCOM_DEV_ID_57502 0x1752\n+#define BROADCOM_DEV_ID_57500_VF 0x1807\n #define BROADCOM_DEV_ID_58802 0xd802\n #define BROADCOM_DEV_ID_58804 0xd804\n #define BROADCOM_DEV_ID_58808 0x16f0\n@@ -119,6 +123,10 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {\n \t{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },\n \t{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },\n \t{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },\n+\t{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },\n+\t{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },\n+\t{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },\n+\t{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF) },\n 
\t{ .vendor_id = 0, /* sentinel */ },\n };\n \n@@ -224,6 +232,12 @@ static int bnxt_init_chip(struct bnxt *bp)\n \t\tbp->flags &= ~BNXT_FLAG_JUMBO;\n \t}\n \n+\t/* THOR does not support ring groups.\n+\t * But we will use the array to save RSS context IDs.\n+\t */\n+\tif (BNXT_CHIP_THOR(bp))\n+\t\tbp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;\n+\n \trc = bnxt_alloc_all_hwrm_stat_ctxs(bp);\n \tif (rc) {\n \t\tPMD_DRV_LOG(ERR, \"HWRM stat ctx alloc failure rc: %x\\n\", rc);\n@@ -317,7 +331,7 @@ static int bnxt_init_chip(struct bnxt *bp)\n \t\t\t\t    \"rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\\n\",\n \t\t\t\t    j, rxq->vnic, rxq->vnic->fw_grp_ids);\n \n-\t\t\tif (rxq->rx_deferred_start)\n+\t\t\tif (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)\n \t\t\t\trxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;\n \t\t}\n \n@@ -573,22 +587,16 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)\n \t    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >\n \t    bp->max_cp_rings ||\n \t    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >\n-\t    bp->max_stat_ctx ||\n-\t    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps ||\n-\t    (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&\n-\t     bp->max_vnics < eth_dev->data->nb_rx_queues)) {\n-\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\"Insufficient resources to support requested config\\n\");\n-\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\"Num Queues Requested: Tx %d, Rx %d\\n\",\n-\t\t\teth_dev->data->nb_tx_queues,\n-\t\t\teth_dev->data->nb_rx_queues);\n-\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\"MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\\n\",\n-\t\t\tbp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,\n-\t\t\tbp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);\n-\t\treturn -ENOSPC;\n-\t}\n+\t    bp->max_stat_ctx)\n+\t\tgoto resource_error;\n+\n+\tif (BNXT_HAS_RING_GRPS(bp) &&\n+\t    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)\n+\t\tgoto resource_error;\n+\n+\tif (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&\n+\t    bp->max_vnics < eth_dev->data->nb_rx_queues)\n+\t\tgoto resource_error;\n \n \tbp->rx_cp_nr_rings = bp->rx_nr_rings;\n \tbp->tx_cp_nr_rings = bp->tx_nr_rings;\n@@ -601,6 +609,19 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)\n \t\tbnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);\n \t}\n \treturn 0;\n+\n+resource_error:\n+\tPMD_DRV_LOG(ERR,\n+\t\t    \"Insufficient resources to support requested config\\n\");\n+\tPMD_DRV_LOG(ERR,\n+\t\t    \"Num Queues Requested: Tx %d, Rx %d\\n\",\n+\t\t    eth_dev->data->nb_tx_queues,\n+\t\t    eth_dev->data->nb_rx_queues);\n+\tPMD_DRV_LOG(ERR,\n+\t\t    \"MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\\n\",\n+\t\t    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,\n+\t\t    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);\n+\treturn -ENOSPC;\n }\n \n static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)\n@@ -3265,7 +3286,8 @@ static bool bnxt_vf_pciid(uint16_t id)\n \t    id == BROADCOM_DEV_ID_57414_VF ||\n \t    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||\n \t    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||\n-\t    id == BROADCOM_DEV_ID_58802_VF)\n+\t    id == BROADCOM_DEV_ID_58802_VF ||\n+\t    id == BROADCOM_DEV_ID_57500_VF)\n \t\treturn true;\n \treturn false;\n }\n@@ -3327,6 +3349,245 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)\n \treturn rc;\n }\n \n+static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp,\n+\t\t\t\t  struct bnxt_ctx_pg_info *ctx_pg,\n+\t\t\t\t  uint32_t mem_size,\n+\t\t\t\t  
const char *suffix,\n+\t\t\t\t  uint16_t idx)\n+{\n+\tstruct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;\n+\tconst struct rte_memzone *mz = NULL;\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\trte_iova_t mz_phys_addr;\n+\tuint64_t valid_bits = 0;\n+\tuint32_t sz;\n+\tint i;\n+\n+\tif (!mem_size)\n+\t\treturn 0;\n+\n+\trmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /\n+\t\t\t BNXT_PAGE_SIZE;\n+\trmem->page_size = BNXT_PAGE_SIZE;\n+\trmem->pg_arr = ctx_pg->ctx_pg_arr;\n+\trmem->dma_arr = ctx_pg->ctx_dma_arr;\n+\trmem->flags = BNXT_RMEM_VALID_PTE_FLAG;\n+\n+\tvalid_bits = PTU_PTE_VALID;\n+\n+\tif (rmem->nr_pages > 1) {\n+\t\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, \"bnxt_ctx_pg_tbl%s_%x\",\n+\t\t\t suffix, idx);\n+\t\tmz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;\n+\t\tmz = rte_memzone_lookup(mz_name);\n+\t\tif (!mz) {\n+\t\t\tmz = rte_memzone_reserve_aligned(mz_name,\n+\t\t\t\t\t\trmem->nr_pages * 8,\n+\t\t\t\t\t\tSOCKET_ID_ANY,\n+\t\t\t\t\t\tRTE_MEMZONE_2MB |\n+\t\t\t\t\t\tRTE_MEMZONE_SIZE_HINT_ONLY |\n+\t\t\t\t\t\tRTE_MEMZONE_IOVA_CONTIG,\n+\t\t\t\t\t\tBNXT_PAGE_SIZE);\n+\t\t\tif (mz == NULL)\n+\t\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tmemset(mz->addr, 0, mz->len);\n+\t\tmz_phys_addr = mz->iova;\n+\t\tif ((unsigned long)mz->addr == mz_phys_addr) {\n+\t\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t\t\"Memzone physical address same as virtual.\\n\");\n+\t\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t\t    \"Using rte_mem_virt2iova()\\n\");\n+\t\t\tmz_phys_addr = rte_mem_virt2iova(mz->addr);\n+\t\t\tif (mz_phys_addr == 0) {\n+\t\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\t\"unable to map addr to phys memory\\n\");\n+\t\t\t\treturn -ENOMEM;\n+\t\t\t}\n+\t\t}\n+\t\trte_mem_lock_page(((char *)mz->addr));\n+\n+\t\trmem->pg_tbl = mz->addr;\n+\t\trmem->pg_tbl_map = mz_phys_addr;\n+\t\trmem->pg_tbl_mz = mz;\n+\t}\n+\n+\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, \"bnxt_ctx_%s_%x\", suffix, idx);\n+\tmz = rte_memzone_lookup(mz_name);\n+\tif (!mz) {\n+\t\tmz = rte_memzone_reserve_aligned(mz_name,\n+\t\t\t\t\t\t mem_size,\n+\t\t\t\t\t\t SOCKET_ID_ANY,\n+\t\t\t\t\t\t RTE_MEMZONE_1GB |\n+\t\t\t\t\t\t RTE_MEMZONE_SIZE_HINT_ONLY |\n+\t\t\t\t\t\t RTE_MEMZONE_IOVA_CONTIG,\n+\t\t\t\t\t\t BNXT_PAGE_SIZE);\n+\t\tif (mz == NULL)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\tmemset(mz->addr, 0, mz->len);\n+\tmz_phys_addr = mz->iova;\n+\tif ((unsigned long)mz->addr == mz_phys_addr) {\n+\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t    \"Memzone physical address same as virtual.\\n\");\n+\t\tPMD_DRV_LOG(WARNING,\n+\t\t\t    \"Using rte_mem_virt2iova()\\n\");\n+\t\tfor (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE)\n+\t\t\trte_mem_lock_page(((char *)mz->addr) + sz);\n+\t\tmz_phys_addr = rte_mem_virt2iova(mz->addr);\n+\t\tif (mz_phys_addr == RTE_BAD_IOVA) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"unable to map addr to phys memory\\n\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\tfor (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {\n+\t\trte_mem_lock_page(((char *)mz->addr) + sz);\n+\t\trmem->pg_arr[i] = ((char *)mz->addr) + sz;\n+\t\trmem->dma_arr[i] = mz_phys_addr + sz;\n+\n+\t\tif (rmem->nr_pages > 1) {\n+\t\t\tif (i == rmem->nr_pages - 2 &&\n+\t\t\t    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))\n+\t\t\t\tvalid_bits |= PTU_PTE_NEXT_TO_LAST;\n+\t\t\telse if (i == rmem->nr_pages - 1 &&\n+\t\t\t\t (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))\n+\t\t\t\tvalid_bits |= PTU_PTE_LAST;\n+\n+\t\t\trmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |\n+\t\t\t\t\t\t\t   valid_bits);\n+\t\t}\n+\t}\n+\n+\trmem->mz = mz;\n+\tif (rmem->vmem_size)\n+\t\trmem->vmem = (void 
**)mz->addr;\n+\trmem->dma_arr[0] = mz_phys_addr;\n+\treturn 0;\n+}\n+\n+static void bnxt_free_ctx_mem(struct bnxt *bp)\n+{\n+\tint i;\n+\n+\tif (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))\n+\t\treturn;\n+\n+\tbp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;\n+\trte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);\n+\trte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);\n+\trte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);\n+\trte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);\n+\trte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);\n+\trte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);\n+\trte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);\n+\trte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);\n+\trte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);\n+\trte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);\n+\n+\tfor (i = 0; i < BNXT_MAX_Q; i++) {\n+\t\tif (bp->ctx->tqm_mem[i])\n+\t\t\trte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);\n+\t}\n+\n+\trte_free(bp->ctx);\n+\tbp->ctx = NULL;\n+}\n+\n+#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))\n+\n+#define min_t(type, x, y) ({                    \\\n+\ttype __min1 = (x);                      \\\n+\ttype __min2 = (y);                      \\\n+\t__min1 < __min2 ? __min1 : __min2; })\n+\n+#define max_t(type, x, y) ({                    \\\n+\ttype __max1 = (x);                      \\\n+\ttype __max2 = (y);                      \\\n+\t__max1 > __max2 ? __max1 : __max2; })\n+\n+#define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)\n+\n+int bnxt_alloc_ctx_mem(struct bnxt *bp)\n+{\n+\tstruct bnxt_ctx_pg_info *ctx_pg;\n+\tstruct bnxt_ctx_mem_info *ctx;\n+\tuint32_t mem_size, ena, entries;\n+\tint i, rc;\n+\n+\trc = bnxt_hwrm_func_backing_store_qcaps(bp);\n+\tif (rc) {\n+\t\tPMD_DRV_LOG(ERR, \"Query context mem capability failed\\n\");\n+\t\treturn rc;\n+\t}\n+\tctx = bp->ctx;\n+\tif (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))\n+\t\treturn 0;\n+\n+\tctx_pg = &ctx->qp_mem;\n+\tctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;\n+\tmem_size = ctx->qp_entry_size * ctx_pg->entries;\n+\trc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, \"qp_mem\", 0);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tctx_pg = &ctx->srq_mem;\n+\tctx_pg->entries = ctx->srq_max_l2_entries;\n+\tmem_size = ctx->srq_entry_size * ctx_pg->entries;\n+\trc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, \"srq_mem\", 0);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tctx_pg = &ctx->cq_mem;\n+\tctx_pg->entries = ctx->cq_max_l2_entries;\n+\tmem_size = ctx->cq_entry_size * ctx_pg->entries;\n+\trc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, \"cq_mem\", 0);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tctx_pg = &ctx->vnic_mem;\n+\tctx_pg->entries = ctx->vnic_max_vnic_entries +\n+\t\tctx->vnic_max_ring_table_entries;\n+\tmem_size = ctx->vnic_entry_size * ctx_pg->entries;\n+\trc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, \"vnic_mem\", 0);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tctx_pg = &ctx->stat_mem;\n+\tctx_pg->entries = ctx->stat_max_entries;\n+\tmem_size = ctx->stat_entry_size * ctx_pg->entries;\n+\trc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, \"stat_mem\", 0);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tentries = ctx->qp_max_l2_entries;\n+\tentries = roundup(entries, ctx->tqm_entries_multiple);\n+\tentries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,\n+\t\t\t  ctx->tqm_max_entries_per_ring);\n+\tfor (i = 0, ena = 0; i < BNXT_MAX_Q; i++) {\n+\t\tctx_pg = ctx->tqm_mem[i];\n+\t\t/* use min tqm entries for now. 
*/\n+\t\tctx_pg->entries = entries;\n+\t\tmem_size = ctx->tqm_entry_size * ctx_pg->entries;\n+\t\trc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, \"tqm_mem\", i);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t\tena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;\n+\t}\n+\n+\tena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;\n+\trc = bnxt_hwrm_func_backing_store_cfg(bp, ena);\n+\tif (rc)\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to configure context mem: rc = %d\\n\", rc);\n+\telse\n+\t\tctx->flags |= BNXT_CTX_FLAG_INITED;\n+\n+\treturn 0;\n+}\n \n #define ALLOW_FUNC(x)\t\\\n \t{ \\\n@@ -3361,6 +3622,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)\n \tif (bnxt_vf_pciid(pci_dev->id.device_id))\n \t\tbp->flags |= BNXT_FLAG_VF;\n \n+\tif (pci_dev->id.device_id == BROADCOM_DEV_ID_57508 ||\n+\t    pci_dev->id.device_id == BROADCOM_DEV_ID_57504 ||\n+\t    pci_dev->id.device_id == BROADCOM_DEV_ID_57502 ||\n+\t    pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF)\n+\t\tbp->flags |= BNXT_FLAG_THOR_CHIP;\n+\n \trc = bnxt_init_board(eth_dev);\n \tif (rc) {\n \t\tPMD_DRV_LOG(ERR,\n@@ -3497,13 +3764,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)\n \t\tPMD_DRV_LOG(ERR, \"hwrm queue qportcfg failed\\n\");\n \t\tgoto error_free;\n \t}\n-\n-\trc = bnxt_hwrm_func_qcfg(bp);\n-\tif (rc) {\n-\t\tPMD_DRV_LOG(ERR, \"hwrm func qcfg failed\\n\");\n-\t\tgoto error_free;\n-\t}\n-\n \t/* Get the MAX capabilities for this function */\n \trc = bnxt_hwrm_func_qcaps(bp);\n \tif (rc) {\n@@ -3538,7 +3798,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)\n \tmemcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));\n \tmemcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);\n \n-\tif (bp->max_ring_grps < bp->rx_cp_nr_rings) {\n+\t/* THOR does not support ring groups.\n+\t * But we will use the array to save RSS context IDs.\n+\t */\n+\tif (BNXT_CHIP_THOR(bp)) {\n+\t\tbp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;\n+\t} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {\n \t\t/* 1 ring is for default completion ring */\n \t\tPMD_DRV_LOG(ERR, \"Insufficient resource: Ring Group\\n\");\n \t\trc = -ENOSPC;\n@@ -3592,6 +3857,11 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)\n \t\tpci_dev->mem_resource[0].phys_addr,\n \t\tpci_dev->mem_resource[0].addr);\n \n+\trc = bnxt_hwrm_func_qcfg(bp);\n+\tif (rc) {\n+\t\tPMD_DRV_LOG(ERR, \"hwrm func qcfg failed\\n\");\n+\t\tgoto error_free;\n+\t}\n \n \tif (BNXT_PF(bp)) {\n \t\t//if (bp->pf.active_vfs) {\n@@ -3677,6 +3947,7 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)\n \t\tbnxt_dev_close_op(eth_dev);\n \tif (bp->pf.vf_info)\n \t\trte_free(bp->pf.vf_info);\n+\tbnxt_free_ctx_mem(bp);\n \teth_dev->dev_ops = NULL;\n \teth_dev->rx_pkt_burst = NULL;\n \teth_dev->tx_pkt_burst = NULL;\ndiff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c\nindex 45d37f176..29f270195 100644\n--- a/drivers/net/bnxt/bnxt_hwrm.c\n+++ b/drivers/net/bnxt/bnxt_hwrm.c\n@@ -29,6 +29,7 @@\n #define HWRM_CMD_TIMEOUT\t\t6000000\n #define HWRM_SPEC_CODE_1_8_3\t\t0x10803\n #define HWRM_VERSION_1_9_1\t\t0x10901\n+#define HWRM_VERSION_1_9_2\t\t0x10903\n \n struct bnxt_plcmodes_cfg {\n \tuint32_t\tflags;\n@@ -62,6 +63,18 @@ static int page_roundup(size_t size)\n \treturn 1 << page_getenum(size);\n }\n \n+static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,\n+\t\t\t\t  uint8_t *pg_attr,\n+\t\t\t\t  uint64_t *pg_dir)\n+{\n+\tif (rmem->nr_pages > 1) {\n+\t\t*pg_attr = 1;\n+\t\t*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);\n+\t} else {\n+\t\t*pg_dir = 
rte_cpu_to_le_64(rmem->dma_arr[0]);\n+\t}\n+}\n+\n /*\n  * HWRM Functions (sent to HWRM)\n  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()\n@@ -608,6 +621,10 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)\n \n \trc = __bnxt_hwrm_func_qcaps(bp);\n \tif (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {\n+\t\trc = bnxt_alloc_ctx_mem(bp);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n \t\trc = bnxt_hwrm_func_resc_qcaps(bp);\n \t\tif (!rc)\n \t\t\tbp->flags |= BNXT_FLAG_NEW_RM;\n@@ -703,13 +720,16 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)\n \n \tHWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);\n \n-\treq.enables = rte_cpu_to_le_32\n-\t\t\t(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |\n-\t\t\tHWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |\n-\t\t\tHWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |\n-\t\t\tHWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |\n-\t\t\tHWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |\n-\t\t\tHWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);\n+\tenables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |\n+\t\t  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |\n+\t\t  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |\n+\t\t  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |\n+\t\t  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;\n+\n+\tif (BNXT_HAS_RING_GRPS(bp)) {\n+\t\tenables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;\n+\t\treq.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);\n+\t}\n \n \treq.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);\n \treq.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *\n@@ -717,14 +737,12 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)\n \treq.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);\n \treq.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +\n \t\t\t\t\t      bp->tx_nr_rings);\n-\treq.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);\n \treq.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);\n \tif (bp->vf_resv_strategy ==\n \t    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {\n-\t\tenables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |\n-\t\t\t\tHWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |\n-\t\t\t\tHWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;\n-\t\treq.enables |= rte_cpu_to_le_32(enables);\n+\t\tenables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |\n+\t\t\t   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |\n+\t\t\t   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;\n \t\treq.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);\n \t\treq.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);\n \t\treq.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);\n@@ -738,7 +756,11 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)\n \t\t\tHWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |\n \t\t\tHWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;\n \n+\tif (test && BNXT_HAS_RING_GRPS(bp))\n+\t\tflags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;\n+\n \treq.flags = rte_cpu_to_le_32(flags);\n+\treq.enables |= rte_cpu_to_le_32(enables);\n \n \trc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);\n \n@@ -774,6 +796,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)\n \t\tbp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);\n \t\tbp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);\n \t}\n+\tbp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);\n \tbp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);\n \tif (bp->vf_resv_strategy >\n \t    
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)\n@@ -1092,6 +1115,13 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)\n \t\t\t}\n \t\t}\n \t}\n+\n+\tbp->max_tc = resp->max_configurable_queues;\n+\tbp->max_lltc = resp->max_configurable_lossless_queues;\n+\tif (bp->max_tc > BNXT_MAX_QUEUE)\n+\t\tbp->max_tc = BNXT_MAX_QUEUE;\n+\tbp->max_q = bp->max_tc;\n+\n \tPMD_DRV_LOG(DEBUG, \"Tx Cos Queue to use: %d\\n\", bp->tx_cosq_id);\n \n \treturn rc;\n@@ -1106,6 +1136,8 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,\n \tuint32_t enables = 0;\n \tstruct hwrm_ring_alloc_input req = {.req_type = 0 };\n \tstruct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;\n+\tstruct rte_mempool *mb_pool;\n+\tuint16_t rx_buf_size;\n \n \tHWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);\n \n@@ -1117,24 +1149,59 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,\n \n \tswitch (ring_type) {\n \tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:\n+\t\treq.ring_type = ring_type;\n+\t\treq.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);\n+\t\treq.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);\n \t\treq.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);\n-\t\t/* FALLTHROUGH */\n+\t\tif (stats_ctx_id != INVALID_STATS_CTX_ID)\n+\t\t\tenables |=\n+\t\t\tHWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;\n+\t\tbreak;\n \tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:\n \t\treq.ring_type = ring_type;\n \t\treq.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);\n \t\treq.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);\n+\t\tif (BNXT_CHIP_THOR(bp)) {\n+\t\t\tmb_pool = bp->rx_queues[0]->mb_pool;\n+\t\t\trx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -\n+\t\t\t\t      RTE_PKTMBUF_HEADROOM;\n+\t\t\treq.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);\n+\t\t\tenables |=\n+\t\t\t\tHWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;\n+\t\t}\n \t\tif (stats_ctx_id != INVALID_STATS_CTX_ID)\n \t\t\tenables |=\n-\t\t\tHWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;\n+\t\t\t\tHWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;\n \t\tbreak;\n \tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:\n \t\treq.ring_type = ring_type;\n-\t\t/*\n-\t\t * TODO: Some HWRM versions crash with\n-\t\t * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL\n-\t\t */\n+\t\tif (BNXT_HAS_NQ(bp)) {\n+\t\t\t/* Association of cp ring with nq */\n+\t\t\treq.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);\n+\t\t\tenables |=\n+\t\t\t\tHWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;\n+\t\t}\n \t\treq.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;\n \t\tbreak;\n+\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:\n+\t\treq.ring_type = ring_type;\n+\t\treq.page_size = BNXT_PAGE_SHFT;\n+\t\treq.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;\n+\t\tbreak;\n+\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:\n+\t\treq.ring_type = ring_type;\n+\t\treq.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);\n+\n+\t\tmb_pool = bp->rx_queues[0]->mb_pool;\n+\t\trx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -\n+\t\t\t      RTE_PKTMBUF_HEADROOM;\n+\t\treq.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);\n+\n+\t\treq.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);\n+\t\tenables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |\n+\t\t\t   HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |\n+\t\t\t   HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;\n+\t\tbreak;\n \tdefault:\n \t\tPMD_DRV_LOG(ERR, \"hwrm alloc invalid ring type %d\\n\",\n \t\t\tring_type);\n@@ -1156,12 +1223,23 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,\n \t\t\treturn rc;\n \t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:\n \t\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\t\"hwrm_ring_alloc rx 
failed. rc:%d\\n\", rc);\n+\t\t\t\t    \"hwrm_ring_alloc rx failed. rc:%d\\n\", rc);\n+\t\t\tHWRM_UNLOCK();\n+\t\t\treturn rc;\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"hwrm_ring_alloc rx agg failed. rc:%d\\n\",\n+\t\t\t\t    rc);\n \t\t\tHWRM_UNLOCK();\n \t\t\treturn rc;\n \t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:\n \t\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\t\"hwrm_ring_alloc tx failed. rc:%d\\n\", rc);\n+\t\t\t\t    \"hwrm_ring_alloc tx failed. rc:%d\\n\", rc);\n+\t\t\tHWRM_UNLOCK();\n+\t\t\treturn rc;\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"hwrm_ring_alloc nq failed. rc:%d\\n\", rc);\n \t\t\tHWRM_UNLOCK();\n \t\t\treturn rc;\n \t\tdefault:\n@@ -1208,6 +1286,14 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,\n \t\t\tPMD_DRV_LOG(ERR, \"hwrm_ring_free tx failed. rc:%d\\n\",\n \t\t\t\trc);\n \t\t\treturn rc;\n+\t\tcase HWRM_RING_FREE_INPUT_RING_TYPE_NQ:\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"hwrm_ring_free nq failed. rc:%d\\n\", rc);\n+\t\t\treturn rc;\n+\t\tcase HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"hwrm_ring_free agg failed. rc:%d\\n\", rc);\n+\t\t\treturn rc;\n \t\tdefault:\n \t\t\tPMD_DRV_LOG(ERR, \"Invalid ring, rc:%d\\n\", rc);\n \t\t\treturn rc;\n@@ -1332,6 +1418,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)\n \tstruct hwrm_vnic_alloc_input req = { 0 };\n \tstruct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;\n \n+\tif (!BNXT_HAS_RING_GRPS(bp))\n+\t\tgoto skip_ring_grps;\n+\n \t/* map ring groups to this vnic */\n \tPMD_DRV_LOG(DEBUG, \"Alloc VNIC. Start %x, End %x\\n\",\n \t\tvnic->start_grp_id, vnic->end_grp_id);\n@@ -1342,6 +1431,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)\n \tvnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;\n \tvnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;\n \tvnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;\n+\n+skip_ring_grps:\n \tvnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +\n \t\t\t\tRTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;\n \tHWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);\n@@ -1423,6 +1514,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)\n \tstruct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;\n \tuint32_t ctx_enable_flag = 0;\n \tstruct bnxt_plcmodes_cfg pmodes;\n+\tuint32_t enables = 0;\n \n \tif (vnic->fw_vnic_id == INVALID_HW_RING_ID) {\n \t\tPMD_DRV_LOG(DEBUG, \"VNIC ID %x\\n\", vnic->fw_vnic_id);\n@@ -1435,9 +1527,22 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)\n \n \tHWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);\n \n+\tif (BNXT_CHIP_THOR(bp)) {\n+\t\tstruct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];\n+\t\tstruct bnxt_rx_ring_info *rxr = rxq->rx_ring;\n+\t\tstruct bnxt_cp_ring_info *cpr = bp->def_cp_ring;\n+\n+\t\treq.default_rx_ring_id =\n+\t\t\trte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);\n+\t\treq.default_cmpl_ring_id =\n+\t\t\trte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);\n+\t\tenables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |\n+\t\t\t  HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;\n+\t\tgoto config_mru;\n+\t}\n+\n \t/* Only RSS support for now TBD: COS & LB */\n-\treq.enables =\n-\t    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);\n+\tenables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;\n \tif (vnic->lb_rule != 0xffff)\n \t\tctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;\n \tif (vnic->cos_rule != 0xffff)\n@@ -1446,12 +1551,15 @@ int 
bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)\n \t\tctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;\n \t\tctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;\n \t}\n-\treq.enables |= rte_cpu_to_le_32(ctx_enable_flag);\n-\treq.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);\n+\tenables |= ctx_enable_flag;\n \treq.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);\n \treq.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);\n \treq.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);\n \treq.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);\n+\n+config_mru:\n+\treq.enables = rte_cpu_to_le_32(enables);\n+\treq.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);\n \treq.mru = rte_cpu_to_le_16(vnic->mru);\n \t/* Configure default VNIC only once. */\n \tif (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {\n@@ -1672,6 +1780,9 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,\n \tstruct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };\n \tstruct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;\n \n+\tif (BNXT_CHIP_THOR(bp))\n+\t\treturn 0;\n+\n \tHWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);\n \n \tif (enable) {\n@@ -1887,6 +1998,9 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)\n \tuint16_t idx;\n \tuint32_t rc = 0;\n \n+\tif (!BNXT_HAS_RING_GRPS(bp))\n+\t\treturn 0;\n+\n \tfor (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {\n \n \t\tif (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)\n@@ -1900,6 +2014,18 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)\n \treturn rc;\n }\n \n+static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)\n+{\n+\tstruct bnxt_ring *cp_ring = cpr->cp_ring_struct;\n+\n+\tbnxt_hwrm_ring_free(bp, cp_ring,\n+\t\t\t    HWRM_RING_FREE_INPUT_RING_TYPE_NQ);\n+\tcp_ring->fw_ring_id = INVALID_HW_RING_ID;\n+\tmemset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *\n+\t\t\t\t     sizeof(*cpr->cp_desc_ring));\n+\tcpr->cp_raw_cons = 0;\n+}\n+\n static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)\n {\n \tstruct bnxt_ring *cp_ring = cpr->cp_ring_struct;\n@@ -1935,6 +2061,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)\n \tring = rxr->ag_ring_struct;\n \tif (ring->fw_ring_id != INVALID_HW_RING_ID) {\n \t\tbnxt_hwrm_ring_free(bp, ring,\n+\t\t\t\t    BNXT_CHIP_THOR(bp) ?\n+\t\t\t\t    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :\n \t\t\t\t    HWRM_RING_FREE_INPUT_RING_TYPE_RX);\n \t\tring->fw_ring_id = INVALID_HW_RING_ID;\n \t\tmemset(rxr->ag_buf_ring, 0,\n@@ -1943,8 +2071,11 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)\n \t\trxr->ag_prod = 0;\n \t\tbp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;\n \t}\n-\tif (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)\n+\tif (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {\n \t\tbnxt_free_cp_ring(bp, cpr);\n+\t\tif (rxq->nq_ring)\n+\t\t\tbnxt_free_nq_ring(bp, rxq->nq_ring);\n+\t}\n \n \tbp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;\n }\n@@ -1975,6 +2106,8 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)\n \t\tif (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {\n \t\t\tbnxt_free_cp_ring(bp, cpr);\n \t\t\tcpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;\n+\t\t\tif (txq->nq_ring)\n+\t\t\t\tbnxt_free_nq_ring(bp, txq->nq_ring);\n \t\t}\n \t}\n \n@@ -1989,6 +2122,9 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)\n \tuint16_t i;\n \tuint32_t rc = 0;\n \n+\tif (!BNXT_HAS_RING_GRPS(bp))\n+\t\treturn 0;\n+\n \tfor (i = 0; i < bp->rx_cp_nr_rings; i++) {\n \t\trc = 
bnxt_hwrm_ring_grp_alloc(bp, i);\n \t\tif (rc)\n@@ -2516,18 +2652,27 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)\n {\n \tstruct hwrm_func_cfg_input req = {0};\n \tstruct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;\n+\tuint32_t enables;\n \tint rc;\n \n-\treq.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |\n-\t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_MRU |\n-\t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |\n-\t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |\n-\t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |\n-\t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |\n-\t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |\n-\t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |\n-\t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |\n-\t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);\n+\tenables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |\n+\t\t  HWRM_FUNC_CFG_INPUT_ENABLES_MRU |\n+\t\t  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |\n+\t\t  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |\n+\t\t  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |\n+\t\t  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |\n+\t\t  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |\n+\t\t  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |\n+\t\t  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;\n+\n+\tif (BNXT_HAS_RING_GRPS(bp)) {\n+\t\tenables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;\n+\t\treq.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);\n+\t} else if (BNXT_HAS_NQ(bp)) {\n+\t\tenables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;\n+\t\treq.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);\n+\t}\n+\n \treq.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);\n \treq.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);\n \treq.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +\n@@ -2540,8 +2685,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)\n \treq.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);\n \treq.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);\n \treq.num_vnics = rte_cpu_to_le_16(bp->max_vnics);\n-\treq.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);\n \treq.fid = rte_cpu_to_le_16(0xffff);\n+\treq.enables = rte_cpu_to_le_32(enables);\n \n \tHWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);\n \n@@ -2711,6 +2856,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)\n \tbp->pf.func_cfg_flags |=\n \t\tHWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;\n \trc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);\n+\trc = __bnxt_hwrm_func_qcaps(bp);\n \treturn rc;\n }\n \n@@ -3970,6 +4116,192 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,\n \treturn 0;\n }\n \n+#define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)\n+int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)\n+{\n+\tstruct hwrm_func_backing_store_qcaps_input req = {0};\n+\tstruct hwrm_func_backing_store_qcaps_output *resp =\n+\t\tbp->hwrm_cmd_resp_addr;\n+\tint rc;\n+\n+\tif (!BNXT_CHIP_THOR(bp) ||\n+\t    bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||\n+\t    BNXT_VF(bp) ||\n+\t    bp->ctx)\n+\t\treturn 0;\n+\n+\tHWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);\n+\trc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);\n+\tHWRM_CHECK_RESULT_SILENT();\n+\n+\tif (!rc) {\n+\t\tstruct bnxt_ctx_pg_info *ctx_pg;\n+\t\tstruct bnxt_ctx_mem_info *ctx;\n+\t\tint total_alloc_len;\n+\t\tint i;\n+\n+\t\ttotal_alloc_len = sizeof(*ctx);\n+\t\tctx = rte_malloc(\"bnxt_ctx_mem\", total_alloc_len,\n+\t\t\t\t RTE_CACHE_LINE_SIZE);\n+\t\tif (!ctx) {\n+\t\t\trc = -ENOMEM;\n+\t\t\tgoto ctx_err;\n+\t\t}\n+\t\tmemset(ctx, 0, 
total_alloc_len);\n+\n+\t\tctx_pg = rte_malloc(\"bnxt_ctx_pg_mem\",\n+\t\t\t\t    sizeof(*ctx_pg) * BNXT_MAX_Q,\n+\t\t\t\t    RTE_CACHE_LINE_SIZE);\n+\t\tif (!ctx_pg) {\n+\t\t\trc = -ENOMEM;\n+\t\t\tgoto ctx_err;\n+\t\t}\n+\t\tfor (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)\n+\t\t\tctx->tqm_mem[i] = ctx_pg;\n+\n+\t\tbp->ctx = ctx;\n+\t\tctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);\n+\t\tctx->qp_min_qp1_entries =\n+\t\t\trte_le_to_cpu_16(resp->qp_min_qp1_entries);\n+\t\tctx->qp_max_l2_entries =\n+\t\t\trte_le_to_cpu_16(resp->qp_max_l2_entries);\n+\t\tctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);\n+\t\tctx->srq_max_l2_entries =\n+\t\t\trte_le_to_cpu_16(resp->srq_max_l2_entries);\n+\t\tctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);\n+\t\tctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);\n+\t\tctx->cq_max_l2_entries =\n+\t\t\trte_le_to_cpu_16(resp->cq_max_l2_entries);\n+\t\tctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);\n+\t\tctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);\n+\t\tctx->vnic_max_vnic_entries =\n+\t\t\trte_le_to_cpu_16(resp->vnic_max_vnic_entries);\n+\t\tctx->vnic_max_ring_table_entries =\n+\t\t\trte_le_to_cpu_16(resp->vnic_max_ring_table_entries);\n+\t\tctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);\n+\t\tctx->stat_max_entries =\n+\t\t\trte_le_to_cpu_32(resp->stat_max_entries);\n+\t\tctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);\n+\t\tctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);\n+\t\tctx->tqm_min_entries_per_ring =\n+\t\t\trte_le_to_cpu_32(resp->tqm_min_entries_per_ring);\n+\t\tctx->tqm_max_entries_per_ring =\n+\t\t\trte_le_to_cpu_32(resp->tqm_max_entries_per_ring);\n+\t\tctx->tqm_entries_multiple = resp->tqm_entries_multiple;\n+\t\tif (!ctx->tqm_entries_multiple)\n+\t\t\tctx->tqm_entries_multiple = 1;\n+\t\tctx->mrav_max_entries =\n+\t\t\trte_le_to_cpu_32(resp->mrav_max_entries);\n+\t\tctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);\n+\t\tctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);\n+\t\tctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);\n+\t} else {\n+\t\trc = 0;\n+\t}\n+ctx_err:\n+\tHWRM_UNLOCK();\n+\treturn rc;\n+}\n+\n+int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)\n+{\n+\tstruct hwrm_func_backing_store_cfg_input req = {0};\n+\tstruct hwrm_func_backing_store_cfg_output *resp =\n+\t\tbp->hwrm_cmd_resp_addr;\n+\tstruct bnxt_ctx_mem_info *ctx = bp->ctx;\n+\tstruct bnxt_ctx_pg_info *ctx_pg;\n+\tuint32_t *num_entries;\n+\tuint64_t *pg_dir;\n+\tuint8_t *pg_attr;\n+\tuint32_t ena;\n+\tint i, rc;\n+\n+\tif (!ctx)\n+\t\treturn 0;\n+\n+\tHWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);\n+\treq.enables = rte_cpu_to_le_32(enables);\n+\n+\tif (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {\n+\t\tctx_pg = &ctx->qp_mem;\n+\t\treq.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);\n+\t\treq.qp_num_qp1_entries =\n+\t\t\trte_cpu_to_le_16(ctx->qp_min_qp1_entries);\n+\t\treq.qp_num_l2_entries =\n+\t\t\trte_cpu_to_le_16(ctx->qp_max_l2_entries);\n+\t\treq.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);\n+\t\tbnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,\n+\t\t\t\t      &req.qpc_pg_size_qpc_lvl,\n+\t\t\t\t      &req.qpc_page_dir);\n+\t}\n+\n+\tif (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {\n+\t\tctx_pg = &ctx->srq_mem;\n+\t\treq.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);\n+\t\treq.srq_num_l2_entries =\n+\t\t\t\t 
rte_cpu_to_le_16(ctx->srq_max_l2_entries);\n+\t\treq.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);\n+\t\tbnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,\n+\t\t\t\t      &req.srq_pg_size_srq_lvl,\n+\t\t\t\t      &req.srq_page_dir);\n+\t}\n+\n+\tif (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {\n+\t\tctx_pg = &ctx->cq_mem;\n+\t\treq.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);\n+\t\treq.cq_num_l2_entries =\n+\t\t\t\trte_cpu_to_le_16(ctx->cq_max_l2_entries);\n+\t\treq.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);\n+\t\tbnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,\n+\t\t\t\t      &req.cq_pg_size_cq_lvl,\n+\t\t\t\t      &req.cq_page_dir);\n+\t}\n+\n+\tif (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {\n+\t\tctx_pg = &ctx->vnic_mem;\n+\t\treq.vnic_num_vnic_entries =\n+\t\t\trte_cpu_to_le_16(ctx->vnic_max_vnic_entries);\n+\t\treq.vnic_num_ring_table_entries =\n+\t\t\trte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);\n+\t\treq.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);\n+\t\tbnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,\n+\t\t\t\t      &req.vnic_pg_size_vnic_lvl,\n+\t\t\t\t      &req.vnic_page_dir);\n+\t}\n+\n+\tif (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {\n+\t\tctx_pg = &ctx->stat_mem;\n+\t\treq.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries);\n+\t\treq.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);\n+\t\tbnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,\n+\t\t\t\t      &req.stat_pg_size_stat_lvl,\n+\t\t\t\t      &req.stat_page_dir);\n+\t}\n+\n+\treq.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);\n+\tnum_entries = &req.tqm_sp_num_entries;\n+\tpg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;\n+\tpg_dir = &req.tqm_sp_page_dir;\n+\tena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;\n+\tfor (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {\n+\t\tif (!(enables & ena))\n+\t\t\tcontinue;\n+\n+\t\treq.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);\n+\n+\t\tctx_pg = ctx->tqm_mem[i];\n+\t\t*num_entries = rte_cpu_to_le_16(ctx_pg->entries);\n+\t\tbnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);\n+\t}\n+\n+\trc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);\n+\tHWRM_CHECK_RESULT();\n+\tHWRM_UNLOCK();\n+\tif (rc)\n+\t\trc = -EIO;\n+\treturn rc;\n+}\n+\n int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)\n {\n \tstruct hwrm_port_qstats_ext_input req = {0};\ndiff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h\nindex 53d79f046..ffd99de34 100644\n--- a/drivers/net/bnxt/bnxt_hwrm.h\n+++ b/drivers/net/bnxt/bnxt_hwrm.h\n@@ -36,6 +36,13 @@ struct bnxt_cp_ring_info;\n #define HWRM_SPEC_CODE_1_9_0\t\t0x10900\n #define HWRM_SPEC_CODE_1_9_2\t\t0x10902\n \n+#define FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES              \\\n+\t(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |        \\\n+\tHWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |        \\\n+\tHWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ |         \\\n+\tHWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC |       \\\n+\tHWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)\n+\n int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,\n \t\t\t\t   struct bnxt_vnic_info *vnic);\n int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,\n@@ -179,4 +186,7 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,\n \t\t\tstruct bnxt_coal *coal, uint16_t ring_id);\n int bnxt_hwrm_check_vf_rings(struct bnxt *bp);\n int bnxt_hwrm_ext_port_qstats(struct bnxt *bp);\n+int bnxt_hwrm_func_backing_store_qcaps(struct bnxt 
*bp);\n+int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables);\n+int bnxt_alloc_ctx_mem(struct bnxt *bp);\n #endif\ndiff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c\nindex 8473e4abe..56bb463a6 100644\n--- a/drivers/net/bnxt/bnxt_ring.c\n+++ b/drivers/net/bnxt/bnxt_ring.c\n@@ -67,6 +67,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,\n \t\t\t    struct bnxt_tx_queue *txq,\n \t\t\t    struct bnxt_rx_queue *rxq,\n \t\t\t    struct bnxt_cp_ring_info *cp_ring_info,\n+\t\t\t    struct bnxt_cp_ring_info *nq_ring_info,\n \t\t\t    const char *suffix)\n {\n \tstruct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;\n@@ -78,49 +79,70 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,\n \tuint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;\n \tconst struct rte_memzone *mz = NULL;\n \tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\trte_iova_t mz_phys_addr_base;\n \trte_iova_t mz_phys_addr;\n \tint sz;\n \n \tint stats_len = (tx_ring_info || rx_ring_info) ?\n \t    RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -\n \t\t\t\t   sizeof (struct hwrm_resp_hdr)) : 0;\n+\tstats_len = RTE_ALIGN(stats_len, 128);\n \n \tint cp_vmem_start = stats_len;\n \tint cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);\n+\tcp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);\n \n-\tint tx_vmem_start = cp_vmem_start + cp_vmem_len;\n+\tint nq_vmem_len = BNXT_CHIP_THOR(bp) ?\n+\t\tRTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;\n+\tnq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);\n+\n+\tint nq_vmem_start = cp_vmem_start + cp_vmem_len;\n+\n+\tint tx_vmem_start = nq_vmem_start + nq_vmem_len;\n \tint tx_vmem_len =\n \t    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->\n \t\t\t\t\t\ttx_ring_struct->vmem_size) : 0;\n+\ttx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);\n \n \tint rx_vmem_start = tx_vmem_start + tx_vmem_len;\n \tint rx_vmem_len = rx_ring_info ?\n \t\tRTE_CACHE_LINE_ROUNDUP(rx_ring_info->\n \t\t\t\t\t\trx_ring_struct->vmem_size) : 0;\n+\trx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);\n \tint ag_vmem_start = 0;\n \tint ag_vmem_len = 0;\n \tint cp_ring_start =  0;\n+\tint nq_ring_start = 0;\n \n \tag_vmem_start = rx_vmem_start + rx_vmem_len;\n \tag_vmem_len = rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(\n \t\t\t\trx_ring_info->ag_ring_struct->vmem_size) : 0;\n \tcp_ring_start = ag_vmem_start + ag_vmem_len;\n+\tcp_ring_start = RTE_ALIGN(cp_ring_start, 4096);\n \n \tint cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *\n \t\t\t\t\t\t sizeof(struct cmpl_base));\n+\tcp_ring_len = RTE_ALIGN(cp_ring_len, 128);\n+\tnq_ring_start = cp_ring_start + cp_ring_len;\n+\tnq_ring_start = RTE_ALIGN(nq_ring_start, 4096);\n+\n+\tint nq_ring_len = BNXT_CHIP_THOR(bp) ? 
cp_ring_len : 0;\n \n-\tint tx_ring_start = cp_ring_start + cp_ring_len;\n+\tint tx_ring_start = nq_ring_start + nq_ring_len;\n \tint tx_ring_len = tx_ring_info ?\n \t    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *\n \t\t\t\t   sizeof(struct tx_bd_long)) : 0;\n+\ttx_ring_len = RTE_ALIGN(tx_ring_len, 4096);\n \n \tint rx_ring_start = tx_ring_start + tx_ring_len;\n \tint rx_ring_len =  rx_ring_info ?\n \t\tRTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *\n \t\tsizeof(struct rx_prod_pkt_bd)) : 0;\n+\trx_ring_len = RTE_ALIGN(rx_ring_len, 4096);\n \n \tint ag_ring_start = rx_ring_start + rx_ring_len;\n \tint ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;\n+\tag_ring_len = RTE_ALIGN(ag_ring_len, 4096);\n \n \tint ag_bitmap_start = ag_ring_start + ag_ring_len;\n \tint ag_bitmap_len =  rx_ring_info ?\n@@ -154,14 +176,16 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,\n \t\t\treturn -ENOMEM;\n \t}\n \tmemset(mz->addr, 0, mz->len);\n+\tmz_phys_addr_base = mz->iova;\n \tmz_phys_addr = mz->iova;\n-\tif ((unsigned long)mz->addr == mz_phys_addr) {\n+\tif ((unsigned long)mz->addr == mz_phys_addr_base) {\n \t\tPMD_DRV_LOG(WARNING,\n \t\t\t\"Memzone physical address same as virtual.\\n\");\n \t\tPMD_DRV_LOG(WARNING,\n \t\t\t\"Using rte_mem_virt2iova()\\n\");\n \t\tfor (sz = 0; sz < total_alloc_len; sz += getpagesize())\n \t\t\trte_mem_lock_page(((char *)mz->addr) + sz);\n+\t\tmz_phys_addr_base = rte_mem_virt2iova(mz->addr);\n \t\tmz_phys_addr = rte_mem_virt2iova(mz->addr);\n \t\tif (mz_phys_addr == 0) {\n \t\t\tPMD_DRV_LOG(ERR,\n@@ -255,6 +279,24 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,\n \t\tcp_ring_info->hw_stats_map = mz_phys_addr;\n \t}\n \tcp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;\n+\n+\tif (BNXT_HAS_NQ(bp)) {\n+\t\tstruct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;\n+\n+\t\tnq_ring->bd = (char *)mz->addr + nq_ring_start;\n+\t\tnq_ring->bd_dma = mz_phys_addr + nq_ring_start;\n+\t\tnq_ring_info->cp_desc_ring = nq_ring->bd;\n+\t\tnq_ring_info->cp_desc_mapping = nq_ring->bd_dma;\n+\t\tnq_ring->mem_zone = (const void *)mz;\n+\n+\t\tif (!nq_ring->bd)\n+\t\t\treturn -ENOMEM;\n+\t\tif (nq_ring->vmem_size)\n+\t\t\t*nq_ring->vmem = (char *)mz->addr + nq_vmem_start;\n+\n+\t\tnq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;\n+\t}\n+\n \treturn 0;\n }\n \n@@ -279,43 +321,109 @@ static void bnxt_init_dflt_coal(struct bnxt_coal *coal)\n static void bnxt_set_db(struct bnxt *bp,\n \t\t\tstruct bnxt_db_info *db,\n \t\t\tuint32_t ring_type,\n-\t\t\tuint32_t map_idx)\n+\t\t\tuint32_t map_idx,\n+\t\t\tuint32_t fid)\n {\n-\tdb->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;\n-\tswitch (ring_type) {\n-\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:\n-\t\tdb->db_key32 = DB_KEY_TX;\n-\t\tbreak;\n-\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:\n-\t\tdb->db_key32 = DB_KEY_RX;\n-\t\tbreak;\n-\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:\n-\t\tdb->db_key32 = DB_KEY_CP;\n-\t\tbreak;\n+\tif (BNXT_CHIP_THOR(bp)) {\n+\t\tif (BNXT_PF(bp))\n+\t\t\tdb->doorbell = (char *)bp->doorbell_base + 0x10000;\n+\t\telse\n+\t\t\tdb->doorbell = (char *)bp->doorbell_base + 0x4000;\n+\t\tswitch (ring_type) {\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:\n+\t\t\tdb->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;\n+\t\t\tbreak;\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:\n+\t\t\tdb->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;\n+\t\t\tbreak;\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:\n+\t\t\tdb->db_key64 = DBR_PATH_L2 | 
DBR_TYPE_CQ;\n+\t\t\tbreak;\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:\n+\t\t\tdb->db_key64 = DBR_PATH_L2 | DBR_TYPE_NQ;\n+\t\t\tbreak;\n+\t\t}\n+\t\tdb->db_key64 |= (uint64_t)fid << DBR_XID_SFT;\n+\t\tdb->db_64 = true;\n+\t} else {\n+\t\tdb->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;\n+\t\tswitch (ring_type) {\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:\n+\t\t\tdb->db_key32 = DB_KEY_TX;\n+\t\t\tbreak;\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:\n+\t\t\tdb->db_key32 = DB_KEY_RX;\n+\t\t\tbreak;\n+\t\tcase HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:\n+\t\t\tdb->db_key32 = DB_KEY_CP;\n+\t\t\tbreak;\n+\t\t}\n+\t\tdb->db_64 = false;\n \t}\n }\n \n static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,\n-\t\t\t\tstruct bnxt_cp_ring_info *cpr)\n+\t\t\t\tstruct bnxt_cp_ring_info *cpr,\n+\t\t\t\tstruct bnxt_cp_ring_info *nqr)\n {\n \tstruct bnxt_ring *cp_ring = cpr->cp_ring_struct;\n+\tuint32_t nq_ring_id = HWRM_NA_SIGNATURE;\n \tuint8_t ring_type;\n \tint rc = 0;\n \n \tring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;\n \n+\tif (BNXT_HAS_NQ(bp)) {\n+\t\tif (nqr) {\n+\t\t\tnq_ring_id = nqr->cp_ring_struct->fw_ring_id;\n+\t\t} else {\n+\t\t\tPMD_DRV_LOG(ERR, \"NQ ring is NULL\\n\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n \trc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, queue_index,\n-\t\t\t\t  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);\n+\t\t\t\t  HWRM_NA_SIGNATURE, nq_ring_id);\n \tif (rc)\n \t\treturn rc;\n \n \tcpr->cp_cons = 0;\n-\tbnxt_set_db(bp, &cpr->cp_db, ring_type, queue_index);\n+\tbnxt_set_db(bp, &cpr->cp_db, ring_type, queue_index,\n+\t\t    cp_ring->fw_ring_id);\n \tbnxt_db_cq(cpr);\n \n \treturn 0;\n }\n \n+static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,\n+\t\t\t      struct bnxt_cp_ring_info *nqr,\n+\t\t\t      bool rx)\n+{\n+\tstruct bnxt_ring *nq_ring = nqr->cp_ring_struct;\n+\tuint8_t ring_type;\n+\tint rc = 0;\n+\n+\tif (!BNXT_HAS_NQ(bp))\n+\t\treturn -EINVAL;\n+\n+\tring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;\n+\n+\trc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, queue_index,\n+\t\t\t\t  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (rx)\n+\t\tbp->grp_info[queue_index].cp_fw_ring_id = nq_ring->fw_ring_id;\n+\n+\tbnxt_set_db(bp, &nqr->cp_db, ring_type, queue_index,\n+\t\t    nq_ring->fw_ring_id);\n+\tbnxt_db_nq(nqr);\n+\n+\treturn 0;\n+}\n+\n static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)\n {\n \tstruct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];\n@@ -336,7 +444,7 @@ static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)\n \n \trxr->rx_prod = 0;\n \tbp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;\n-\tbnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index);\n+\tbnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id);\n \tbnxt_db_write(&rxr->rx_db, rxr->rx_prod);\n \n \treturn 0;\n@@ -354,7 +462,14 @@ static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)\n \tuint8_t ring_type;\n \tint rc = 0;\n \n-\tring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;\n+\tring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;\n+\n+\tif (BNXT_CHIP_THOR(bp)) {\n+\t\tring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;\n+\t\thw_stats_ctx_id = cpr->hw_stats_ctx_id;\n+\t} else {\n+\t\tring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;\n+\t}\n \n \trc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,\n \t\t\t\t  hw_stats_ctx_id, cp_ring->fw_ring_id);\n@@ -364,7 +479,7 @@ static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)\n \n 
\trxr->ag_prod = 0;\n \tbp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;\n-\tbnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx);\n+\tbnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id);\n \tbnxt_db_write(&rxr->ag_db, rxr->ag_prod);\n \n \treturn 0;\n@@ -375,10 +490,16 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)\n \tstruct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];\n \tstruct bnxt_cp_ring_info *cpr = rxq->cp_ring;\n \tstruct bnxt_ring *cp_ring = cpr->cp_ring_struct;\n+\tstruct bnxt_cp_ring_info *nqr = rxq->nq_ring;\n \tstruct bnxt_rx_ring_info *rxr = rxq->rx_ring;\n \tint rc = 0;\n \n-\tif (bnxt_alloc_cmpl_ring(bp, queue_index, cpr))\n+\tif (BNXT_HAS_NQ(bp)) {\n+\t\tif (bnxt_alloc_nq_ring(bp, queue_index, nqr, true))\n+\t\t\tgoto err_out;\n+\t}\n+\n+\tif (bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr))\n \t\tgoto err_out;\n \n \tbp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;\n@@ -444,12 +565,16 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)\n \tfor (i = 0; i < bp->rx_cp_nr_rings; i++) {\n \t\tstruct bnxt_rx_queue *rxq = bp->rx_queues[i];\n \t\tstruct bnxt_cp_ring_info *cpr = rxq->cp_ring;\n+\t\tstruct bnxt_cp_ring_info *nqr = rxq->nq_ring;\n \t\tstruct bnxt_ring *cp_ring = cpr->cp_ring_struct;\n \t\tstruct bnxt_rx_ring_info *rxr = rxq->rx_ring;\n \n-\t\tbp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;\n+\t\tif (BNXT_HAS_NQ(bp)) {\n+\t\t\tif (bnxt_alloc_nq_ring(bp, i, nqr, true))\n+\t\t\t\tgoto err_out;\n+\t\t}\n \n-\t\tif (bnxt_alloc_cmpl_ring(bp, i, cpr))\n+\t\tif (bnxt_alloc_cmpl_ring(bp, i, cpr, nqr))\n \t\t\tgoto err_out;\n \n \t\tbp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;\n@@ -492,11 +617,17 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)\n \t\tstruct bnxt_tx_queue *txq = bp->tx_queues[i];\n \t\tstruct bnxt_cp_ring_info *cpr = txq->cp_ring;\n \t\tstruct bnxt_ring *cp_ring = cpr->cp_ring_struct;\n+\t\tstruct bnxt_cp_ring_info *nqr = txq->nq_ring;\n \t\tstruct bnxt_tx_ring_info *txr = txq->tx_ring;\n \t\tstruct bnxt_ring *ring = txr->tx_ring_struct;\n \t\tunsigned int idx = i + bp->rx_cp_nr_rings;\n \n-\t\tif (bnxt_alloc_cmpl_ring(bp, idx, cpr))\n+\t\tif (BNXT_HAS_NQ(bp)) {\n+\t\t\tif (bnxt_alloc_nq_ring(bp, idx, nqr, false))\n+\t\t\t\tgoto err_out;\n+\t\t}\n+\n+\t\tif (bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr))\n \t\t\tgoto err_out;\n \n \t\t/* Tx ring */\n@@ -508,7 +639,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)\n \t\tif (rc)\n \t\t\tgoto err_out;\n \n-\t\tbnxt_set_db(bp, &txr->tx_db, ring_type, idx);\n+\t\tbnxt_set_db(bp, &txr->tx_db, ring_type, idx, ring->fw_ring_id);\n \t\ttxq->index = idx;\n \t\tbnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);\n \t}\ndiff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h\nindex 8cb0e8eb0..af2c5762f 100644\n--- a/drivers/net/bnxt/bnxt_ring.h\n+++ b/drivers/net/bnxt/bnxt_ring.h\n@@ -49,6 +49,7 @@ struct bnxt_ring {\n \tvoid\t\t\t**vmem;\n \n \tuint16_t\t\tfw_ring_id; /* Ring id filled by Chimp FW */\n+\tuint16_t                fw_rx_ring_id;\n \tconst void\t\t*mem_zone;\n };\n \n@@ -70,19 +71,40 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,\n \t\t\t    struct bnxt_tx_queue *txq,\n \t\t\t    struct bnxt_rx_queue *rxq,\n \t\t\t    struct bnxt_cp_ring_info *cp_ring_info,\n+\t\t\t    struct bnxt_cp_ring_info *nq_ring_info,\n \t\t\t    const char *suffix);\n int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index);\n int bnxt_alloc_hwrm_rings(struct bnxt *bp);\n \n static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t 
idx)\n {\n-\trte_write32(db->db_key32 | idx, db->doorbell);\n+\tif (db->db_64)\n+\t\trte_write64_relaxed(db->db_key64 | idx, db->doorbell);\n+\telse\n+\t\trte_write32(db->db_key32 | idx, db->doorbell);\n+}\n+\n+static inline void bnxt_db_nq(struct bnxt_cp_ring_info *cpr)\n+{\n+\tstruct bnxt_db_info *db = &cpr->cp_db;\n+\n+\trte_smp_wmb();\n+\tif (likely(db->db_64))\n+\t\trte_write64(db->db_key64 | DBR_TYPE_NQ |\n+\t\t\t    RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons),\n+\t\t\t    db->doorbell);\n }\n \n static inline void bnxt_db_cq(struct bnxt_cp_ring_info *cpr)\n {\n+\tstruct bnxt_db_info *db = &cpr->cp_db;\n+\tuint32_t idx = RING_CMP(cpr->cp_ring_struct, cpr->cp_raw_cons);\n+\n \trte_smp_wmb();\n-\tB_CP_DIS_DB(cpr, cpr->cp_raw_cons);\n+\tif (db->db_64)\n+\t\trte_write64(db->db_key64 | idx, db->doorbell);\n+\telse\n+\t\tB_CP_DIS_DB(cpr, cpr->cp_raw_cons);\n }\n \n #endif\ndiff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c\nindex 2ce2ef427..67649f38f 100644\n--- a/drivers/net/bnxt/bnxt_rxq.c\n+++ b/drivers/net/bnxt/bnxt_rxq.c\n@@ -341,7 +341,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \teth_dev->data->rx_queues[queue_idx] = rxq;\n \t/* Allocate RX ring hardware descriptors */\n \tif (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,\n-\t\t\t\"rxr\")) {\n+\t\t\trxq->nq_ring, \"rxr\")) {\n \t\tPMD_DRV_LOG(ERR,\n \t\t\t\"ring_dma_zone_reserve for rx_ring failed!\\n\");\n \t\tbnxt_rx_queue_release_op(rxq);\n@@ -424,15 +424,18 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \tif (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {\n \t\tvnic = rxq->vnic;\n \n-\t\tif (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)\n-\t\t\treturn 0;\n+\t\tif (BNXT_HAS_RING_GRPS(bp)) {\n+\t\t\tif (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)\n+\t\t\t\treturn 0;\n+\n+\t\t\tvnic->fw_grp_ids[rx_queue_id] =\n+\t\t\t\t\tbp->grp_info[rx_queue_id].fw_grp_id;\n+\t\t}\n \n \t\tPMD_DRV_LOG(DEBUG,\n \t\t\t    \"vnic = %p fw_grp_id = %d\\n\",\n \t\t\t    vnic, bp->grp_info[rx_queue_id].fw_grp_id);\n \n-\t\tvnic->fw_grp_ids[rx_queue_id] =\n-\t\t\t\t\tbp->grp_info[rx_queue_id].fw_grp_id;\n \t\trc = bnxt_vnic_rss_configure(bp, vnic);\n \t}\n \n@@ -469,7 +472,8 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \n \tif (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {\n \t\tvnic = rxq->vnic;\n-\t\tvnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;\n+\t\tif (BNXT_HAS_RING_GRPS(bp))\n+\t\t\tvnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;\n \t\trc = bnxt_vnic_rss_configure(bp, vnic);\n \t}\n \ndiff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h\nindex 7c6b4dec9..b5e42d01c 100644\n--- a/drivers/net/bnxt/bnxt_rxq.h\n+++ b/drivers/net/bnxt/bnxt_rxq.h\n@@ -39,6 +39,7 @@ struct bnxt_rx_queue {\n \tuint32_t\t\t\trx_buf_use_size;  /* useable size */\n \tstruct bnxt_rx_ring_info\t*rx_ring;\n \tstruct bnxt_cp_ring_info\t*cp_ring;\n+\tstruct bnxt_cp_ring_info\t*nq_ring;\n \trte_atomic64_t\t\trx_mbuf_alloc_fail;\n \tconst struct rte_memzone *mz;\n };\ndiff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c\nindex 75d2c76a5..44303f3b0 100644\n--- a/drivers/net/bnxt/bnxt_rxr.c\n+++ b/drivers/net/bnxt/bnxt_rxr.c\n@@ -637,6 +637,7 @@ void bnxt_free_rx_rings(struct bnxt *bp)\n int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)\n {\n \tstruct bnxt_cp_ring_info *cpr;\n+\tstruct bnxt_cp_ring_info *nqr;\n \tstruct bnxt_rx_ring_info *rxr;\n \tstruct bnxt_ring *ring;\n \n@@ 
-685,6 +686,32 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)\n \tring->vmem_size = 0;\n \tring->vmem = NULL;\n \n+\tif (BNXT_HAS_NQ(rxq->bp)) {\n+\t\tnqr = rte_zmalloc_socket(\"bnxt_rx_ring_cq\",\n+\t\t\t\t\t sizeof(struct bnxt_cp_ring_info),\n+\t\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\t\tif (nqr == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\trxq->nq_ring = nqr;\n+\n+\t\tring = rte_zmalloc_socket(\"bnxt_rx_ring_struct\",\n+\t\t\t\t\t  sizeof(struct bnxt_ring),\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\t\tif (ring == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tnqr->cp_ring_struct = ring;\n+\t\tring->ring_size =\n+\t\t\trte_align32pow2(rxr->rx_ring_struct->ring_size *\n+\t\t\t\t\t(2 + AGG_RING_SIZE_FACTOR));\n+\t\tring->ring_mask = ring->ring_size - 1;\n+\t\tring->bd = (void *)nqr->cp_desc_ring;\n+\t\tring->bd_dma = nqr->cp_desc_mapping;\n+\t\tring->vmem_size = 0;\n+\t\tring->vmem = NULL;\n+\t}\n+\n \t/* Allocate Aggregator rings */\n \tring = rte_zmalloc_socket(\"bnxt_rx_ring_struct\",\n \t\t\t\t   sizeof(struct bnxt_ring),\ndiff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c\nindex 5a7bfaf3e..4b31e9ebf 100644\n--- a/drivers/net/bnxt/bnxt_txq.c\n+++ b/drivers/net/bnxt/bnxt_txq.c\n@@ -134,7 +134,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \n \t/* Allocate TX ring hardware descriptors */\n \tif (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,\n-\t\t\t\"txr\")) {\n+\t\t\ttxq->nq_ring, \"txr\")) {\n \t\tPMD_DRV_LOG(ERR, \"ring_dma_zone_reserve for tx_ring failed!\");\n \t\tbnxt_tx_queue_release_op(txq);\n \t\trc = -ENOMEM;\ndiff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h\nindex a0d4678d9..9190e3f73 100644\n--- a/drivers/net/bnxt/bnxt_txq.h\n+++ b/drivers/net/bnxt/bnxt_txq.h\n@@ -32,6 +32,7 @@ struct bnxt_tx_queue {\n \n \tunsigned int\t\tcp_nr_rings;\n \tstruct bnxt_cp_ring_info\t*cp_ring;\n+\tstruct bnxt_cp_ring_info        *nq_ring;\n \tconst struct rte_memzone *mz;\n \tstruct rte_mbuf **free;\n };\ndiff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c\nindex 26dd384a6..f34688b44 100644\n--- a/drivers/net/bnxt/bnxt_txr.c\n+++ b/drivers/net/bnxt/bnxt_txr.c\n@@ -57,6 +57,7 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)\n int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)\n {\n \tstruct bnxt_cp_ring_info *cpr;\n+\tstruct bnxt_cp_ring_info *nqr;\n \tstruct bnxt_tx_ring_info *txr;\n \tstruct bnxt_ring *ring;\n \n@@ -100,6 +101,30 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)\n \tring->vmem_size = 0;\n \tring->vmem = NULL;\n \n+\tif (BNXT_HAS_NQ(txq->bp)) {\n+\t\tnqr = rte_zmalloc_socket(\"bnxt_tx_ring_nq\",\n+\t\t\t\t\t sizeof(struct bnxt_cp_ring_info),\n+\t\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\t\tif (nqr == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\ttxq->nq_ring = nqr;\n+\n+\t\tring = rte_zmalloc_socket(\"bnxt_tx_ring_struct\",\n+\t\t\t\t\t  sizeof(struct bnxt_ring),\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\t\tif (ring == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tnqr->cp_ring_struct = ring;\n+\t\tring->ring_size = txr->tx_ring_struct->ring_size;\n+\t\tring->ring_mask = ring->ring_size - 1;\n+\t\tring->bd = (void *)nqr->cp_desc_ring;\n+\t\tring->bd_dma = nqr->cp_desc_mapping;\n+\t\tring->vmem_size = 0;\n+\t\tring->vmem = NULL;\n+\t}\n+\n \treturn 0;\n }\n \n",
    "prefixes": [
        "09/11"
    ]
}