get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
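
For example, the record below can be retrieved anonymously, while updates via PATCH/PUT require authentication (typically a maintainer's API token for the project). The following is a minimal sketch using Python's requests library; the token placeholder and the field values being written are illustrative assumptions, not taken from the response below.

import requests

API = "http://patches.dpdk.org/api"

# Read access needs no credentials; this returns the JSON document shown below.
patch = requests.get(f"{API}/patches/80259/").json()
print(patch["name"], patch["state"])

# Writes require token authentication; state and archived are used here as
# examples of writable fields (token and values are placeholders).
resp = requests.patch(
    f"{API}/patches/80259/",
    headers={"Authorization": "Token <your-api-token>"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()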

GET /api/patches/80259/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 80259,
    "url": "http://patches.dpdk.org/api/patches/80259/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201010041153.63921-5-ajit.khaparde@broadcom.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201010041153.63921-5-ajit.khaparde@broadcom.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201010041153.63921-5-ajit.khaparde@broadcom.com",
    "date": "2020-10-10T04:11:45",
    "name": "[v2,04/12] net/bnxt: fix PMD PF support in SR-IOV mode",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "58035e8a87d37ff556e603bf0cfcf899843a3608",
    "submitter": {
        "id": 501,
        "url": "http://patches.dpdk.org/api/people/501/?format=api",
        "name": "Ajit Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "delegate": {
        "id": 1766,
        "url": "http://patches.dpdk.org/api/users/1766/?format=api",
        "username": "ajitkhaparde",
        "first_name": "Ajit",
        "last_name": "Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201010041153.63921-5-ajit.khaparde@broadcom.com/mbox/",
    "series": [
        {
            "id": 12854,
            "url": "http://patches.dpdk.org/api/series/12854/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12854",
            "date": "2020-10-10T04:11:42",
            "name": "[v2,01/12] net/bnxt: fix the corruption of the session details",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/12854/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/80259/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/80259/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 587F1A04BC;\n\tSat, 10 Oct 2020 06:13:30 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id AAB7A1D6D4;\n\tSat, 10 Oct 2020 06:13:08 +0200 (CEST)",
            "from mail-pf1-f226.google.com (mail-pf1-f226.google.com\n [209.85.210.226]) by dpdk.org (Postfix) with ESMTP id 5C8A91D69C\n for <dev@dpdk.org>; Sat, 10 Oct 2020 06:13:06 +0200 (CEST)",
            "by mail-pf1-f226.google.com with SMTP id x13so5922022pfa.9\n for <dev@dpdk.org>; Fri, 09 Oct 2020 21:13:06 -0700 (PDT)",
            "from localhost.localdomain ([192.19.223.252])\n by smtp-relay.gmail.com with ESMTPS id b4sm1597022pjn.3.2020.10.09.21.13.03\n (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128);\n Fri, 09 Oct 2020 21:13:04 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=broadcom.com;\n s=google;\n h=from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding;\n bh=H49uHC+ovBJNcWDLUzyV16ecJUPkdKoVHPVznFUY7Cs=;\n b=OxWgXTmzLQmB5ZSFDxdkfKzBAuny6NnKuoqZxZSBUbUiN3Svt08Pq/LiVqFBDwq26U\n yGX+NUXkJcVkUjdYbk3jFhYFSeuTGuX19ehEQOj2d9EL+OD7eBIQkpnx4WPQxUJ5MS6M\n R/P8sbLyY2oZaWSyletG+L5tkYefyqNmsZuSY=",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references:mime-version:content-transfer-encoding;\n bh=H49uHC+ovBJNcWDLUzyV16ecJUPkdKoVHPVznFUY7Cs=;\n b=ibtHkv4MnhsMIddSn6csWeL+W4ll8PXZ1em12gKVcc/lTDkJ6DZyywnQ6qzJTrBDC4\n 8ngxhoCYkpC1Fk1SbVbLGks76CB0wmuZd9XBH2xCBiNWzyV3MO0KJGBhfXOcVGpqkJ4G\n e9SVC6FzVqPjQsfcj7Mrs3JLDD7voTIEB0Cv/Tqs/W6VWiEcqnrRTFD0lmafDcV0DaVQ\n EdSCa7xByHNd817V2lKgiyDVN7kcaMT68ckLO3+9eAs1yUwJd9lx0EaKwHRVchSEpAom\n +KsFgx5gnjoG5ggPdQFO+BdvstvQmFuvgqXvK+hP6HtPjF2KM9dxCxRnZZTfGoPVAjgH\n Lviw==",
        "X-Gm-Message-State": "AOAM533Bb1WtT5T+/I/5Id5S1IF6VCaY/dc49Mhw0X/mmLWTE085iRS+\n ZItg7KMAjX+1u55K4cmRigtMxqKz1wTXdQUKrLCs03OsJ0oWzQHh5pUe+/loiCRpMQQW3Rc2a0M\n hdbN1yOjLGmsHvydt+cMIbQVNN3fSguqlHXhZGGFXUqu5Br7luVyS9PWJvBz288Xg7fvTfxQz3E\n E2Ow==",
        "X-Google-Smtp-Source": "\n ABdhPJw78iZ8RQ4nrfH18uQzOZG366GGeqdxWevsEjpzYMM70zsKbzE2asexxXmRUehJX0z26kaOQYb0yY7i",
        "X-Received": "by 2002:a17:90b:378c:: with SMTP id\n mz12mr5837097pjb.137.1602303184357;\n Fri, 09 Oct 2020 21:13:04 -0700 (PDT)",
        "X-Relaying-Domain": "broadcom.com",
        "From": "Ajit Khaparde <ajit.khaparde@broadcom.com>",
        "To": "dev@dpdk.org",
        "Cc": "Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>, stable@dpdk.org,\n Somnath Kotur <somnath.kotur@broadcom.com>",
        "Date": "Fri,  9 Oct 2020 21:11:45 -0700",
        "Message-Id": "<20201010041153.63921-5-ajit.khaparde@broadcom.com>",
        "X-Mailer": "git-send-email 2.21.1 (Apple Git-122.3)",
        "In-Reply-To": "<20201010041153.63921-1-ajit.khaparde@broadcom.com>",
        "References": "<20201009111130.10422-1-somnath.kotur@broadcom.com>\n <20201010041153.63921-1-ajit.khaparde@broadcom.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v2 04/12] net/bnxt: fix PMD PF support in SR-IOV\n\tmode",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>\n\n1. Implement HWRM_FUNC_VF_RESOURCE_CFG command and use it to\n   reserve resources for VFs when NEW RM is enabled.\n2. Invoke PF’s FUNC_CFG before configuring VFs resources.\n3. Don’t consider max_rx_em_flows in max_l2_ctx calculation\n   when VFs are configured.\n4. Issue HWRM_FUNC_QCFG instead of HWRM_FUNC_QCAPS to find\n   out the actual allocated resources for VF.\n5. Don’t add random mac to the VF.\n6. Handle completion type CMPL_BASE_TYPE_HWRM_FWD_REQ instead\n   of CMPL_BASE_TYPE_HWRM_FWD_RESP.\n7. Don't enable HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE\n   when the list of HWRM commands that needs to be forwarded\n   to the PF is specified in HWRM_FUNC_DRV_RGTR.\n8. Update the HWRM commands list that can be forwared to the\n   PF.\n\nFixes: b7778e8a1c00 (\"net/bnxt: refactor to properly allocate resources for PF/VF\")\nCc: stable@dpdk.org\n\nSigned-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>\nReviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>\nReviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>\n---\n drivers/net/bnxt/bnxt.h        |   6 +-\n drivers/net/bnxt/bnxt_cpr.c    |   6 +-\n drivers/net/bnxt/bnxt_ethdev.c |  40 +--\n drivers/net/bnxt/bnxt_hwrm.c   | 461 ++++++++++++++++++++-------------\n drivers/net/bnxt/bnxt_hwrm.h   |  12 +-\n 5 files changed, 309 insertions(+), 216 deletions(-)",
    "diff": "diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h\nindex eca74486e..a951bca7a 100644\n--- a/drivers/net/bnxt/bnxt.h\n+++ b/drivers/net/bnxt/bnxt.h\n@@ -167,6 +167,9 @@\n #define\tBNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT\t\t\\\n \tHWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT\n \n+#define BNXT_HWRM_CMD_TO_FORWARD(cmd)\t\\\n+\t\t(bp->pf->vf_req_fwd[(cmd) / 32] |= (1 << ((cmd) % 32)))\n+\n struct bnxt_led_info {\n \tuint8_t\t     num_leds;\n \tuint8_t      led_id;\n@@ -664,9 +667,10 @@ struct bnxt {\n #define BNXT_FW_CAP_IF_CHANGE\t\tBIT(1)\n #define BNXT_FW_CAP_ERROR_RECOVERY\tBIT(2)\n #define BNXT_FW_CAP_ERR_RECOVER_RELOAD\tBIT(3)\n+#define BNXT_FW_CAP_HCOMM_FW_STATUS\tBIT(4)\n #define BNXT_FW_CAP_ADV_FLOW_MGMT\tBIT(5)\n #define BNXT_FW_CAP_ADV_FLOW_COUNTERS\tBIT(6)\n-#define BNXT_FW_CAP_HCOMM_FW_STATUS\tBIT(7)\n+#define BNXT_FW_CAP_LINK_ADMIN\t\tBIT(7)\n \n \tpthread_mutex_t         flow_lock;\n \ndiff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c\nindex a3a7e6ab7..54923948f 100644\n--- a/drivers/net/bnxt/bnxt_cpr.c\n+++ b/drivers/net/bnxt/bnxt_cpr.c\n@@ -239,7 +239,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)\n \t\tgoto reject;\n \t}\n \n-\tif (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {\n+\tif (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd)) {\n \t\t/*\n \t\t * In older firmware versions, the MAC had to be all zeros for\n \t\t * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all\n@@ -254,6 +254,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)\n \t\t\t\t(const uint8_t *)\"\\x00\\x00\\x00\\x00\\x00\");\n \t\t\t}\n \t\t}\n+\n \t\tif (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {\n \t\t\tstruct hwrm_cfa_l2_set_rx_mask_input *srm =\n \t\t\t\t\t\t\t(void *)fwd_cmd;\n@@ -265,6 +266,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)\n \t\t\t    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |\n \t\t\t    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);\n \t\t}\n+\n \t\t/* Forward */\n \t\trc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);\n \t\tif (rc) {\n@@ -306,7 +308,7 @@ int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)\n \t\tbnxt_handle_async_event(bp, cmp);\n \t\tevt = 1;\n \t\tbreak;\n-\tcase CMPL_BASE_TYPE_HWRM_FWD_RESP:\n+\tcase CMPL_BASE_TYPE_HWRM_FWD_REQ:\n \t\t/* Handle HWRM forwarded responses */\n \t\tbnxt_handle_fwd_req(bp, cmp);\n \t\tevt = 1;\ndiff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c\nindex 8b63134c3..b4654ec6a 100644\n--- a/drivers/net/bnxt/bnxt_ethdev.c\n+++ b/drivers/net/bnxt/bnxt_ethdev.c\n@@ -5208,37 +5208,14 @@ static void bnxt_config_vf_req_fwd(struct bnxt *bp)\n \tif (!BNXT_PF(bp))\n \t\treturn;\n \n-#define ALLOW_FUNC(x)\t\\\n-\t{ \\\n-\t\tuint32_t arg = (x); \\\n-\t\tbp->pf->vf_req_fwd[((arg) >> 5)] &= \\\n-\t\t~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \\\n-\t}\n-\n-\t/* Forward all requests if firmware is new enough */\n-\tif (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&\n-\t     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||\n-\t    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {\n-\t\tmemset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));\n-\t} else {\n-\t\tPMD_DRV_LOG(WARNING,\n-\t\t\t    \"Firmware too old for VF mailbox functionality\\n\");\n-\t\tmemset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));\n-\t}\n+\tmemset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));\n \n-\t/*\n-\t * The following are used for driver cleanup. 
If we disallow these,\n-\t * VF drivers can't clean up cleanly.\n-\t */\n-\tALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);\n-\tALLOW_FUNC(HWRM_VNIC_FREE);\n-\tALLOW_FUNC(HWRM_RING_FREE);\n-\tALLOW_FUNC(HWRM_RING_GRP_FREE);\n-\tALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);\n-\tALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);\n-\tALLOW_FUNC(HWRM_STAT_CTX_FREE);\n-\tALLOW_FUNC(HWRM_PORT_PHY_QCFG);\n-\tALLOW_FUNC(HWRM_VNIC_TPA_CFG);\n+\tif (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))\n+\t\tBNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);\n+\tBNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);\n+\tBNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);\n+\tBNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);\n+\tBNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);\n }\n \n uint16_t\n@@ -6189,7 +6166,10 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)\n \n \tbnxt_free_int(bp);\n \tbnxt_free_mem(bp, reconfig_dev);\n+\n \tbnxt_hwrm_func_buf_unrgtr(bp);\n+\trte_free(bp->pf->vf_req_buf);\n+\n \trc = bnxt_hwrm_func_driver_unregister(bp, 0);\n \tbp->flags &= ~BNXT_FLAG_REGISTERED;\n \tbnxt_free_ctx_mem(bp);\ndiff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c\nindex faeaf4b5d..8133afc74 100644\n--- a/drivers/net/bnxt/bnxt_hwrm.c\n+++ b/drivers/net/bnxt/bnxt_hwrm.c\n@@ -765,7 +765,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)\n \tbp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);\n \tbp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);\n \tbp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);\n-\tif (!BNXT_CHIP_THOR(bp))\n+\tif (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)\n \t\tbp->max_l2_ctx += bp->max_rx_em_flows;\n \t/* TODO: For now, do not support VMDq/RFS on VFs. */\n \tif (BNXT_PF(bp)) {\n@@ -803,6 +803,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)\n \tif (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)\n \t\tbp->fw_cap |= BNXT_FW_CAP_HOT_RESET;\n \n+\tif (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)\n+\t\tbp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;\n+\n \tHWRM_UNLOCK();\n \n \treturn rc;\n@@ -818,16 +821,15 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)\n \t\tif (rc)\n \t\t\treturn rc;\n \n+\t\t/* On older FW,\n+\t\t * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.\n+\t\t * But the error can be ignored. Return success.\n+\t\t */\n \t\trc = bnxt_hwrm_func_resc_qcaps(bp);\n \t\tif (!rc)\n \t\t\tbp->flags |= BNXT_FLAG_NEW_RM;\n \t}\n \n-\t/* On older FW,\n-\t * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.\n-\t * But the error can be ignored. Return success.\n-\t */\n-\n \treturn 0;\n }\n \n@@ -916,14 +918,6 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)\n \t\tmemcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,\n \t\t       RTE_MIN(sizeof(req.vf_req_fwd),\n \t\t\t       sizeof(bp->pf->vf_req_fwd)));\n-\n-\t\t/*\n-\t\t * PF can sniff HWRM API issued by VF. This can be set up by\n-\t\t * linux driver and inherited by the DPDK PF driver. 
Clear\n-\t\t * this HWRM sniffer list in FW because DPDK PF driver does\n-\t\t * not support this.\n-\t\t */\n-\t\tflags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;\n \t}\n \n \treq.flags = rte_cpu_to_le_32(flags);\n@@ -1052,21 +1046,19 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)\n \n \tHWRM_CHECK_RESULT_SILENT();\n \n-\tif (BNXT_VF(bp)) {\n-\t\tbp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);\n-\t\tbp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);\n-\t\tbp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);\n-\t\tbp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);\n-\t\tbp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);\n-\t\t/* func_resource_qcaps does not return max_rx_em_flows.\n-\t\t * So use the value provided by func_qcaps.\n-\t\t */\n-\t\tbp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);\n-\t\tif (!BNXT_CHIP_THOR(bp))\n-\t\t\tbp->max_l2_ctx += bp->max_rx_em_flows;\n-\t\tbp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);\n-\t\tbp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);\n-\t}\n+\tbp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);\n+\tbp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);\n+\tbp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);\n+\tbp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);\n+\tbp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);\n+\t/* func_resource_qcaps does not return max_rx_em_flows.\n+\t * So use the value provided by func_qcaps.\n+\t */\n+\tbp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);\n+\tif (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)\n+\t\tbp->max_l2_ctx += bp->max_rx_em_flows;\n+\tbp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);\n+\tbp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);\n \tbp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);\n \tbp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);\n \tif (bp->vf_resv_strategy >\n@@ -3300,33 +3292,8 @@ int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)\n \treturn 0;\n }\n \n-static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,\n-\t\t\t\t   struct hwrm_func_qcaps_output *qcaps)\n-{\n-\tqcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;\n-\tmemcpy(qcaps->mac_address, fcfg->dflt_mac_addr,\n-\t       sizeof(qcaps->mac_address));\n-\tqcaps->max_l2_ctxs = fcfg->num_l2_ctxs;\n-\tqcaps->max_rx_rings = fcfg->num_rx_rings;\n-\tqcaps->max_tx_rings = fcfg->num_tx_rings;\n-\tqcaps->max_cmpl_rings = fcfg->num_cmpl_rings;\n-\tqcaps->max_stat_ctx = fcfg->num_stat_ctxs;\n-\tqcaps->max_vfs = 0;\n-\tqcaps->first_vf_id = 0;\n-\tqcaps->max_vnics = fcfg->num_vnics;\n-\tqcaps->max_decap_records = 0;\n-\tqcaps->max_encap_records = 0;\n-\tqcaps->max_tx_wm_flows = 0;\n-\tqcaps->max_tx_em_flows = 0;\n-\tqcaps->max_rx_wm_flows = 0;\n-\tqcaps->max_rx_em_flows = 0;\n-\tqcaps->max_flow_id = 0;\n-\tqcaps->max_mcast_filters = fcfg->num_mcast_filters;\n-\tqcaps->max_sp_tx_rings = 0;\n-\tqcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;\n-}\n-\n-static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)\n+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,\n+\t\t\t\t struct bnxt_pf_resource_info *pf_resc)\n {\n \tstruct hwrm_func_cfg_input req = {0};\n \tstruct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;\n@@ -3345,7 +3312,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)\n \n \tif (BNXT_HAS_RING_GRPS(bp)) {\n \t\tenables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;\n-\t\treq.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);\n+\t\treq.num_hw_ring_grps 
=\n+\t\t\trte_cpu_to_le_16(pf_resc->num_hw_ring_grps);\n \t} else if (BNXT_HAS_NQ(bp)) {\n \t\tenables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;\n \t\treq.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);\n@@ -3354,12 +3322,12 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)\n \treq.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);\n \treq.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);\n \treq.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));\n-\treq.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);\n-\treq.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);\n-\treq.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);\n-\treq.num_tx_rings = rte_cpu_to_le_16(tx_rings);\n-\treq.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);\n-\treq.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);\n+\treq.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);\n+\treq.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);\n+\treq.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);\n+\treq.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);\n+\treq.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);\n+\treq.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);\n \treq.num_vnics = rte_cpu_to_le_16(bp->max_vnics);\n \treq.fid = rte_cpu_to_le_16(0xffff);\n \treq.enables = rte_cpu_to_le_32(enables);\n@@ -3374,9 +3342,43 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)\n \treturn rc;\n }\n \n-static void populate_vf_func_cfg_req(struct bnxt *bp,\n-\t\t\t\t     struct hwrm_func_cfg_input *req,\n-\t\t\t\t     int num_vfs)\n+/* min values are the guaranteed resources and max values are subject\n+ * to availability. The strategy for now is to keep both min & max\n+ * values the same.\n+ */\n+static void\n+bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,\n+\t\t\t      struct hwrm_func_vf_resource_cfg_input *req,\n+\t\t\t      int num_vfs)\n+{\n+\treq->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /\n+\t\t\t\t\t       (num_vfs + 1));\n+\treq->min_rsscos_ctx = req->max_rsscos_ctx;\n+\treq->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));\n+\treq->min_stat_ctx = req->max_stat_ctx;\n+\treq->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /\n+\t\t\t\t\t       (num_vfs + 1));\n+\treq->min_cmpl_rings = req->max_cmpl_rings;\n+\treq->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));\n+\treq->min_tx_rings = req->max_tx_rings;\n+\treq->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));\n+\treq->min_rx_rings = req->max_rx_rings;\n+\treq->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));\n+\treq->min_l2_ctxs = req->max_l2_ctxs;\n+\t/* TODO: For now, do not support VMDq/RFS on VFs. 
*/\n+\treq->max_vnics = rte_cpu_to_le_16(1);\n+\treq->min_vnics = req->max_vnics;\n+\treq->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /\n+\t\t\t\t\t\t (num_vfs + 1));\n+\treq->min_hw_ring_grps = req->max_hw_ring_grps;\n+\treq->flags =\n+\t rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);\n+}\n+\n+static void\n+bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,\n+\t\t\t      struct hwrm_func_cfg_input *req,\n+\t\t\t      int num_vfs)\n {\n \treq->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |\n \t\t\tHWRM_FUNC_CFG_INPUT_ENABLES_MRU |\n@@ -3407,60 +3409,29 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,\n \t\t\t\t\t\t (num_vfs + 1));\n }\n \n-static void add_random_mac_if_needed(struct bnxt *bp,\n-\t\t\t\t     struct hwrm_func_cfg_input *cfg_req,\n-\t\t\t\t     int vf)\n-{\n-\tstruct rte_ether_addr mac;\n-\n-\tif (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))\n-\t\treturn;\n-\n-\tif (memcmp(mac.addr_bytes, \"\\x00\\x00\\x00\\x00\\x00\", 6) == 0) {\n-\t\tcfg_req->enables |=\n-\t\trte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);\n-\t\trte_eth_random_addr(cfg_req->dflt_mac_addr);\n-\t\tbp->pf->vf_info[vf].random_mac = true;\n-\t} else {\n-\t\tmemcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,\n-\t\t\tRTE_ETHER_ADDR_LEN);\n-\t}\n-}\n-\n-static int reserve_resources_from_vf(struct bnxt *bp,\n-\t\t\t\t     struct hwrm_func_cfg_input *cfg_req,\n+/* Update the port wide resource values based on how many resources\n+ * got allocated to the VF.\n+ */\n+static int bnxt_update_max_resources(struct bnxt *bp,\n \t\t\t\t     int vf)\n {\n-\tstruct hwrm_func_qcaps_input req = {0};\n-\tstruct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;\n+\tstruct hwrm_func_qcfg_input req = {0};\n+\tstruct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;\n \tint rc;\n \n \t/* Get the actual allocated values now */\n-\tHWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);\n+\tHWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);\n \treq.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);\n \trc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);\n+\tHWRM_CHECK_RESULT();\n \n-\tif (rc) {\n-\t\tPMD_DRV_LOG(ERR, \"hwrm_func_qcaps failed rc:%d\\n\", rc);\n-\t\tcopy_func_cfg_to_qcaps(cfg_req, resp);\n-\t} else if (resp->error_code) {\n-\t\trc = rte_le_to_cpu_16(resp->error_code);\n-\t\tPMD_DRV_LOG(ERR, \"hwrm_func_qcaps error %d\\n\", rc);\n-\t\tcopy_func_cfg_to_qcaps(cfg_req, resp);\n-\t}\n-\n-\tbp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);\n-\tbp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);\n-\tbp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);\n-\tbp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);\n-\tbp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);\n-\tbp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);\n-\t/*\n-\t * TODO: While not supporting VMDq with VFs, max_vnics is always\n-\t * forced to 1 in this case\n-\t */\n-\t//bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);\n-\tbp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);\n+\tbp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);\n+\tbp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);\n+\tbp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);\n+\tbp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);\n+\tbp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);\n+\tbp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);\n+\tbp->max_ring_grps -= 
rte_le_to_cpu_16(resp->alloc_hw_ring_grps);\n \n \tHWRM_UNLOCK();\n \n@@ -3485,7 +3456,8 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)\n \treturn rc;\n }\n \n-static int update_pf_resource_max(struct bnxt *bp)\n+static int bnxt_query_pf_resources(struct bnxt *bp,\n+\t\t\t\t   struct bnxt_pf_resource_info *pf_resc)\n {\n \tstruct hwrm_func_qcfg_input req = {0};\n \tstruct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;\n@@ -3497,8 +3469,13 @@ static int update_pf_resource_max(struct bnxt *bp)\n \trc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);\n \tHWRM_CHECK_RESULT();\n \n-\t/* Only TX ring value reflects actual allocation? TODO */\n-\tbp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);\n+\tpf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);\n+\tpf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);\n+\tpf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);\n+\tpf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);\n+\tpf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);\n+\tpf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);\n+\tpf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);\n \tbp->pf->evb_mode = resp->evb_mode;\n \n \tHWRM_UNLOCK();\n@@ -3506,8 +3483,42 @@ static int update_pf_resource_max(struct bnxt *bp)\n \treturn rc;\n }\n \n+static void\n+bnxt_calculate_pf_resources(struct bnxt *bp,\n+\t\t\t    struct bnxt_pf_resource_info *pf_resc,\n+\t\t\t    int num_vfs)\n+{\n+\tif (!num_vfs) {\n+\t\tpf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;\n+\t\tpf_resc->num_stat_ctxs = bp->max_stat_ctx;\n+\t\tpf_resc->num_cp_rings = bp->max_cp_rings;\n+\t\tpf_resc->num_tx_rings = bp->max_tx_rings;\n+\t\tpf_resc->num_rx_rings = bp->max_rx_rings;\n+\t\tpf_resc->num_l2_ctxs = bp->max_l2_ctx;\n+\t\tpf_resc->num_hw_ring_grps = bp->max_ring_grps;\n+\n+\t\treturn;\n+\t}\n+\n+\tpf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +\n+\t\t\t\t   bp->max_rsscos_ctx % (num_vfs + 1);\n+\tpf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +\n+\t\t\t\t bp->max_stat_ctx % (num_vfs + 1);\n+\tpf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +\n+\t\t\t\tbp->max_cp_rings % (num_vfs + 1);\n+\tpf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +\n+\t\t\t\tbp->max_tx_rings % (num_vfs + 1);\n+\tpf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +\n+\t\t\t\tbp->max_rx_rings % (num_vfs + 1);\n+\tpf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +\n+\t\t\t       bp->max_l2_ctx % (num_vfs + 1);\n+\tpf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +\n+\t\t\t\t    bp->max_ring_grps % (num_vfs + 1);\n+}\n+\n int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)\n {\n+\tstruct bnxt_pf_resource_info pf_resc = { 0 };\n \tint rc;\n \n \tif (!BNXT_PF(bp)) {\n@@ -3519,82 +3530,100 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)\n \tif (rc)\n \t\treturn rc;\n \n+\tbnxt_calculate_pf_resources(bp, &pf_resc, 0);\n+\n \tbp->pf->func_cfg_flags &=\n \t\t~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |\n \t\t  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);\n \tbp->pf->func_cfg_flags |=\n \t\tHWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;\n-\trc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);\n+\trc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);\n \trc = __bnxt_hwrm_func_qcaps(bp);\n \treturn rc;\n }\n \n-int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)\n+static int\n+bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)\n {\n-\tstruct 
hwrm_func_cfg_input req = {0};\n-\tstruct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;\n-\tint i;\n-\tsize_t sz;\n-\tint rc = 0;\n-\tsize_t req_buf_sz;\n-\n-\tif (!BNXT_PF(bp)) {\n-\t\tPMD_DRV_LOG(ERR, \"Attempt to allcoate VFs on a VF!\\n\");\n-\t\treturn -EINVAL;\n-\t}\n-\n-\trc = bnxt_hwrm_func_qcaps(bp);\n-\n-\tif (rc)\n-\t\treturn rc;\n-\n-\tbp->pf->active_vfs = num_vfs;\n-\n-\t/*\n-\t * First, configure the PF to only use one TX ring.  This ensures that\n-\t * there are enough rings for all VFs.\n-\t *\n-\t * If we don't do this, when we call func_alloc() later, we will lock\n-\t * extra rings to the PF that won't be available during func_cfg() of\n-\t * the VFs.\n-\t *\n-\t * This has been fixed with firmware versions above 20.6.54\n-\t */\n-\tbp->pf->func_cfg_flags &=\n-\t\t~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |\n-\t\t  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);\n-\tbp->pf->func_cfg_flags |=\n-\t\tHWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;\n-\trc = bnxt_hwrm_pf_func_cfg(bp, 1);\n-\tif (rc)\n-\t\treturn rc;\n+\tsize_t req_buf_sz, sz;\n+\tint i, rc;\n \n-\t/*\n-\t * Now, create and register a buffer to hold forwarded VF requests\n-\t */\n \treq_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;\n \tbp->pf->vf_req_buf = rte_malloc(\"bnxt_vf_fwd\", req_buf_sz,\n \t\tpage_roundup(num_vfs * HWRM_MAX_REQ_LEN));\n \tif (bp->pf->vf_req_buf == NULL) {\n-\t\trc = -ENOMEM;\n-\t\tgoto error_free;\n+\t\treturn -ENOMEM;\n \t}\n+\n \tfor (sz = 0; sz < req_buf_sz; sz += getpagesize())\n \t\trte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);\n+\n \tfor (i = 0; i < num_vfs; i++)\n \t\tbp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +\n-\t\t\t\t\t(i * HWRM_MAX_REQ_LEN);\n+\t\t\t\t\t     (i * HWRM_MAX_REQ_LEN);\n \n-\trc = bnxt_hwrm_func_buf_rgtr(bp);\n+\trc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);\n \tif (rc)\n-\t\tgoto error_free;\n+\t\trte_free(bp->pf->vf_req_buf);\n+\n+\treturn rc;\n+}\n \n-\tpopulate_vf_func_cfg_req(bp, &req, num_vfs);\n+static int\n+bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)\n+{\n+\tstruct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;\n+\tstruct hwrm_func_vf_resource_cfg_input req = {0};\n+\tint i, rc = 0;\n \n+\tbnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);\n \tbp->pf->active_vfs = 0;\n \tfor (i = 0; i < num_vfs; i++) {\n-\t\tadd_random_mac_if_needed(bp, &req, i);\n+\t\tHWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);\n+\t\treq.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);\n+\t\trc = bnxt_hwrm_send_message(bp,\n+\t\t\t\t\t    &req,\n+\t\t\t\t\t    sizeof(req),\n+\t\t\t\t\t    BNXT_USE_CHIMP_MB);\n+\t\tif (rc || resp->error_code) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\"Failed to initialize VF %d\\n\", i);\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\"Not all VFs available. 
(%d, %d)\\n\",\n+\t\t\t\trc, resp->error_code);\n+\t\t\tHWRM_UNLOCK();\n+\n+\t\t\t/* If the first VF configuration itself fails,\n+\t\t\t * unregister the vf_fwd_request buffer.\n+\t\t\t */\n+\t\t\tif (i == 0)\n+\t\t\t\tbnxt_hwrm_func_buf_unrgtr(bp);\n+\t\t\tbreak;\n+\t\t}\n+\t\tHWRM_UNLOCK();\n+\n+\t\t/* Update the max resource values based on the resource values\n+\t\t * allocated to the VF.\n+\t\t */\n+\t\tbnxt_update_max_resources(bp, i);\n+\t\tbp->pf->active_vfs++;\n+\t\tbnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)\n+{\n+\tstruct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;\n+\tstruct hwrm_func_cfg_input req = {0};\n+\tint i, rc;\n \n+\tbnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);\n+\n+\tbp->pf->active_vfs = 0;\n+\tfor (i = 0; i < num_vfs; i++) {\n \t\tHWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);\n \t\treq.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);\n \t\treq.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);\n@@ -3609,40 +3638,107 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)\n \n \t\tif (rc || resp->error_code) {\n \t\t\tPMD_DRV_LOG(ERR,\n-\t\t\t\t\"Failed to initizlie VF %d\\n\", i);\n+\t\t\t\t\"Failed to initialize VF %d\\n\", i);\n \t\t\tPMD_DRV_LOG(ERR,\n \t\t\t\t\"Not all VFs available. (%d, %d)\\n\",\n \t\t\t\trc, resp->error_code);\n \t\t\tHWRM_UNLOCK();\n+\n+\t\t\t/* If the first VF configuration itself fails,\n+\t\t\t * unregister the vf_fwd_request buffer.\n+\t\t\t */\n+\t\t\tif (i == 0)\n+\t\t\t\tbnxt_hwrm_func_buf_unrgtr(bp);\n \t\t\tbreak;\n \t\t}\n \n \t\tHWRM_UNLOCK();\n \n-\t\treserve_resources_from_vf(bp, &req, i);\n+\t\t/* Update the max resource values based on the resource values\n+\t\t * allocated to the VF.\n+\t\t */\n+\t\tbnxt_update_max_resources(bp, i);\n \t\tbp->pf->active_vfs++;\n \t\tbnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);\n \t}\n \n+\treturn 0;\n+}\n+\n+static void\n+bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)\n+{\n+\tif (bp->flags & BNXT_FLAG_NEW_RM)\n+\t\tbnxt_process_vf_resc_config_new(bp, num_vfs);\n+\telse\n+\t\tbnxt_process_vf_resc_config_old(bp, num_vfs);\n+}\n+\n+static void\n+bnxt_update_pf_resources(struct bnxt *bp,\n+\t\t\t struct bnxt_pf_resource_info *pf_resc)\n+{\n+\tbp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;\n+\tbp->max_stat_ctx = pf_resc->num_stat_ctxs;\n+\tbp->max_cp_rings = pf_resc->num_cp_rings;\n+\tbp->max_tx_rings = pf_resc->num_tx_rings;\n+\tbp->max_rx_rings = pf_resc->num_rx_rings;\n+\tbp->max_ring_grps = pf_resc->num_hw_ring_grps;\n+}\n+\n+static int32_t\n+bnxt_configure_pf_resources(struct bnxt *bp,\n+\t\t\t    struct bnxt_pf_resource_info *pf_resc)\n+{\n \t/*\n-\t * Now configure the PF to use \"the rest\" of the resources\n-\t * We're using STD_TX_RING_MODE here though which will limit the TX\n-\t * rings.  This will allow QoS to function properly.  Not setting this\n+\t * We're using STD_TX_RING_MODE here which will limit the TX\n+\t * rings. This will allow QoS to function properly. 
Not setting this\n \t * will cause PF rings to break bandwidth settings.\n \t */\n-\trc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);\n+\tbp->pf->func_cfg_flags &=\n+\t\t~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |\n+\t\t  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);\n+\tbp->pf->func_cfg_flags |=\n+\t\tHWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;\n+\treturn bnxt_hwrm_pf_func_cfg(bp, pf_resc);\n+}\n+\n+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)\n+{\n+\tstruct bnxt_pf_resource_info pf_resc = { 0 };\n+\tint rc;\n+\n+\tif (!BNXT_PF(bp)) {\n+\t\tPMD_DRV_LOG(ERR, \"Attempt to allocate VFs on a VF!\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trc = bnxt_hwrm_func_qcaps(bp);\n \tif (rc)\n-\t\tgoto error_free;\n+\t\treturn rc;\n+\n+\tbnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);\n \n-\trc = update_pf_resource_max(bp);\n+\trc = bnxt_configure_pf_resources(bp, &pf_resc);\n \tif (rc)\n-\t\tgoto error_free;\n+\t\treturn rc;\n \n-\treturn rc;\n+\trc = bnxt_query_pf_resources(bp, &pf_resc);\n+\tif (rc)\n+\t\treturn rc;\n \n-error_free:\n-\tbnxt_hwrm_func_buf_unrgtr(bp);\n-\treturn rc;\n+\t/*\n+\t * Now, create and register a buffer to hold forwarded VF requests\n+\t */\n+\trc = bnxt_configure_vf_req_buf(bp, num_vfs);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tbnxt_configure_vf_resources(bp, num_vfs);\n+\n+\tbnxt_update_pf_resources(bp, &pf_resc);\n+\n+\treturn 0;\n }\n \n int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)\n@@ -3747,23 +3843,24 @@ int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)\n \treturn bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);\n }\n \n-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)\n+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)\n {\n-\tint rc = 0;\n-\tstruct hwrm_func_buf_rgtr_input req = {.req_type = 0 };\n \tstruct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;\n+\tstruct hwrm_func_buf_rgtr_input req = {.req_type = 0 };\n+\tint rc;\n \n \tHWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);\n \n \treq.req_buf_num_pages = rte_cpu_to_le_16(1);\n-\treq.req_buf_page_size = rte_cpu_to_le_16(\n-\t\t\t page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));\n+\treq.req_buf_page_size =\n+\t\trte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));\n \treq.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);\n \treq.req_buf_page_addr0 =\n \t\trte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));\n \tif (req.req_buf_page_addr0 == RTE_BAD_IOVA) {\n \t\tPMD_DRV_LOG(ERR,\n \t\t\t\"unable to map buffer address to physical memory\\n\");\n+\t\tHWRM_UNLOCK();\n \t\treturn -ENOMEM;\n \t}\n \ndiff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h\nindex e98b1fe41..a7fa7f66b 100644\n--- a/drivers/net/bnxt/bnxt_hwrm.h\n+++ b/drivers/net/bnxt/bnxt_hwrm.h\n@@ -107,6 +107,16 @@ enum bnxt_flow_dir {\n \tBNXT_DIR_MAX\n };\n \n+struct bnxt_pf_resource_info {\n+\tuint16_t num_rsscos_ctxs;\n+\tuint16_t num_stat_ctxs;\n+\tuint16_t num_tx_rings;\n+\tuint16_t num_rx_rings;\n+\tuint16_t num_cp_rings;\n+\tuint16_t num_l2_ctxs;\n+\tuint32_t num_hw_ring_grps;\n+};\n+\n #define BNXT_CTX_VAL_INVAL\t0xFFFF\n \n int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,\n@@ -127,7 +137,7 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,\n int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,\n \t\t\t      void *encaped, size_t ec_size);\n \n-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp);\n+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs);\n int bnxt_hwrm_func_buf_unrgtr(struct bnxt 
*bp);\n int bnxt_hwrm_func_driver_register(struct bnxt *bp);\n int bnxt_hwrm_func_qcaps(struct bnxt *bp);\n",
    "prefixes": [
        "v2",
        "04/12"
    ]
}