get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.
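
The descriptions above are the endpoint's generated summary. As a usage sketch (not part of the recorded exchange below), the endpoint can be driven from any HTTP client; this example uses Python with the third-party requests library, the token value is a placeholder, and write access assumes an account with maintainer rights on the project.

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # placeholder; generate a real token from your Patchwork profile

# GET: show a patch (read access needs no authentication)
resp = requests.get(BASE + "/patches/64322/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partially update a patch; only the fields sent in the body change
resp = requests.patch(BASE + "/patches/64322/",
                      headers={"Authorization": "Token " + TOKEN},
                      json={"state": "accepted"})
resp.raise_for_status()

A PUT request takes the same shape but is expected to carry the full set of writable fields rather than a partial body.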

GET /api/patches/64322/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 64322,
    "url": "http://patches.dpdk.org/api/patches/64322/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200109031559.63194-2-huwei013@chinasoftinc.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200109031559.63194-2-huwei013@chinasoftinc.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200109031559.63194-2-huwei013@chinasoftinc.com",
    "date": "2020-01-09T03:15:49",
    "name": "[01/11] net/hns3: support different numbered Rx and Tx queues",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "a6c1cd06170dd4a8ac7cd89ab06d1d3ec6dd40a5",
    "submitter": {
        "id": 1537,
        "url": "http://patches.dpdk.org/api/people/1537/?format=api",
        "name": "Wei Hu (Xavier)",
        "email": "huwei013@chinasoftinc.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200109031559.63194-2-huwei013@chinasoftinc.com/mbox/",
    "series": [
        {
            "id": 8023,
            "url": "http://patches.dpdk.org/api/series/8023/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=8023",
            "date": "2020-01-09T03:15:48",
            "name": "misc updates and fixes for hns3 PMD driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/8023/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/64322/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/64322/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 95F5EA04F3;\n\tThu,  9 Jan 2020 04:25:06 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2C2C51DB62;\n\tThu,  9 Jan 2020 04:24:54 +0100 (CET)",
            "from incedge.chinasoftinc.com (unknown [114.113.233.8])\n by dpdk.org (Postfix) with ESMTP id 7ADEE1D40E\n for <dev@dpdk.org>; Thu,  9 Jan 2020 04:23:35 +0100 (CET)",
            "from mail.chinasoftinc.com (inccas002.ito.icss [10.168.0.52]) by\n incedge.chinasoftinc.com with ESMTP id DAr1xwW4GhWcNQzB (version=TLSv1\n cipher=ECDHE-RSA-AES256-SHA bits=256 verify=NO) for <dev@dpdk.org>;\n Thu, 09 Jan 2020 11:22:48 +0800 (CST)",
            "from localhost.localdomain (203.160.91.226) by INCCAS002.ito.icss\n (10.168.0.60) with Microsoft SMTP Server id 14.3.439.0; Thu, 9 Jan 2020\n 11:16:10 +0800"
        ],
        "X-ASG-Debug-ID": "1578539661-0a3dd116cf0458000e-TfluYd",
        "X-Barracuda-Envelope-From": "huwei013@chinasoftinc.com",
        "X-Barracuda-RBL-Trusted-Forwarder": [
            "10.168.0.52",
            "10.168.0.60"
        ],
        "X-ASG-Whitelist": "Client",
        "From": "\"Wei Hu (Xavier)\" <huwei013@chinasoftinc.com>",
        "To": "<dev@dpdk.org>",
        "Date": "Thu, 9 Jan 2020 11:15:49 +0800",
        "X-ASG-Orig-Subj": "[PATCH 01/11] net/hns3: support different numbered Rx and Tx\n queues",
        "Message-ID": "<20200109031559.63194-2-huwei013@chinasoftinc.com>",
        "X-Mailer": "git-send-email 2.23.0",
        "In-Reply-To": "<20200109031559.63194-1-huwei013@chinasoftinc.com>",
        "References": "<20200109031559.63194-1-huwei013@chinasoftinc.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[203.160.91.226]",
        "X-Barracuda-Connect": "inccas002.ito.icss[10.168.0.52]",
        "X-Barracuda-Start-Time": "1578540167",
        "X-Barracuda-Encrypted": "ECDHE-RSA-AES256-SHA",
        "X-Barracuda-URL": "https://spam.chinasoftinc.com:443/cgi-mod/mark.cgi",
        "X-Virus-Scanned": "by bsmtpd at chinasoftinc.com",
        "X-Barracuda-Scan-Msg-Size": "47336",
        "Subject": "[dpdk-dev] [PATCH 01/11] net/hns3: support different numbered Rx\n\tand Tx queues",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: \"Wei Hu (Xavier)\" <xavier.huwei@huawei.com>\n\nHardware does not support individually enable/disable/reset the Tx or Rx\nqueue in hns3 network engine, driver must enable/disable/reset Tx and Rx\nqueues at the same time.\n\nCurrently, hns3 PMD driver does not support the scenarios as below:\n1) When calling the following function, the input parameter nb_rx_q and\n   nb_tx_q are not equal.\n     rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q,\n                      uint16_t nb_tx_q,\n\t\t      const struct rte_eth_conf *dev_conf);\n2) When calling the following functions to setup queues, the cumulatively\n   setupped Rx queues are not the same as the setupped Tx queues.\n     rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,,,);\n     rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,,,);\nHowever, these are common usage scenarios in some applications, such as,\nl3fwd, ip_ressmbly and OVS-DPDK, etc.\n\nThis patch adds support for this usage of these functions by setupping\nfake Tx or Rx queues to adjust numbers of Tx/Rx queues. But these fake\nqueues are imperceptible, and can not be used by upper applications.\n\nSigned-off-by: Huisong Li <lihuisong@huawei.com>\nSigned-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>\n---\n drivers/net/hns3/hns3_dcb.c       |  88 ++--\n drivers/net/hns3/hns3_dcb.h       |   4 +-\n drivers/net/hns3/hns3_ethdev.c    |  56 +--\n drivers/net/hns3/hns3_ethdev.h    |  16 +-\n drivers/net/hns3/hns3_ethdev_vf.c |  68 +--\n drivers/net/hns3/hns3_flow.c      |   9 +-\n drivers/net/hns3/hns3_rxtx.c      | 675 +++++++++++++++++++++++++-----\n drivers/net/hns3/hns3_rxtx.h      |  11 +\n 8 files changed, 734 insertions(+), 193 deletions(-)",
    "diff": "diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c\nindex 19235dfb9..369a40e6a 100644\n--- a/drivers/net/hns3/hns3_dcb.c\n+++ b/drivers/net/hns3/hns3_dcb.c\n@@ -578,17 +578,33 @@ hns3_dcb_pri_shaper_cfg(struct hns3_hw *hw)\n }\n \n void\n-hns3_tc_queue_mapping_cfg(struct hns3_hw *hw)\n+hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)\n+{\n+\tuint16_t rx_qnum_per_tc;\n+\n+\trx_qnum_per_tc = nb_rx_q / hw->num_tc;\n+\trx_qnum_per_tc = RTE_MIN(hw->rss_size_max, rx_qnum_per_tc);\n+\tif (hw->alloc_rss_size != rx_qnum_per_tc) {\n+\t\thns3_info(hw, \"rss size changes from %u to %u\",\n+\t\t\t  hw->alloc_rss_size, rx_qnum_per_tc);\n+\t\thw->alloc_rss_size = rx_qnum_per_tc;\n+\t}\n+\thw->used_rx_queues = hw->num_tc * hw->alloc_rss_size;\n+}\n+\n+void\n+hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue)\n {\n \tstruct hns3_tc_queue_info *tc_queue;\n \tuint8_t i;\n \n+\thw->tx_qnum_per_tc = nb_queue / hw->num_tc;\n \tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n \t\ttc_queue = &hw->tc_queue[i];\n \t\tif (hw->hw_tc_map & BIT(i) && i < hw->num_tc) {\n \t\t\ttc_queue->enable = true;\n-\t\t\ttc_queue->tqp_offset = i * hw->alloc_rss_size;\n-\t\t\ttc_queue->tqp_count = hw->alloc_rss_size;\n+\t\t\ttc_queue->tqp_offset = i * hw->tx_qnum_per_tc;\n+\t\t\ttc_queue->tqp_count = hw->tx_qnum_per_tc;\n \t\t\ttc_queue->tc = i;\n \t\t} else {\n \t\t\t/* Set to default queue if TC is disable */\n@@ -598,30 +614,22 @@ hns3_tc_queue_mapping_cfg(struct hns3_hw *hw)\n \t\t\ttc_queue->tc = 0;\n \t\t}\n \t}\n+\thw->used_tx_queues = hw->num_tc * hw->tx_qnum_per_tc;\n }\n \n static void\n-hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t queue_num)\n+hns3_dcb_update_tc_queue_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,\n+\t\t\t\t uint16_t nb_tx_q)\n {\n \tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n \tstruct hns3_pf *pf = &hns->pf;\n-\tuint16_t tqpnum_per_tc;\n-\tuint16_t alloc_tqps;\n-\n-\talloc_tqps = RTE_MIN(hw->tqps_num, queue_num);\n-\thw->num_tc = RTE_MIN(alloc_tqps, hw->dcb_info.num_tc);\n-\ttqpnum_per_tc = RTE_MIN(hw->rss_size_max, alloc_tqps / hw->num_tc);\n \n-\tif (hw->alloc_rss_size != tqpnum_per_tc) {\n-\t\tPMD_INIT_LOG(INFO, \"rss size changes from %d to %d\",\n-\t\t\t     hw->alloc_rss_size, tqpnum_per_tc);\n-\t\thw->alloc_rss_size = tqpnum_per_tc;\n-\t}\n-\thw->alloc_tqps = hw->num_tc * hw->alloc_rss_size;\n+\thw->num_tc = hw->dcb_info.num_tc;\n+\thns3_set_rss_size(hw, nb_rx_q);\n+\thns3_tc_queue_mapping_cfg(hw, nb_tx_q);\n \n-\thns3_tc_queue_mapping_cfg(hw);\n-\n-\tmemcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);\n+\tif (!hns->is_vf)\n+\t\tmemcpy(pf->prio_tc, hw->dcb_info.prio_tc, HNS3_MAX_USER_PRIO);\n }\n \n int\n@@ -1309,20 +1317,35 @@ hns3_dcb_info_cfg(struct hns3_adapter *hns)\n \tfor (i = 0; i < HNS3_MAX_USER_PRIO; i++)\n \t\thw->dcb_info.prio_tc[i] = dcb_rx_conf->dcb_tc[i];\n \n-\thns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues);\n+\thns3_dcb_update_tc_queue_mapping(hw, hw->data->nb_rx_queues,\n+\t\t\t\t\t hw->data->nb_tx_queues);\n }\n \n-static void\n+static int\n hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)\n {\n \tstruct hns3_pf *pf = &hns->pf;\n \tstruct hns3_hw *hw = &hns->hw;\n+\tuint16_t nb_rx_q = hw->data->nb_rx_queues;\n+\tuint16_t nb_tx_q = hw->data->nb_tx_queues;\n \tuint8_t bit_map = 0;\n \tuint8_t i;\n \n \tif (pf->tx_sch_mode != HNS3_FLAG_TC_BASE_SCH_MODE &&\n \t    hw->dcb_info.num_pg != 1)\n-\t\treturn;\n+\t\treturn -EINVAL;\n+\n+\tif (nb_rx_q < num_tc) 
{\n+\t\thns3_err(hw, \"number of Rx queues(%d) is less than tcs(%d).\",\n+\t\t\t nb_rx_q, num_tc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (nb_tx_q < num_tc) {\n+\t\thns3_err(hw, \"number of Tx queues(%d) is less than tcs(%d).\",\n+\t\t\t nb_tx_q, num_tc);\n+\t\treturn -EINVAL;\n+\t}\n \n \t/* Currently not support uncontinuous tc */\n \thw->dcb_info.num_tc = num_tc;\n@@ -1333,10 +1356,10 @@ hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)\n \t\tbit_map = 1;\n \t\thw->dcb_info.num_tc = 1;\n \t}\n-\n \thw->hw_tc_map = bit_map;\n-\n \thns3_dcb_info_cfg(hns);\n+\n+\treturn 0;\n }\n \n static int\n@@ -1422,10 +1445,15 @@ hns3_dcb_configure(struct hns3_adapter *hns)\n \n \thns3_dcb_cfg_validate(hns, &num_tc, &map_changed);\n \tif (map_changed || rte_atomic16_read(&hw->reset.resetting)) {\n-\t\thns3_dcb_info_update(hns, num_tc);\n+\t\tret = hns3_dcb_info_update(hns, num_tc);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"dcb info update failed: %d\", ret);\n+\t\t\treturn ret;\n+\t\t}\n+\n \t\tret = hns3_dcb_hw_configure(hns);\n \t\tif (ret) {\n-\t\t\thns3_err(hw, \"dcb sw configure fails: %d\", ret);\n+\t\t\thns3_err(hw, \"dcb sw configure failed: %d\", ret);\n \t\t\treturn ret;\n \t\t}\n \t}\n@@ -1479,7 +1507,8 @@ hns3_dcb_init(struct hns3_hw *hw)\n \t\t\thns3_err(hw, \"dcb info init failed: %d\", ret);\n \t\t\treturn ret;\n \t\t}\n-\t\thns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num);\n+\t\thns3_dcb_update_tc_queue_mapping(hw, hw->tqps_num,\n+\t\t\t\t\t\t hw->tqps_num);\n \t}\n \n \t/*\n@@ -1502,10 +1531,11 @@ static int\n hns3_update_queue_map_configure(struct hns3_adapter *hns)\n {\n \tstruct hns3_hw *hw = &hns->hw;\n-\tuint16_t queue_num = hw->data->nb_rx_queues;\n+\tuint16_t nb_rx_q = hw->data->nb_rx_queues;\n+\tuint16_t nb_tx_q = hw->data->nb_tx_queues;\n \tint ret;\n \n-\thns3_dcb_update_tc_queue_mapping(hw, queue_num);\n+\thns3_dcb_update_tc_queue_mapping(hw, nb_rx_q, nb_tx_q);\n \tret = hns3_q_to_qs_map(hw);\n \tif (ret) {\n \t\thns3_err(hw, \"failed to map nq to qs! 
ret = %d\", ret);\ndiff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h\nindex 9ec4e704b..9c2c5f21c 100644\n--- a/drivers/net/hns3/hns3_dcb.h\n+++ b/drivers/net/hns3/hns3_dcb.h\n@@ -159,7 +159,9 @@ hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);\n int\n hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf);\n \n-void hns3_tc_queue_mapping_cfg(struct hns3_hw *hw);\n+void hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q);\n+\n+void hns3_tc_queue_mapping_cfg(struct hns3_hw *hw, uint16_t nb_queue);\n \n int hns3_dcb_cfg_update(struct hns3_adapter *hns);\n \ndiff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c\nindex 49aef7dbc..800fa47cc 100644\n--- a/drivers/net/hns3/hns3_ethdev.c\n+++ b/drivers/net/hns3/hns3_ethdev.c\n@@ -2061,10 +2061,11 @@ hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,\n static int\n hns3_dev_configure(struct rte_eth_dev *dev)\n {\n-\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n-\tstruct hns3_rss_conf *rss_cfg = &hw->rss_info;\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n \tstruct rte_eth_conf *conf = &dev->data->dev_conf;\n \tenum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tstruct hns3_rss_conf *rss_cfg = &hw->rss_info;\n \tuint16_t nb_rx_q = dev->data->nb_rx_queues;\n \tuint16_t nb_tx_q = dev->data->nb_tx_queues;\n \tstruct rte_eth_rss_conf rss_conf;\n@@ -2072,23 +2073,28 @@ hns3_dev_configure(struct rte_eth_dev *dev)\n \tint ret;\n \n \t/*\n-\t * Hardware does not support where the number of rx and tx queues is\n-\t * not equal in hip08.\n+\t * Hardware does not support individually enable/disable/reset the Tx or\n+\t * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx\n+\t * and Rx queues at the same time. When the numbers of Tx queues\n+\t * allocated by upper applications are not equal to the numbers of Rx\n+\t * queues, driver needs to setup fake Tx or Rx queues to adjust numbers\n+\t * of Tx/Rx queues. otherwise, network engine can not work as usual. But\n+\t * these fake queues are imperceptible, and can not be used by upper\n+\t * applications.\n \t */\n-\tif (nb_rx_q != nb_tx_q) {\n-\t\thns3_err(hw,\n-\t\t\t \"nb_rx_queues(%u) not equal with nb_tx_queues(%u)! 
\"\n-\t\t\t \"Hardware does not support this configuration!\",\n-\t\t\t nb_rx_q, nb_tx_q);\n-\t\treturn -EINVAL;\n+\tret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);\n+\tif (ret) {\n+\t\thns3_err(hw, \"Failed to set rx/tx fake queues: %d\", ret);\n+\t\treturn ret;\n \t}\n \n+\thw->adapter_state = HNS3_NIC_CONFIGURING;\n \tif (conf->link_speeds & ETH_LINK_SPEED_FIXED) {\n \t\thns3_err(hw, \"setting link speed/duplex not supported\");\n-\t\treturn -EINVAL;\n+\t\tret = -EINVAL;\n+\t\tgoto cfg_err;\n \t}\n \n-\thw->adapter_state = HNS3_NIC_CONFIGURING;\n \tif ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {\n \t\tret = hns3_check_dcb_cfg(dev);\n \t\tif (ret)\n@@ -2134,7 +2140,9 @@ hns3_dev_configure(struct rte_eth_dev *dev)\n \treturn 0;\n \n cfg_err:\n+\t(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);\n \thw->adapter_state = HNS3_NIC_INITIALIZED;\n+\n \treturn ret;\n }\n \n@@ -4084,7 +4092,7 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)\n \t/* check and configure queue intr-vector mapping */\n \tif (rte_intr_cap_multiple(intr_handle) ||\n \t    !RTE_ETH_DEV_SRIOV(dev).active) {\n-\t\tintr_vector = dev->data->nb_rx_queues;\n+\t\tintr_vector = hw->used_rx_queues;\n \t\t/* creates event fd for each intr vector when MSIX is used */\n \t\tif (rte_intr_efd_enable(intr_handle, intr_vector))\n \t\t\treturn -EINVAL;\n@@ -4092,10 +4100,10 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)\n \tif (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {\n \t\tintr_handle->intr_vec =\n \t\t\trte_zmalloc(\"intr_vec\",\n-\t\t\t\t    dev->data->nb_rx_queues * sizeof(int), 0);\n+\t\t\t\t    hw->used_rx_queues * sizeof(int), 0);\n \t\tif (intr_handle->intr_vec == NULL) {\n \t\t\thns3_err(hw, \"Failed to allocate %d rx_queues\"\n-\t\t\t\t     \" intr_vec\", dev->data->nb_rx_queues);\n+\t\t\t\t     \" intr_vec\", hw->used_rx_queues);\n \t\t\tret = -ENOMEM;\n \t\t\tgoto alloc_intr_vec_error;\n \t\t}\n@@ -4106,7 +4114,7 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)\n \t\tbase = RTE_INTR_VEC_RXTX_OFFSET;\n \t}\n \tif (rte_intr_dp_is_en(intr_handle)) {\n-\t\tfor (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {\n+\t\tfor (q_id = 0; q_id < hw->used_rx_queues; q_id++) {\n \t\t\tret = hns3_bind_ring_with_vector(dev, vec, true, q_id);\n \t\t\tif (ret)\n \t\t\t\tgoto bind_vector_error;\n@@ -4190,6 +4198,8 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)\n {\n \tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n \tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tstruct hns3_hw *hw = &hns->hw;\n \tuint8_t base = 0;\n \tuint8_t vec = 0;\n \tuint16_t q_id;\n@@ -4203,7 +4213,7 @@ hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)\n \t\tbase = RTE_INTR_VEC_RXTX_OFFSET;\n \t}\n \tif (rte_intr_dp_is_en(intr_handle)) {\n-\t\tfor (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {\n+\t\tfor (q_id = 0; q_id < hw->used_rx_queues; q_id++) {\n \t\t\t(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);\n \t\t\tif (vec < base + intr_handle->nb_efd - 1)\n \t\t\t\tvec++;\n@@ -4446,15 +4456,13 @@ hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)\n \tfor (i = 0; i < dcb_info->nb_tcs; i++)\n \t\tdcb_info->tc_bws[i] = hw->dcb_info.pg_info[0].tc_dwrr[i];\n \n-\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n-\t\tdcb_info->tc_queue.tc_rxq[0][i].base =\n-\t\t\t\t\thw->tc_queue[i].tqp_offset;\n+\tfor (i = 0; i < hw->num_tc; i++) {\n+\t\tdcb_info->tc_queue.tc_rxq[0][i].base = hw->alloc_rss_size * i;\n 
\t\tdcb_info->tc_queue.tc_txq[0][i].base =\n-\t\t\t\t\thw->tc_queue[i].tqp_offset;\n-\t\tdcb_info->tc_queue.tc_rxq[0][i].nb_queue =\n-\t\t\t\t\thw->tc_queue[i].tqp_count;\n+\t\t\t\t\t\thw->tc_queue[i].tqp_offset;\n+\t\tdcb_info->tc_queue.tc_rxq[0][i].nb_queue = hw->alloc_rss_size;\n \t\tdcb_info->tc_queue.tc_txq[0][i].nb_queue =\n-\t\t\t\t\thw->tc_queue[i].tqp_count;\n+\t\t\t\t\t\thw->tc_queue[i].tqp_count;\n \t}\n \trte_spinlock_unlock(&hw->lock);\n \ndiff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h\nindex 7422706a8..2aa4c3cd7 100644\n--- a/drivers/net/hns3/hns3_ethdev.h\n+++ b/drivers/net/hns3/hns3_ethdev.h\n@@ -153,6 +153,12 @@ struct hns3_mac {\n \tuint32_t link_speed;      /* ETH_SPEED_NUM_ */\n };\n \n+struct hns3_fake_queue_data {\n+\tvoid **rx_queues; /* Array of pointers to fake RX queues. */\n+\tvoid **tx_queues; /* Array of pointers to fake TX queues. */\n+\tuint16_t nb_fake_rx_queues; /* Number of fake RX queues. */\n+\tuint16_t nb_fake_tx_queues; /* Number of fake TX queues. */\n+};\n \n /* Primary process maintains driver state in main thread.\n  *\n@@ -365,8 +371,14 @@ struct hns3_hw {\n \tstruct hns3_dcb_info dcb_info;\n \tenum hns3_fc_status current_fc_status; /* current flow control status */\n \tstruct hns3_tc_queue_info tc_queue[HNS3_MAX_TC_NUM];\n-\tuint16_t alloc_tqps;\n-\tuint16_t alloc_rss_size;    /* Queue number per TC */\n+\tuint16_t used_rx_queues;\n+\tuint16_t used_tx_queues;\n+\n+\t/* Config max queue numbers between rx and tx queues from user */\n+\tuint16_t cfg_max_queues;\n+\tstruct hns3_fake_queue_data fkq_data;     /* fake queue data */\n+\tuint16_t alloc_rss_size;    /* RX queue number per TC */\n+\tuint16_t tx_qnum_per_tc;    /* TX queue number per TC */\n \n \tuint32_t flag;\n \t/*\ndiff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c\nindex 10969011b..71e358e81 100644\n--- a/drivers/net/hns3/hns3_ethdev_vf.c\n+++ b/drivers/net/hns3/hns3_ethdev_vf.c\n@@ -428,24 +428,28 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)\n \tint ret;\n \n \t/*\n-\t * Hardware does not support where the number of rx and tx queues is\n-\t * not equal in hip08.\n+\t * Hardware does not support individually enable/disable/reset the Tx or\n+\t * Rx queue in hns3 network engine. Driver must enable/disable/reset Tx\n+\t * and Rx queues at the same time. When the numbers of Tx queues\n+\t * allocated by upper applications are not equal to the numbers of Rx\n+\t * queues, driver needs to setup fake Tx or Rx queues to adjust numbers\n+\t * of Tx/Rx queues. otherwise, network engine can not work as usual. But\n+\t * these fake queues are imperceptible, and can not be used by upper\n+\t * applications.\n \t */\n-\tif (nb_rx_q != nb_tx_q) {\n-\t\thns3_err(hw,\n-\t\t\t \"nb_rx_queues(%u) not equal with nb_tx_queues(%u)! 
\"\n-\t\t\t \"Hardware does not support this configuration!\",\n-\t\t\t nb_rx_q, nb_tx_q);\n-\t\treturn -EINVAL;\n+\tret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);\n+\tif (ret) {\n+\t\thns3_err(hw, \"Failed to set rx/tx fake queues: %d\", ret);\n+\t\treturn ret;\n \t}\n \n+\thw->adapter_state = HNS3_NIC_CONFIGURING;\n \tif (conf->link_speeds & ETH_LINK_SPEED_FIXED) {\n \t\thns3_err(hw, \"setting link speed/duplex not supported\");\n-\t\treturn -EINVAL;\n+\t\tret = -EINVAL;\n+\t\tgoto cfg_err;\n \t}\n \n-\thw->adapter_state = HNS3_NIC_CONFIGURING;\n-\n \t/* When RSS is not configured, redirect the packet queue 0 */\n \tif ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {\n \t\trss_conf = conf->rx_adv_conf.rss_conf;\n@@ -484,7 +488,9 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)\n \treturn 0;\n \n cfg_err:\n+\t(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);\n \thw->adapter_state = HNS3_NIC_INITIALIZED;\n+\n \treturn ret;\n }\n \n@@ -799,12 +805,12 @@ hns3vf_get_configuration(struct hns3_hw *hw)\n \treturn hns3vf_get_tc_info(hw);\n }\n \n-static void\n+static int\n hns3vf_set_tc_info(struct hns3_adapter *hns)\n {\n \tstruct hns3_hw *hw = &hns->hw;\n \tuint16_t nb_rx_q = hw->data->nb_rx_queues;\n-\tuint16_t new_tqps;\n+\tuint16_t nb_tx_q = hw->data->nb_tx_queues;\n \tuint8_t i;\n \n \thw->num_tc = 0;\n@@ -812,11 +818,22 @@ hns3vf_set_tc_info(struct hns3_adapter *hns)\n \t\tif (hw->hw_tc_map & BIT(i))\n \t\t\thw->num_tc++;\n \n-\tnew_tqps = RTE_MIN(hw->tqps_num, nb_rx_q);\n-\thw->alloc_rss_size = RTE_MIN(hw->rss_size_max, new_tqps / hw->num_tc);\n-\thw->alloc_tqps = hw->alloc_rss_size * hw->num_tc;\n+\tif (nb_rx_q < hw->num_tc) {\n+\t\thns3_err(hw, \"number of Rx queues(%d) is less than tcs(%d).\",\n+\t\t\t nb_rx_q, hw->num_tc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (nb_tx_q < hw->num_tc) {\n+\t\thns3_err(hw, \"number of Tx queues(%d) is less than tcs(%d).\",\n+\t\t\t nb_tx_q, hw->num_tc);\n+\t\treturn -EINVAL;\n+\t}\n \n-\thns3_tc_queue_mapping_cfg(hw);\n+\thns3_set_rss_size(hw, nb_rx_q);\n+\thns3_tc_queue_mapping_cfg(hw, nb_tx_q);\n+\n+\treturn 0;\n }\n \n static void\n@@ -1256,6 +1273,7 @@ hns3vf_do_stop(struct hns3_adapter *hns)\n static void\n hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)\n {\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n \tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n \tuint8_t base = 0;\n@@ -1271,7 +1289,7 @@ hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)\n \t\tbase = RTE_INTR_VEC_RXTX_OFFSET;\n \t}\n \tif (rte_intr_dp_is_en(intr_handle)) {\n-\t\tfor (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {\n+\t\tfor (q_id = 0; q_id < hw->used_rx_queues; q_id++) {\n \t\t\t(void)hns3vf_bind_ring_with_vector(dev, vec, false,\n \t\t\t\t\t\t\t   q_id);\n \t\t\tif (vec < base + intr_handle->nb_efd - 1)\n@@ -1381,7 +1399,9 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)\n \tstruct hns3_hw *hw = &hns->hw;\n \tint ret;\n \n-\thns3vf_set_tc_info(hns);\n+\tret = hns3vf_set_tc_info(hns);\n+\tif (ret)\n+\t\treturn ret;\n \n \tret = hns3_start_queues(hns, reset_queue);\n \tif (ret) {\n@@ -1412,8 +1432,8 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)\n \n \t/* check and configure queue intr-vector mapping */\n \tif (rte_intr_cap_multiple(intr_handle) ||\n-\t\t!RTE_ETH_DEV_SRIOV(dev).active) {\n-\t\tintr_vector = dev->data->nb_rx_queues;\n+\t    !RTE_ETH_DEV_SRIOV(dev).active) {\n+\t\tintr_vector = hw->used_rx_queues;\n \t\t/* It creates event fd 
for each intr vector when MSIX is used */\n \t\tif (rte_intr_efd_enable(intr_handle, intr_vector))\n \t\t\treturn -EINVAL;\n@@ -1421,10 +1441,10 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)\n \tif (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {\n \t\tintr_handle->intr_vec =\n \t\t\trte_zmalloc(\"intr_vec\",\n-\t\t\t\t    dev->data->nb_rx_queues * sizeof(int), 0);\n+\t\t\t\t    hw->used_rx_queues * sizeof(int), 0);\n \t\tif (intr_handle->intr_vec == NULL) {\n \t\t\thns3_err(hw, \"Failed to allocate %d rx_queues\"\n-\t\t\t\t     \" intr_vec\", dev->data->nb_rx_queues);\n+\t\t\t\t     \" intr_vec\", hw->used_rx_queues);\n \t\t\tret = -ENOMEM;\n \t\t\tgoto vf_alloc_intr_vec_error;\n \t\t}\n@@ -1435,7 +1455,7 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)\n \t\tbase = RTE_INTR_VEC_RXTX_OFFSET;\n \t}\n \tif (rte_intr_dp_is_en(intr_handle)) {\n-\t\tfor (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {\n+\t\tfor (q_id = 0; q_id < hw->used_rx_queues; q_id++) {\n \t\t\tret = hns3vf_bind_ring_with_vector(dev, vec, true,\n \t\t\t\t\t\t\t   q_id);\n \t\t\tif (ret)\ndiff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c\nindex bcd121f48..aa614175d 100644\n--- a/drivers/net/hns3/hns3_flow.c\n+++ b/drivers/net/hns3/hns3_flow.c\n@@ -224,14 +224,19 @@ hns3_handle_action_queue(struct rte_eth_dev *dev,\n \t\t\t struct rte_flow_error *error)\n {\n \tstruct hns3_adapter *hns = dev->data->dev_private;\n-\tstruct hns3_hw *hw = &hns->hw;\n \tconst struct rte_flow_action_queue *queue;\n+\tstruct hns3_hw *hw = &hns->hw;\n \n \tqueue = (const struct rte_flow_action_queue *)action->conf;\n-\tif (queue->index >= hw->data->nb_rx_queues)\n+\tif (queue->index >= hw->used_rx_queues) {\n+\t\thns3_err(hw, \"queue ID(%d) is greater than number of \"\n+\t\t\t  \"available queue (%d) in driver.\",\n+\t\t\t  queue->index, hw->used_rx_queues);\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n \t\t\t\t\t  \"Invalid queue ID in PF\");\n+\t}\n+\n \trule->queue_id = queue->index;\n \trule->action = HNS3_FD_ACTION_ACCEPT_PACKET;\n \treturn 0;\ndiff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c\nindex 003a5bde4..3d13ed526 100644\n--- a/drivers/net/hns3/hns3_rxtx.c\n+++ b/drivers/net/hns3/hns3_rxtx.c\n@@ -37,6 +37,7 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)\n {\n \tuint16_t i;\n \n+\t/* Note: Fake rx queue will not enter here */\n \tif (rxq->sw_ring) {\n \t\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n \t\t\tif (rxq->sw_ring[i].mbuf) {\n@@ -52,6 +53,7 @@ hns3_tx_queue_release_mbufs(struct hns3_tx_queue *txq)\n {\n \tuint16_t i;\n \n+\t/* Note: Fake rx queue will not enter here */\n \tif (txq->sw_ring) {\n \t\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n \t\t\tif (txq->sw_ring[i].mbuf) {\n@@ -120,22 +122,115 @@ hns3_dev_tx_queue_release(void *queue)\n \trte_spinlock_unlock(&hns->hw.lock);\n }\n \n-void\n-hns3_free_all_queues(struct rte_eth_dev *dev)\n+static void\n+hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)\n+{\n+\tstruct hns3_rx_queue *rxq = queue;\n+\tstruct hns3_adapter *hns;\n+\tstruct hns3_hw *hw;\n+\tuint16_t idx;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\thns = rxq->hns;\n+\thw = &hns->hw;\n+\tidx = rxq->queue_id;\n+\tif (hw->fkq_data.rx_queues[idx]) {\n+\t\thns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);\n+\t\thw->fkq_data.rx_queues[idx] = NULL;\n+\t}\n+\n+\t/* free fake rx queue arrays */\n+\tif (idx == (hw->fkq_data.nb_fake_rx_queues - 1)) {\n+\t\thw->fkq_data.nb_fake_rx_queues = 
0;\n+\t\trte_free(hw->fkq_data.rx_queues);\n+\t\thw->fkq_data.rx_queues = NULL;\n+\t}\n+}\n+\n+static void\n+hns3_fake_tx_queue_release(struct hns3_tx_queue *queue)\n {\n+\tstruct hns3_tx_queue *txq = queue;\n+\tstruct hns3_adapter *hns;\n+\tstruct hns3_hw *hw;\n+\tuint16_t idx;\n+\n+\tif (txq == NULL)\n+\t\treturn;\n+\n+\thns = txq->hns;\n+\thw = &hns->hw;\n+\tidx = txq->queue_id;\n+\tif (hw->fkq_data.tx_queues[idx]) {\n+\t\thns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);\n+\t\thw->fkq_data.tx_queues[idx] = NULL;\n+\t}\n+\n+\t/* free fake tx queue arrays */\n+\tif (idx == (hw->fkq_data.nb_fake_tx_queues - 1)) {\n+\t\thw->fkq_data.nb_fake_tx_queues = 0;\n+\t\trte_free(hw->fkq_data.tx_queues);\n+\t\thw->fkq_data.tx_queues = NULL;\n+\t}\n+}\n+\n+static void\n+hns3_free_rx_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tstruct hns3_fake_queue_data *fkq_data;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tuint16_t nb_rx_q;\n \tuint16_t i;\n \n-\tif (dev->data->rx_queues)\n-\t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\tnb_rx_q = hw->data->nb_rx_queues;\n+\tfor (i = 0; i < nb_rx_q; i++) {\n+\t\tif (dev->data->rx_queues[i]) {\n \t\t\thns3_rx_queue_release(dev->data->rx_queues[i]);\n \t\t\tdev->data->rx_queues[i] = NULL;\n \t\t}\n+\t}\n+\n+\t/* Free fake Rx queues */\n+\tfkq_data = &hw->fkq_data;\n+\tfor (i = 0; i < fkq_data->nb_fake_rx_queues; i++) {\n+\t\tif (fkq_data->rx_queues[i])\n+\t\t\thns3_fake_rx_queue_release(fkq_data->rx_queues[i]);\n+\t}\n+}\n \n-\tif (dev->data->tx_queues)\n-\t\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+static void\n+hns3_free_tx_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tstruct hns3_fake_queue_data *fkq_data;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tuint16_t nb_tx_q;\n+\tuint16_t i;\n+\n+\tnb_tx_q = hw->data->nb_tx_queues;\n+\tfor (i = 0; i < nb_tx_q; i++) {\n+\t\tif (dev->data->tx_queues[i]) {\n \t\t\thns3_tx_queue_release(dev->data->tx_queues[i]);\n \t\t\tdev->data->tx_queues[i] = NULL;\n \t\t}\n+\t}\n+\n+\t/* Free fake Tx queues */\n+\tfkq_data = &hw->fkq_data;\n+\tfor (i = 0; i < fkq_data->nb_fake_tx_queues; i++) {\n+\t\tif (fkq_data->tx_queues[i])\n+\t\t\thns3_fake_tx_queue_release(fkq_data->tx_queues[i]);\n+\t}\n+}\n+\n+void\n+hns3_free_all_queues(struct rte_eth_dev *dev)\n+{\n+\thns3_free_rx_queues(dev);\n+\thns3_free_tx_queues(dev);\n }\n \n static int\n@@ -223,17 +318,26 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq)\n static void\n hns3_enable_all_queues(struct hns3_hw *hw, bool en)\n {\n+\tuint16_t nb_rx_q = hw->data->nb_rx_queues;\n+\tuint16_t nb_tx_q = hw->data->nb_tx_queues;\n \tstruct hns3_rx_queue *rxq;\n \tstruct hns3_tx_queue *txq;\n \tuint32_t rcb_reg;\n \tint i;\n \n-\tfor (i = 0; i < hw->data->nb_rx_queues; i++) {\n-\t\trxq = hw->data->rx_queues[i];\n-\t\ttxq = hw->data->tx_queues[i];\n+\tfor (i = 0; i < hw->cfg_max_queues; i++) {\n+\t\tif (i < nb_rx_q)\n+\t\t\trxq = hw->data->rx_queues[i];\n+\t\telse\n+\t\t\trxq = hw->fkq_data.rx_queues[i - nb_rx_q];\n+\t\tif (i < nb_tx_q)\n+\t\t\ttxq = hw->data->tx_queues[i];\n+\t\telse\n+\t\t\ttxq = hw->fkq_data.tx_queues[i - nb_tx_q];\n \t\tif (rxq == NULL || txq == NULL ||\n \t\t    (en && (rxq->rx_deferred_start || txq->tx_deferred_start)))\n \t\t\tcontinue;\n+\n \t\trcb_reg = hns3_read_dev(rxq, HNS3_RING_EN_REG);\n \t\tif (en)\n \t\t\trcb_reg |= BIT(HNS3_RING_EN_B);\n@@ -382,10 +486,9 @@ int\n hns3_reset_all_queues(struct hns3_adapter *hns)\n {\n \tstruct hns3_hw *hw = &hns->hw;\n-\tint 
ret;\n-\tuint16_t i;\n+\tint ret, i;\n \n-\tfor (i = 0; i < hw->data->nb_rx_queues; i++) {\n+\tfor (i = 0; i < hw->cfg_max_queues; i++) {\n \t\tret = hns3_reset_queue(hns, i);\n \t\tif (ret) {\n \t\t\thns3_err(hw, \"Failed to reset No.%d queue: %d\", i, ret);\n@@ -445,12 +548,11 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)\n \n \tPMD_INIT_FUNC_TRACE();\n \n-\trxq = hw->data->rx_queues[idx];\n-\n+\trxq = (struct hns3_rx_queue *)hw->data->rx_queues[idx];\n \tret = hns3_alloc_rx_queue_mbufs(hw, rxq);\n \tif (ret) {\n \t\thns3_err(hw, \"Failed to alloc mbuf for No.%d rx queue: %d\",\n-\t\t\t    idx, ret);\n+\t\t\t idx, ret);\n \t\treturn ret;\n \t}\n \n@@ -462,15 +564,24 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)\n }\n \n static void\n-hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)\n+hns3_fake_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)\n {\n \tstruct hns3_hw *hw = &hns->hw;\n-\tstruct hns3_tx_queue *txq;\n+\tstruct hns3_rx_queue *rxq;\n+\n+\trxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[idx];\n+\trxq->next_to_use = 0;\n+\trxq->next_to_clean = 0;\n+\thns3_init_rx_queue_hw(rxq);\n+}\n+\n+static void\n+hns3_init_tx_queue(struct hns3_tx_queue *queue)\n+{\n+\tstruct hns3_tx_queue *txq = queue;\n \tstruct hns3_desc *desc;\n \tint i;\n \n-\ttxq = hw->data->tx_queues[idx];\n-\n \t/* Clear tx bd */\n \tdesc = txq->tx_ring;\n \tfor (i = 0; i < txq->nb_tx_desc; i++) {\n@@ -480,10 +591,30 @@ hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)\n \n \ttxq->next_to_use = 0;\n \ttxq->next_to_clean = 0;\n-\ttxq->tx_bd_ready   = txq->nb_tx_desc;\n+\ttxq->tx_bd_ready = txq->nb_tx_desc;\n \thns3_init_tx_queue_hw(txq);\n }\n \n+static void\n+hns3_dev_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)\n+{\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tstruct hns3_tx_queue *txq;\n+\n+\ttxq = (struct hns3_tx_queue *)hw->data->tx_queues[idx];\n+\thns3_init_tx_queue(txq);\n+}\n+\n+static void\n+hns3_fake_tx_queue_start(struct hns3_adapter *hns, uint16_t idx)\n+{\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tstruct hns3_tx_queue *txq;\n+\n+\ttxq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[idx];\n+\thns3_init_tx_queue(txq);\n+}\n+\n static void\n hns3_init_tx_ring_tc(struct hns3_adapter *hns)\n {\n@@ -500,7 +631,7 @@ hns3_init_tx_ring_tc(struct hns3_adapter *hns)\n \n \t\tfor (j = 0; j < tc_queue->tqp_count; j++) {\n \t\t\tnum = tc_queue->tqp_offset + j;\n-\t\t\ttxq = hw->data->tx_queues[num];\n+\t\t\ttxq = (struct hns3_tx_queue *)hw->data->tx_queues[num];\n \t\t\tif (txq == NULL)\n \t\t\t\tcontinue;\n \n@@ -509,16 +640,13 @@ hns3_init_tx_ring_tc(struct hns3_adapter *hns)\n \t}\n }\n \n-int\n-hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)\n+static int\n+hns3_start_rx_queues(struct hns3_adapter *hns)\n {\n \tstruct hns3_hw *hw = &hns->hw;\n-\tstruct rte_eth_dev_data *dev_data = hw->data;\n \tstruct hns3_rx_queue *rxq;\n-\tstruct hns3_tx_queue *txq;\n+\tint i, j;\n \tint ret;\n-\tint i;\n-\tint j;\n \n \t/* Initialize RSS for queues */\n \tret = hns3_config_rss(hns);\n@@ -527,49 +655,85 @@ hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)\n \t\treturn ret;\n \t}\n \n-\tif (reset_queue) {\n-\t\tret = hns3_reset_all_queues(hns);\n-\t\tif (ret) {\n-\t\t\thns3_err(hw, \"Failed to reset all queues %d\", ret);\n-\t\t\treturn ret;\n-\t\t}\n-\t}\n-\n-\t/*\n-\t * Hardware does not support where the number of rx and tx queues is\n-\t * not equal in hip08. 
In .dev_configure callback function we will\n-\t * check the two values, here we think that the number of rx and tx\n-\t * queues is equal.\n-\t */\n \tfor (i = 0; i < hw->data->nb_rx_queues; i++) {\n-\t\trxq = dev_data->rx_queues[i];\n-\t\ttxq = dev_data->tx_queues[i];\n-\t\tif (rxq == NULL || txq == NULL || rxq->rx_deferred_start ||\n-\t\t    txq->tx_deferred_start)\n+\t\trxq = (struct hns3_rx_queue *)hw->data->rx_queues[i];\n+\t\tif (rxq == NULL || rxq->rx_deferred_start)\n \t\t\tcontinue;\n-\n \t\tret = hns3_dev_rx_queue_start(hns, i);\n \t\tif (ret) {\n \t\t\thns3_err(hw, \"Failed to start No.%d rx queue: %d\", i,\n \t\t\t\t ret);\n \t\t\tgoto out;\n \t\t}\n-\t\thns3_dev_tx_queue_start(hns, i);\n \t}\n-\thns3_init_tx_ring_tc(hns);\n \n-\thns3_enable_all_queues(hw, true);\n+\tfor (i = 0; i < hw->fkq_data.nb_fake_rx_queues; i++) {\n+\t\trxq = (struct hns3_rx_queue *)hw->fkq_data.rx_queues[i];\n+\t\tif (rxq == NULL || rxq->rx_deferred_start)\n+\t\t\tcontinue;\n+\t\thns3_fake_rx_queue_start(hns, i);\n+\t}\n \treturn 0;\n \n out:\n \tfor (j = 0; j < i; j++) {\n-\t\trxq = dev_data->rx_queues[j];\n+\t\trxq = (struct hns3_rx_queue *)hw->data->rx_queues[j];\n \t\thns3_rx_queue_release_mbufs(rxq);\n \t}\n \n \treturn ret;\n }\n \n+static void\n+hns3_start_tx_queues(struct hns3_adapter *hns)\n+{\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tstruct hns3_tx_queue *txq;\n+\tint i;\n+\n+\tfor (i = 0; i < hw->data->nb_tx_queues; i++) {\n+\t\ttxq = (struct hns3_tx_queue *)hw->data->tx_queues[i];\n+\t\tif (txq == NULL || txq->tx_deferred_start)\n+\t\t\tcontinue;\n+\t\thns3_dev_tx_queue_start(hns, i);\n+\t}\n+\n+\tfor (i = 0; i < hw->fkq_data.nb_fake_tx_queues; i++) {\n+\t\ttxq = (struct hns3_tx_queue *)hw->fkq_data.tx_queues[i];\n+\t\tif (txq == NULL || txq->tx_deferred_start)\n+\t\t\tcontinue;\n+\t\thns3_fake_tx_queue_start(hns, i);\n+\t}\n+\n+\thns3_init_tx_ring_tc(hns);\n+}\n+\n+int\n+hns3_start_queues(struct hns3_adapter *hns, bool reset_queue)\n+{\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tint ret;\n+\n+\tif (reset_queue) {\n+\t\tret = hns3_reset_all_queues(hns);\n+\t\tif (ret) {\n+\t\t\thns3_err(hw, \"Failed to reset all queues %d\", ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\tret = hns3_start_rx_queues(hns);\n+\tif (ret) {\n+\t\thns3_err(hw, \"Failed to start rx queues: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\thns3_start_tx_queues(hns);\n+\thns3_enable_all_queues(hw, true);\n+\n+\treturn 0;\n+}\n+\n int\n hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)\n {\n@@ -587,6 +751,337 @@ hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue)\n \treturn 0;\n }\n \n+static void*\n+hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,\n+\t\t\t    struct hns3_queue_info *q_info)\n+{\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tconst struct rte_memzone *rx_mz;\n+\tstruct hns3_rx_queue *rxq;\n+\tunsigned int rx_desc;\n+\n+\trxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, q_info->socket_id);\n+\tif (rxq == NULL) {\n+\t\thns3_err(hw, \"Failed to allocate memory for No.%d rx ring!\",\n+\t\t\t q_info->idx);\n+\t\treturn NULL;\n+\t}\n+\n+\t/* Allocate rx ring hardware descriptors. 
*/\n+\trxq->queue_id = q_info->idx;\n+\trxq->nb_rx_desc = q_info->nb_desc;\n+\trx_desc = rxq->nb_rx_desc * sizeof(struct hns3_desc);\n+\trx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,\n+\t\t\t\t\t rx_desc, HNS3_RING_BASE_ALIGN,\n+\t\t\t\t\t q_info->socket_id);\n+\tif (rx_mz == NULL) {\n+\t\thns3_err(hw, \"Failed to reserve DMA memory for No.%d rx ring!\",\n+\t\t\t q_info->idx);\n+\t\thns3_rx_queue_release(rxq);\n+\t\treturn NULL;\n+\t}\n+\trxq->mz = rx_mz;\n+\trxq->rx_ring = (struct hns3_desc *)rx_mz->addr;\n+\trxq->rx_ring_phys_addr = rx_mz->iova;\n+\n+\thns3_dbg(hw, \"No.%d rx descriptors iova 0x%\" PRIx64, q_info->idx,\n+\t\t rxq->rx_ring_phys_addr);\n+\n+\treturn rxq;\n+}\n+\n+static int\n+hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,\n+\t\t\t uint16_t nb_desc, unsigned int socket_id)\n+{\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tstruct hns3_queue_info q_info;\n+\tstruct hns3_rx_queue *rxq;\n+\tuint16_t nb_rx_q;\n+\n+\tif (hw->fkq_data.rx_queues[idx]) {\n+\t\thns3_rx_queue_release(hw->fkq_data.rx_queues[idx]);\n+\t\thw->fkq_data.rx_queues[idx] = NULL;\n+\t}\n+\n+\tq_info.idx = idx;\n+\tq_info.socket_id = socket_id;\n+\tq_info.nb_desc = nb_desc;\n+\tq_info.type = \"hns3 fake RX queue\";\n+\tq_info.ring_name = \"rx_fake_ring\";\n+\trxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);\n+\tif (rxq == NULL) {\n+\t\thns3_err(hw, \"Failed to setup No.%d fake rx ring.\", idx);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Don't need alloc sw_ring, because upper applications don't use it */\n+\trxq->sw_ring = NULL;\n+\n+\trxq->hns = hns;\n+\trxq->rx_deferred_start = false;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->configured = true;\n+\tnb_rx_q = dev->data->nb_rx_queues;\n+\trxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +\n+\t\t\t\t(nb_rx_q + idx) * HNS3_TQP_REG_SIZE);\n+\trxq->rx_buf_len = hw->rx_buf_len;\n+\n+\trte_spinlock_lock(&hw->lock);\n+\thw->fkq_data.rx_queues[idx] = rxq;\n+\trte_spinlock_unlock(&hw->lock);\n+\n+\treturn 0;\n+}\n+\n+static void*\n+hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,\n+\t\t\t    struct hns3_queue_info *q_info)\n+{\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tconst struct rte_memzone *tx_mz;\n+\tstruct hns3_tx_queue *txq;\n+\tstruct hns3_desc *desc;\n+\tunsigned int tx_desc;\n+\tint i;\n+\n+\ttxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, q_info->socket_id);\n+\tif (txq == NULL) {\n+\t\thns3_err(hw, \"Failed to allocate memory for No.%d tx ring!\",\n+\t\t\t q_info->idx);\n+\t\treturn NULL;\n+\t}\n+\n+\t/* Allocate tx ring hardware descriptors. 
*/\n+\ttxq->queue_id = q_info->idx;\n+\ttxq->nb_tx_desc = q_info->nb_desc;\n+\ttx_desc = txq->nb_tx_desc * sizeof(struct hns3_desc);\n+\ttx_mz = rte_eth_dma_zone_reserve(dev, q_info->ring_name, q_info->idx,\n+\t\t\t\t\t tx_desc, HNS3_RING_BASE_ALIGN,\n+\t\t\t\t\t q_info->socket_id);\n+\tif (tx_mz == NULL) {\n+\t\thns3_err(hw, \"Failed to reserve DMA memory for No.%d tx ring!\",\n+\t\t\t q_info->idx);\n+\t\thns3_tx_queue_release(txq);\n+\t\treturn NULL;\n+\t}\n+\ttxq->mz = tx_mz;\n+\ttxq->tx_ring = (struct hns3_desc *)tx_mz->addr;\n+\ttxq->tx_ring_phys_addr = tx_mz->iova;\n+\n+\thns3_dbg(hw, \"No.%d tx descriptors iova 0x%\" PRIx64, q_info->idx,\n+\t\t txq->tx_ring_phys_addr);\n+\n+\t/* Clear tx bd */\n+\tdesc = txq->tx_ring;\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\tdesc->tx.tp_fe_sc_vld_ra_ri = 0;\n+\t\tdesc++;\n+\t}\n+\n+\treturn txq;\n+}\n+\n+static int\n+hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,\n+\t\t\t uint16_t nb_desc, unsigned int socket_id)\n+{\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tstruct hns3_queue_info q_info;\n+\tstruct hns3_tx_queue *txq;\n+\tuint16_t nb_tx_q;\n+\n+\tif (hw->fkq_data.tx_queues[idx] != NULL) {\n+\t\thns3_tx_queue_release(hw->fkq_data.tx_queues[idx]);\n+\t\thw->fkq_data.tx_queues[idx] = NULL;\n+\t}\n+\n+\tq_info.idx = idx;\n+\tq_info.socket_id = socket_id;\n+\tq_info.nb_desc = nb_desc;\n+\tq_info.type = \"hns3 fake TX queue\";\n+\tq_info.ring_name = \"tx_fake_ring\";\n+\ttxq = hns3_alloc_txq_and_dma_zone(dev, &q_info);\n+\tif (txq == NULL) {\n+\t\thns3_err(hw, \"Failed to setup No.%d fake tx ring.\", idx);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Don't need alloc sw_ring, because upper applications don't use it */\n+\ttxq->sw_ring = NULL;\n+\n+\ttxq->hns = hns;\n+\ttxq->tx_deferred_start = false;\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->configured = true;\n+\tnb_tx_q = dev->data->nb_tx_queues;\n+\ttxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +\n+\t\t\t\t(nb_tx_q + idx) * HNS3_TQP_REG_SIZE);\n+\n+\trte_spinlock_lock(&hw->lock);\n+\thw->fkq_data.tx_queues[idx] = txq;\n+\trte_spinlock_unlock(&hw->lock);\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)\n+{\n+\tuint16_t old_nb_queues = hw->fkq_data.nb_fake_rx_queues;\n+\tvoid **rxq;\n+\tuint8_t i;\n+\n+\tif (hw->fkq_data.rx_queues == NULL && nb_queues != 0) {\n+\t\t/* first time configuration */\n+\n+\t\tuint32_t size;\n+\t\tsize = sizeof(hw->fkq_data.rx_queues[0]) * nb_queues;\n+\t\thw->fkq_data.rx_queues = rte_zmalloc(\"fake_rx_queues\", size,\n+\t\t\t\t\t\t     RTE_CACHE_LINE_SIZE);\n+\t\tif (hw->fkq_data.rx_queues == NULL) {\n+\t\t\thw->fkq_data.nb_fake_rx_queues = 0;\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t} else if (hw->fkq_data.rx_queues != NULL && nb_queues != 0) {\n+\t\t/* re-configure */\n+\n+\t\trxq = hw->fkq_data.rx_queues;\n+\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\thns3_dev_rx_queue_release(rxq[i]);\n+\n+\t\trxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,\n+\t\t\t\t  RTE_CACHE_LINE_SIZE);\n+\t\tif (rxq == NULL)\n+\t\t\treturn -ENOMEM;\n+\t\tif (nb_queues > old_nb_queues) {\n+\t\t\tuint16_t new_qs = nb_queues - old_nb_queues;\n+\t\t\tmemset(rxq + old_nb_queues, 0, sizeof(rxq[0]) * new_qs);\n+\t\t}\n+\n+\t\thw->fkq_data.rx_queues = rxq;\n+\t} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {\n+\t\trxq = hw->fkq_data.rx_queues;\n+\t\tfor (i = nb_queues; i < old_nb_queues; 
i++)\n+\t\t\thns3_dev_rx_queue_release(rxq[i]);\n+\n+\t\trte_free(hw->fkq_data.rx_queues);\n+\t\thw->fkq_data.rx_queues = NULL;\n+\t}\n+\n+\thw->fkq_data.nb_fake_rx_queues = nb_queues;\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)\n+{\n+\tuint16_t old_nb_queues = hw->fkq_data.nb_fake_tx_queues;\n+\tvoid **txq;\n+\tuint8_t i;\n+\n+\tif (hw->fkq_data.tx_queues == NULL && nb_queues != 0) {\n+\t\t/* first time configuration */\n+\n+\t\tuint32_t size;\n+\t\tsize = sizeof(hw->fkq_data.tx_queues[0]) * nb_queues;\n+\t\thw->fkq_data.tx_queues = rte_zmalloc(\"fake_tx_queues\", size,\n+\t\t\t\t\t\t     RTE_CACHE_LINE_SIZE);\n+\t\tif (hw->fkq_data.tx_queues == NULL) {\n+\t\t\thw->fkq_data.nb_fake_tx_queues = 0;\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t} else if (hw->fkq_data.tx_queues != NULL && nb_queues != 0) {\n+\t\t/* re-configure */\n+\n+\t\ttxq = hw->fkq_data.tx_queues;\n+\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\thns3_dev_tx_queue_release(txq[i]);\n+\t\ttxq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,\n+\t\t\t\t  RTE_CACHE_LINE_SIZE);\n+\t\tif (txq == NULL)\n+\t\t\treturn -ENOMEM;\n+\t\tif (nb_queues > old_nb_queues) {\n+\t\t\tuint16_t new_qs = nb_queues - old_nb_queues;\n+\t\t\tmemset(txq + old_nb_queues, 0, sizeof(txq[0]) * new_qs);\n+\t\t}\n+\n+\t\thw->fkq_data.tx_queues = txq;\n+\t} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {\n+\t\ttxq = hw->fkq_data.tx_queues;\n+\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\thns3_dev_tx_queue_release(txq[i]);\n+\n+\t\trte_free(hw->fkq_data.tx_queues);\n+\t\thw->fkq_data.tx_queues = NULL;\n+\t}\n+\thw->fkq_data.nb_fake_tx_queues = nb_queues;\n+\n+\treturn 0;\n+}\n+\n+int\n+hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,\n+\t\t\t      uint16_t nb_tx_q)\n+{\n+\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint16_t rx_need_add_nb_q;\n+\tuint16_t tx_need_add_nb_q;\n+\tuint16_t port_id;\n+\tuint16_t q;\n+\tint ret;\n+\n+\t/* Setup new number of fake RX/TX queues and reconfigure device. */\n+\thw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);\n+\trx_need_add_nb_q = hw->cfg_max_queues - nb_rx_q;\n+\ttx_need_add_nb_q = hw->cfg_max_queues - nb_tx_q;\n+\tret = hns3_fake_rx_queue_config(hw, rx_need_add_nb_q);\n+\tif (ret) {\n+\t\thns3_err(hw, \"Fail to configure fake rx queues: %d\", ret);\n+\t\tgoto cfg_fake_rx_q_fail;\n+\t}\n+\n+\tret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q);\n+\tif (ret) {\n+\t\thns3_err(hw, \"Fail to configure fake rx queues: %d\", ret);\n+\t\tgoto cfg_fake_tx_q_fail;\n+\t}\n+\n+\t/* Allocate and set up fake RX queue per Ethernet port. */\n+\tport_id = hw->data->port_id;\n+\tfor (q = 0; q < rx_need_add_nb_q; q++) {\n+\t\tret = hns3_fake_rx_queue_setup(dev, q, HNS3_MIN_RING_DESC,\n+\t\t\t\t\t       rte_eth_dev_socket_id(port_id));\n+\t\tif (ret)\n+\t\t\tgoto setup_fake_rx_q_fail;\n+\t}\n+\n+\t/* Allocate and set up fake TX queue per Ethernet port. 
*/\n+\tfor (q = 0; q < tx_need_add_nb_q; q++) {\n+\t\tret = hns3_fake_tx_queue_setup(dev, q, HNS3_MIN_RING_DESC,\n+\t\t\t\t\t       rte_eth_dev_socket_id(port_id));\n+\t\tif (ret)\n+\t\t\tgoto setup_fake_tx_q_fail;\n+\t}\n+\n+\treturn 0;\n+\n+setup_fake_tx_q_fail:\n+setup_fake_rx_q_fail:\n+\t(void)hns3_fake_tx_queue_config(hw, 0);\n+cfg_fake_tx_q_fail:\n+\t(void)hns3_fake_rx_queue_config(hw, 0);\n+cfg_fake_rx_q_fail:\n+\thw->cfg_max_queues = 0;\n+\n+\treturn ret;\n+}\n+\n void\n hns3_dev_release_mbufs(struct hns3_adapter *hns)\n {\n@@ -618,11 +1113,9 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t\t    struct rte_mempool *mp)\n {\n \tstruct hns3_adapter *hns = dev->data->dev_private;\n-\tconst struct rte_memzone *rx_mz;\n \tstruct hns3_hw *hw = &hns->hw;\n+\tstruct hns3_queue_info q_info;\n \tstruct hns3_rx_queue *rxq;\n-\tunsigned int desc_size = sizeof(struct hns3_desc);\n-\tunsigned int rx_desc;\n \tint rx_entry_len;\n \n \tif (dev->data->dev_started) {\n@@ -642,17 +1135,20 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t\tdev->data->rx_queues[idx] = NULL;\n \t}\n \n-\trxq = rte_zmalloc_socket(\"hns3 RX queue\", sizeof(struct hns3_rx_queue),\n-\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tq_info.idx = idx;\n+\tq_info.socket_id = socket_id;\n+\tq_info.nb_desc = nb_desc;\n+\tq_info.type = \"hns3 RX queue\";\n+\tq_info.ring_name = \"rx_ring\";\n+\trxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);\n \tif (rxq == NULL) {\n-\t\thns3_err(hw, \"Failed to allocate memory for rx queue!\");\n+\t\thns3_err(hw,\n+\t\t\t \"Failed to alloc mem and reserve DMA mem for rx ring!\");\n \t\treturn -ENOMEM;\n \t}\n \n \trxq->hns = hns;\n \trxq->mb_pool = mp;\n-\trxq->nb_rx_desc = nb_desc;\n-\trxq->queue_id = idx;\n \tif (conf->rx_free_thresh <= 0)\n \t\trxq->rx_free_thresh = DEFAULT_RX_FREE_THRESH;\n \telse\n@@ -668,23 +1164,6 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t\treturn -ENOMEM;\n \t}\n \n-\t/* Allocate rx ring hardware descriptors. 
*/\n-\trx_desc = rxq->nb_rx_desc * desc_size;\n-\trx_mz = rte_eth_dma_zone_reserve(dev, \"rx_ring\", idx, rx_desc,\n-\t\t\t\t\t HNS3_RING_BASE_ALIGN, socket_id);\n-\tif (rx_mz == NULL) {\n-\t\thns3_err(hw, \"Failed to reserve DMA memory for No.%d rx ring!\",\n-\t\t\t idx);\n-\t\thns3_rx_queue_release(rxq);\n-\t\treturn -ENOMEM;\n-\t}\n-\trxq->mz = rx_mz;\n-\trxq->rx_ring = (struct hns3_desc *)rx_mz->addr;\n-\trxq->rx_ring_phys_addr = rx_mz->iova;\n-\n-\thns3_dbg(hw, \"No.%d rx descriptors iova 0x%\" PRIx64, idx,\n-\t\t rxq->rx_ring_phys_addr);\n-\n \trxq->next_to_use = 0;\n \trxq->next_to_clean = 0;\n \trxq->nb_rx_hold = 0;\n@@ -1062,14 +1541,10 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t\t    unsigned int socket_id, const struct rte_eth_txconf *conf)\n {\n \tstruct hns3_adapter *hns = dev->data->dev_private;\n-\tconst struct rte_memzone *tx_mz;\n \tstruct hns3_hw *hw = &hns->hw;\n+\tstruct hns3_queue_info q_info;\n \tstruct hns3_tx_queue *txq;\n-\tstruct hns3_desc *desc;\n-\tunsigned int desc_size = sizeof(struct hns3_desc);\n-\tunsigned int tx_desc;\n \tint tx_entry_len;\n-\tint i;\n \n \tif (dev->data->dev_started) {\n \t\thns3_err(hw, \"tx_queue_setup after dev_start no supported\");\n@@ -1088,17 +1563,19 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t\tdev->data->tx_queues[idx] = NULL;\n \t}\n \n-\ttxq = rte_zmalloc_socket(\"hns3 TX queue\", sizeof(struct hns3_tx_queue),\n-\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tq_info.idx = idx;\n+\tq_info.socket_id = socket_id;\n+\tq_info.nb_desc = nb_desc;\n+\tq_info.type = \"hns3 TX queue\";\n+\tq_info.ring_name = \"tx_ring\";\n+\ttxq = hns3_alloc_txq_and_dma_zone(dev, &q_info);\n \tif (txq == NULL) {\n-\t\thns3_err(hw, \"Failed to allocate memory for tx queue!\");\n+\t\thns3_err(hw,\n+\t\t\t \"Failed to alloc mem and reserve DMA mem for tx ring!\");\n \t\treturn -ENOMEM;\n \t}\n \n-\ttxq->nb_tx_desc = nb_desc;\n-\ttxq->queue_id = idx;\n \ttxq->tx_deferred_start = conf->tx_deferred_start;\n-\n \ttx_entry_len = sizeof(struct hns3_entry) * txq->nb_tx_desc;\n \ttxq->sw_ring = rte_zmalloc_socket(\"hns3 TX sw ring\", tx_entry_len,\n \t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n@@ -1108,34 +1585,10 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t\treturn -ENOMEM;\n \t}\n \n-\t/* Allocate tx ring hardware descriptors. 
*/\n-\ttx_desc = txq->nb_tx_desc * desc_size;\n-\ttx_mz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", idx, tx_desc,\n-\t\t\t\t\t HNS3_RING_BASE_ALIGN, socket_id);\n-\tif (tx_mz == NULL) {\n-\t\thns3_err(hw, \"Failed to reserve DMA memory for No.%d tx ring!\",\n-\t\t\t idx);\n-\t\thns3_tx_queue_release(txq);\n-\t\treturn -ENOMEM;\n-\t}\n-\ttxq->mz = tx_mz;\n-\ttxq->tx_ring = (struct hns3_desc *)tx_mz->addr;\n-\ttxq->tx_ring_phys_addr = tx_mz->iova;\n-\n-\thns3_dbg(hw, \"No.%d tx descriptors iova 0x%\" PRIx64, idx,\n-\t\t txq->tx_ring_phys_addr);\n-\n-\t/* Clear tx bd */\n-\tdesc = txq->tx_ring;\n-\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n-\t\tdesc->tx.tp_fe_sc_vld_ra_ri = 0;\n-\t\tdesc++;\n-\t}\n-\n \ttxq->hns = hns;\n \ttxq->next_to_use = 0;\n \ttxq->next_to_clean = 0;\n-\ttxq->tx_bd_ready   = txq->nb_tx_desc;\n+\ttxq->tx_bd_ready = txq->nb_tx_desc;\n \ttxq->port_id = dev->data->port_id;\n \ttxq->configured = true;\n \ttxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +\ndiff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h\nindex cc210268a..a042c9902 100644\n--- a/drivers/net/hns3/hns3_rxtx.h\n+++ b/drivers/net/hns3/hns3_rxtx.h\n@@ -273,6 +273,14 @@ struct hns3_tx_queue {\n \tbool configured;        /* indicate if tx queue has been configured */\n };\n \n+struct hns3_queue_info {\n+\tconst char *type;   /* point to queue memory name */\n+\tconst char *ring_name;  /* point to hardware ring name */\n+\tuint16_t idx;\n+\tuint16_t nb_desc;\n+\tunsigned int socket_id;\n+};\n+\n #define HNS3_TX_CKSUM_OFFLOAD_MASK ( \\\n \tPKT_TX_OUTER_IPV6 | \\\n \tPKT_TX_OUTER_IPV4 | \\\n@@ -314,4 +322,7 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);\n void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);\n void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en);\n+int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,\n+\t\t\t\t  uint16_t nb_tx_q);\n+\n #endif /* _HNS3_RXTX_H_ */\n",
    "prefixes": [
        "01/11"
    ]
}
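
As a companion sketch under the same assumptions as the example near the top, the mbox URLs in the response are plain mailbox files that can be fetched and fed straight to git am; the series URL below is copied verbatim from the "series"[0]["mbox"] field, and the local paths are placeholders.

import subprocess

import requests

# Fetch the whole 11-patch series as one mbox (URL from the "series" entry above)
resp = requests.get("http://patches.dpdk.org/series/8023/mbox/")
resp.raise_for_status()
with open("series-8023.mbox", "wb") as f:
    f.write(resp.content)

# Apply it to a local DPDK checkout (placeholder path; git must be on PATH)
subprocess.run(["git", "-C", "dpdk", "am", "../series-8023.mbox"], check=True)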