get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.
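
The page below shows the result of the GET operation. For illustration only, here is a minimal sketch of driving these three operations from Python; it assumes the third-party requests library, that token authentication is enabled on patches.dpdk.org (Patchwork's REST API normally takes an "Authorization: Token ..." header), and that the token string and the "accepted" state are placeholders backed by a maintainer account.

# Sketch only (not part of the API response): GET/PATCH/PUT against this
# endpoint using the Python "requests" library.
import requests

URL = "https://patches.dpdk.org/api/patches/57860/"
AUTH = {"Authorization": "Token <your-api-token>"}  # hypothetical token

# GET: show the patch (read access needs no authentication).
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# PATCH: partial update -- only the supplied fields change.
requests.patch(URL, headers=AUTH, json={"state": "accepted"}).raise_for_status()

# PUT: full update -- send the writable fields together (the exact writable
# set is not listed on this page; state/delegate/archived shown as examples).
requests.put(URL, headers=AUTH,
             json={"state": "accepted", "archived": True,
                   "delegate": 319}).raise_for_status()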

GET /api/patches/57860/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 57860,
    "url": "https://patches.dpdk.org/api/patches/57860/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1566568031-45991-6-git-send-email-xavier.huwei@huawei.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1566568031-45991-6-git-send-email-xavier.huwei@huawei.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1566568031-45991-6-git-send-email-xavier.huwei@huawei.com",
    "date": "2019-08-23T13:46:54",
    "name": "[05/22] net/hns3: add the initialization of hns3 PMD driver",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "b30486f806b917ada2cab9a410b11c984ce58398",
    "submitter": {
        "id": 1405,
        "url": "https://patches.dpdk.org/api/people/1405/?format=api",
        "name": "Wei Hu (Xavier)",
        "email": "xavier.huwei@huawei.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1566568031-45991-6-git-send-email-xavier.huwei@huawei.com/mbox/",
    "series": [
        {
            "id": 6114,
            "url": "https://patches.dpdk.org/api/series/6114/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=6114",
            "date": "2019-08-23T13:46:49",
            "name": "add hns3 ethernet PMD driver",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/6114/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/57860/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/57860/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id D815C1C0BE;\n\tFri, 23 Aug 2019 15:50:18 +0200 (CEST)",
            "from huawei.com (szxga07-in.huawei.com [45.249.212.35])\n\tby dpdk.org (Postfix) with ESMTP id 0951D1BFD0\n\tfor <dev@dpdk.org>; Fri, 23 Aug 2019 15:49:40 +0200 (CEST)",
            "from DGGEMS406-HUB.china.huawei.com (unknown [172.30.72.59])\n\tby Forcepoint Email with ESMTP id 07C3EDE9B7E61FA7AB78;\n\tFri, 23 Aug 2019 21:49:37 +0800 (CST)",
            "from localhost.localdomain (10.67.212.132) by\n\tDGGEMS406-HUB.china.huawei.com (10.3.19.206) with Microsoft SMTP\n\tServer id 14.3.439.0; Fri, 23 Aug 2019 21:49:26 +0800"
        ],
        "From": "\"Wei Hu (Xavier)\" <xavier.huwei@huawei.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<linuxarm@huawei.com>, <xavier_huwei@163.com>, <liudongdong3@huawei.com>,\n\t<forest.zhouchang@huawei.com>",
        "Date": "Fri, 23 Aug 2019 21:46:54 +0800",
        "Message-ID": "<1566568031-45991-6-git-send-email-xavier.huwei@huawei.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1566568031-45991-1-git-send-email-xavier.huwei@huawei.com>",
        "References": "<1566568031-45991-1-git-send-email-xavier.huwei@huawei.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.67.212.132]",
        "X-CFilter-Loop": "Reflected",
        "Subject": "[dpdk-dev] [PATCH 05/22] net/hns3: add the initialization of hns3\n\tPMD driver",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch adds the initialization of hns3 PF PMD driver.\nIt gets configuration from IMP such as queue information,\nconfigures queue, inits mac, inits manage table, disables\ngro etc.\n\nSigned-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>\nSigned-off-by: Chunsong Feng <fengchunsong@huawei.com>\nSigned-off-by: Min Hu (Connor) <humin29@huawei.com>\nSigned-off-by: Hao Chen <chenhao164@huawei.com>\nSigned-off-by: Huisong Li <lihuisong@huawei.com>\n---\n drivers/net/hns3/hns3_ethdev.c | 1497 ++++++++++++++++++++++++++++++++++++++++\n drivers/net/hns3/hns3_ethdev.h |    3 +\n 2 files changed, 1500 insertions(+)",
    "diff": "diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c\nindex 4f4de6d..3b5deb0 100644\n--- a/drivers/net/hns3/hns3_ethdev.c\n+++ b/drivers/net/hns3/hns3_ethdev.c\n@@ -34,10 +34,1469 @@\n #include \"hns3_logs.h\"\n #include \"hns3_regs.h\"\n \n+#define HNS3_DEFAULT_PORT_CONF_BURST_SIZE\t32\n+#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM\t1\n+\n int hns3_logtype_init;\n int hns3_logtype_driver;\n \n static int\n+hns3_config_tso(struct hns3_hw *hw, unsigned int tso_mss_min,\n+\t\tunsigned int tso_mss_max)\n+{\n+\tstruct hns3_cfg_tso_status_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tuint16_t tso_mss;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TSO_GENERIC_CONFIG, false);\n+\n+\treq = (struct hns3_cfg_tso_status_cmd *)desc.data;\n+\n+\ttso_mss = 0;\n+\thns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,\n+\t\t       tso_mss_min);\n+\treq->tso_mss_min = rte_cpu_to_le_16(tso_mss);\n+\n+\ttso_mss = 0;\n+\thns3_set_field(tso_mss, HNS3_TSO_MSS_MIN_M, HNS3_TSO_MSS_MIN_S,\n+\t\t       tso_mss_max);\n+\treq->tso_mss_max = rte_cpu_to_le_16(tso_mss);\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+int\n+hns3_config_gro(struct hns3_hw *hw, bool en)\n+{\n+\tstruct hns3_cfg_gro_status_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tint ret;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false);\n+\treq = (struct hns3_cfg_gro_status_cmd *)desc.data;\n+\n+\treq->gro_en = rte_cpu_to_le_16(en ? 1 : 0);\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret)\n+\t\thns3_err(hw, \"GRO hardware config cmd failed, ret = %d\\n\", ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_set_umv_space(struct hns3_hw *hw, uint16_t space_size,\n+\t\t   uint16_t *allocated_size, bool is_alloc)\n+{\n+\tstruct hns3_umv_spc_alc_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tint ret;\n+\n+\treq = (struct hns3_umv_spc_alc_cmd *)desc.data;\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_VLAN_ALLOCATE, false);\n+\thns3_set_bit(req->allocate, HNS3_UMV_SPC_ALC_B, is_alloc ? 0 : 1);\n+\treq->space_size = rte_cpu_to_le_32(space_size);\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"%s umv space failed for cmd_send, ret =%d\",\n+\t\t\t     is_alloc ? \"allocate\" : \"free\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tif (is_alloc && allocated_size)\n+\t\t*allocated_size = rte_le_to_cpu_32(desc.data[1]);\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_init_umv_space(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tuint16_t allocated_size = 0;\n+\tint ret;\n+\n+\tret = hns3_set_umv_space(hw, pf->wanted_umv_size, &allocated_size,\n+\t\t\t\t true);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tif (allocated_size < pf->wanted_umv_size)\n+\t\tPMD_INIT_LOG(WARNING, \"Alloc umv space failed, want %u, get %u\",\n+\t\t\t     pf->wanted_umv_size, allocated_size);\n+\n+\tpf->max_umv_size = (!!allocated_size) ? 
allocated_size :\n+\t\t\t\t\t\tpf->wanted_umv_size;\n+\tpf->used_umv_size = 0;\n+\treturn 0;\n+}\n+\n+static int\n+hns3_uninit_umv_space(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tint ret;\n+\n+\tif (pf->max_umv_size == 0)\n+\t\treturn 0;\n+\n+\tret = hns3_set_umv_space(hw, pf->max_umv_size, NULL, false);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tpf->max_umv_size = 0;\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)\n+{\n+\tstruct hns3_config_max_frm_size_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_MAX_FRM_SIZE, false);\n+\n+\treq = (struct hns3_config_max_frm_size_cmd *)desc.data;\n+\treq->max_frm_size = rte_cpu_to_le_16(new_mps);\n+\treq->min_frm_size = HNS3_MIN_FRAME_LEN;\n+\n+\treturn hns3_cmd_send(hw, &desc, 1);\n+}\n+\n+static int\n+hns3_config_mtu(struct hns3_hw *hw, uint16_t mps)\n+{\n+\tint ret;\n+\n+\tret = hns3_set_mac_mtu(hw, mps);\n+\tif (ret) {\n+\t\thns3_err(hw, \"Failed to set mtu, ret = %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_buffer_alloc(hw);\n+\tif (ret) {\n+\t\thns3_err(hw, \"Failed to allocate buffer, ret = %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_parse_func_status(struct hns3_hw *hw, struct hns3_func_status_cmd *status)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\n+\tif (!(status->pf_state & HNS3_PF_STATE_DONE))\n+\t\treturn -EINVAL;\n+\n+\tpf->is_main_pf = (status->pf_state & HNS3_PF_STATE_MAIN) ? true : false;\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_query_function_status(struct hns3_hw *hw)\n+{\n+#define HNS3_QUERY_MAX_CNT\t\t10\n+#define HNS3_QUERY_SLEEP_MSCOEND\t1\n+\tstruct hns3_func_status_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tint timeout = 0;\n+\tint ret;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FUNC_STATUS, true);\n+\treq = (struct hns3_func_status_cmd *)desc.data;\n+\n+\tdo {\n+\t\tret = hns3_cmd_send(hw, &desc, 1);\n+\t\tif (ret) {\n+\t\t\tPMD_INIT_LOG(ERR, \"query function status failed %d\",\n+\t\t\t\t     ret);\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t\t/* Check pf reset is done */\n+\t\tif (req->pf_state)\n+\t\t\tbreak;\n+\n+\t\trte_delay_ms(HNS3_QUERY_SLEEP_MSCOEND);\n+\t} while (timeout++ < HNS3_QUERY_MAX_CNT);\n+\n+\treturn hns3_parse_func_status(hw, req);\n+}\n+\n+static int\n+hns3_query_pf_resource(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_pf_res_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tint ret;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_PF_RSRC, true);\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"query pf resource failed %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treq = (struct hns3_pf_res_cmd *)desc.data;\n+\thw->total_tqps_num = rte_le_to_cpu_16(req->tqp_num);\n+\tpf->pkt_buf_size = rte_le_to_cpu_16(req->buf_size) << HNS3_BUF_UNIT_S;\n+\thw->tqps_num = RTE_MIN(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);\n+\n+\tif (req->tx_buf_size)\n+\t\tpf->tx_buf_size =\n+\t\t    rte_le_to_cpu_16(req->tx_buf_size) << HNS3_BUF_UNIT_S;\n+\telse\n+\t\tpf->tx_buf_size = HNS3_DEFAULT_TX_BUF;\n+\n+\tpf->tx_buf_size = roundup(pf->tx_buf_size, HNS3_BUF_SIZE_UNIT);\n+\n+\tif (req->dv_buf_size)\n+\t\tpf->dv_buf_size =\n+\t\t    rte_le_to_cpu_16(req->dv_buf_size) << HNS3_BUF_UNIT_S;\n+\telse\n+\t\tpf->dv_buf_size = 
HNS3_DEFAULT_DV;\n+\n+\tpf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);\n+\n+\thw->num_msi =\n+\t    hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),\n+\t\t\t   HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);\n+\n+\treturn 0;\n+}\n+\n+static void\n+hns3_parse_cfg(struct hns3_cfg *cfg, struct hns3_cmd_desc *desc)\n+{\n+\tstruct hns3_cfg_param_cmd *req;\n+\tuint64_t mac_addr_tmp_high;\n+\tuint64_t mac_addr_tmp;\n+\tuint32_t i;\n+\n+\treq = (struct hns3_cfg_param_cmd *)desc[0].data;\n+\n+\t/* get the configuration */\n+\tcfg->vmdq_vport_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),\n+\t\t\t\t\t     HNS3_CFG_VMDQ_M, HNS3_CFG_VMDQ_S);\n+\tcfg->tc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),\n+\t\t\t\t     HNS3_CFG_TC_NUM_M, HNS3_CFG_TC_NUM_S);\n+\tcfg->tqp_desc_num = hns3_get_field(rte_le_to_cpu_32(req->param[0]),\n+\t\t\t\t\t   HNS3_CFG_TQP_DESC_N_M,\n+\t\t\t\t\t   HNS3_CFG_TQP_DESC_N_S);\n+\n+\tcfg->phy_addr = hns3_get_field(rte_le_to_cpu_32(req->param[1]),\n+\t\t\t\t       HNS3_CFG_PHY_ADDR_M,\n+\t\t\t\t       HNS3_CFG_PHY_ADDR_S);\n+\tcfg->media_type = hns3_get_field(rte_le_to_cpu_32(req->param[1]),\n+\t\t\t\t\t HNS3_CFG_MEDIA_TP_M,\n+\t\t\t\t\t HNS3_CFG_MEDIA_TP_S);\n+\tcfg->rx_buf_len = hns3_get_field(rte_le_to_cpu_32(req->param[1]),\n+\t\t\t\t\t HNS3_CFG_RX_BUF_LEN_M,\n+\t\t\t\t\t HNS3_CFG_RX_BUF_LEN_S);\n+\t/* get mac address */\n+\tmac_addr_tmp = rte_le_to_cpu_32(req->param[2]);\n+\tmac_addr_tmp_high = hns3_get_field(rte_le_to_cpu_32(req->param[3]),\n+\t\t\t\t\t   HNS3_CFG_MAC_ADDR_H_M,\n+\t\t\t\t\t   HNS3_CFG_MAC_ADDR_H_S);\n+\n+\tmac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;\n+\n+\tcfg->default_speed = hns3_get_field(rte_le_to_cpu_32(req->param[3]),\n+\t\t\t\t\t    HNS3_CFG_DEFAULT_SPEED_M,\n+\t\t\t\t\t    HNS3_CFG_DEFAULT_SPEED_S);\n+\tcfg->rss_size_max = hns3_get_field(rte_le_to_cpu_32(req->param[3]),\n+\t\t\t\t\t   HNS3_CFG_RSS_SIZE_M,\n+\t\t\t\t\t   HNS3_CFG_RSS_SIZE_S);\n+\n+\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++)\n+\t\tcfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;\n+\n+\treq = (struct hns3_cfg_param_cmd *)desc[1].data;\n+\tcfg->numa_node_map = rte_le_to_cpu_32(req->param[0]);\n+\n+\tcfg->speed_ability = hns3_get_field(rte_le_to_cpu_32(req->param[1]),\n+\t\t\t\t\t    HNS3_CFG_SPEED_ABILITY_M,\n+\t\t\t\t\t    HNS3_CFG_SPEED_ABILITY_S);\n+\tcfg->umv_space = hns3_get_field(rte_le_to_cpu_32(req->param[1]),\n+\t\t\t\t\tHNS3_CFG_UMV_TBL_SPACE_M,\n+\t\t\t\t\tHNS3_CFG_UMV_TBL_SPACE_S);\n+\tif (!cfg->umv_space)\n+\t\tcfg->umv_space = HNS3_DEFAULT_UMV_SPACE_PER_PF;\n+}\n+\n+/* hns3_get_board_cfg: query the static parameter from NCL_config file in flash\n+ * @hw: pointer to struct hns3_hw\n+ * @hcfg: the config structure to be getted\n+ */\n+static int\n+hns3_get_board_cfg(struct hns3_hw *hw, struct hns3_cfg *hcfg)\n+{\n+\tstruct hns3_cmd_desc desc[HNS3_PF_CFG_DESC_NUM];\n+\tstruct hns3_cfg_param_cmd *req;\n+\tuint32_t offset;\n+\tuint32_t i;\n+\tint ret;\n+\n+\tfor (i = 0; i < HNS3_PF_CFG_DESC_NUM; i++) {\n+\t\toffset = 0;\n+\t\treq = (struct hns3_cfg_param_cmd *)desc[i].data;\n+\t\thns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_CFG_PARAM,\n+\t\t\t\t\t  true);\n+\t\thns3_set_field(offset, HNS3_CFG_OFFSET_M, HNS3_CFG_OFFSET_S,\n+\t\t\t       i * HNS3_CFG_RD_LEN_BYTES);\n+\t\t/* Len should be divided by 4 when send to hardware */\n+\t\thns3_set_field(offset, HNS3_CFG_RD_LEN_M, HNS3_CFG_RD_LEN_S,\n+\t\t\t       HNS3_CFG_RD_LEN_BYTES / HNS3_CFG_RD_LEN_UNIT);\n+\t\treq->offset = rte_cpu_to_le_32(offset);\n+\t}\n+\n+\tret = hns3_cmd_send(hw, 
desc, HNS3_PF_CFG_DESC_NUM);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"get config failed %d.\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\thns3_parse_cfg(hcfg, desc);\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_parse_speed(int speed_cmd, uint32_t *speed)\n+{\n+\tswitch (speed_cmd) {\n+\tcase HNS3_CFG_SPEED_10M:\n+\t\t*speed = ETH_SPEED_NUM_10M;\n+\t\tbreak;\n+\tcase HNS3_CFG_SPEED_100M:\n+\t\t*speed = ETH_SPEED_NUM_100M;\n+\t\tbreak;\n+\tcase HNS3_CFG_SPEED_1G:\n+\t\t*speed = ETH_SPEED_NUM_1G;\n+\t\tbreak;\n+\tcase HNS3_CFG_SPEED_10G:\n+\t\t*speed = ETH_SPEED_NUM_10G;\n+\t\tbreak;\n+\tcase HNS3_CFG_SPEED_25G:\n+\t\t*speed = ETH_SPEED_NUM_25G;\n+\t\tbreak;\n+\tcase HNS3_CFG_SPEED_40G:\n+\t\t*speed = ETH_SPEED_NUM_40G;\n+\t\tbreak;\n+\tcase HNS3_CFG_SPEED_50G:\n+\t\t*speed = ETH_SPEED_NUM_50G;\n+\t\tbreak;\n+\tcase HNS3_CFG_SPEED_100G:\n+\t\t*speed = ETH_SPEED_NUM_100G;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_get_board_configuration(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_cfg cfg;\n+\tint ret;\n+\n+\tret = hns3_get_board_cfg(hw, &cfg);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"get board config failed %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tif (cfg.media_type == HNS3_MEDIA_TYPE_COPPER) {\n+\t\tPMD_INIT_LOG(ERR, \"media type is copper, not supported.\");\n+\t\treturn -EOPNOTSUPP;\n+\t}\n+\n+\thw->mac.media_type = cfg.media_type;\n+\thw->rss_size_max = cfg.rss_size_max;\n+\thw->rx_buf_len = cfg.rx_buf_len;\n+\tmemcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN);\n+\thw->mac.phy_addr = cfg.phy_addr;\n+\thw->mac.default_addr_setted = false;\n+\thw->num_tx_desc = cfg.tqp_desc_num;\n+\thw->num_rx_desc = cfg.tqp_desc_num;\n+\thw->dcb_info.num_pg = 1;\n+\thw->dcb_info.hw_pfc_map = 0;\n+\n+\tret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Get wrong speed %d, ret = %d\",\n+\t\t\t     cfg.default_speed, ret);\n+\t\treturn ret;\n+\t}\n+\n+\tpf->tc_max = cfg.tc_num;\n+\tif (pf->tc_max > HNS3_MAX_TC_NUM || pf->tc_max < 1) {\n+\t\tPMD_INIT_LOG(WARNING,\n+\t\t\t     \"Get TC num(%u) from flash, set TC num to 1\",\n+\t\t\t     pf->tc_max);\n+\t\tpf->tc_max = 1;\n+\t}\n+\n+\t/* Dev does not support DCB */\n+\tif (!hns3_dev_dcb_supported(hw)) {\n+\t\tpf->tc_max = 1;\n+\t\tpf->pfc_max = 0;\n+\t} else\n+\t\tpf->pfc_max = pf->tc_max;\n+\n+\thw->dcb_info.num_tc = 1;\n+\thw->alloc_rss_size = RTE_MIN(hw->rss_size_max,\n+\t\t\t\t     hw->tqps_num / hw->dcb_info.num_tc);\n+\thns3_set_bit(hw->hw_tc_map, 0, 1);\n+\tpf->tx_sch_mode = HNS3_FLAG_TC_BASE_SCH_MODE;\n+\n+\tpf->wanted_umv_size = cfg.umv_space;\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_get_configuration(struct hns3_hw *hw)\n+{\n+\tint ret;\n+\n+\tret = hns3_query_function_status(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to query function status: %d.\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\t/* Get pf resource */\n+\tret = hns3_query_pf_resource(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to query pf resource: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_get_board_configuration(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to get board configuration: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_map_tqps_to_func(struct hns3_hw *hw, uint16_t func_id, uint16_t tqp_pid,\n+\t\t      uint16_t tqp_vid, bool is_pf)\n+{\n+\tstruct hns3_tqp_map_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tint 
ret;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_SET_TQP_MAP, false);\n+\n+\treq = (struct hns3_tqp_map_cmd *)desc.data;\n+\treq->tqp_id = rte_cpu_to_le_16(tqp_pid);\n+\treq->tqp_vf = func_id;\n+\treq->tqp_flag = 1 << HNS3_TQP_MAP_EN_B;\n+\tif (!is_pf)\n+\t\treq->tqp_flag |= (1 << HNS3_TQP_MAP_TYPE_B);\n+\treq->tqp_vid = rte_cpu_to_le_16(tqp_vid);\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret)\n+\t\tPMD_INIT_LOG(ERR, \"TQP map failed %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_map_tqp(struct hns3_hw *hw)\n+{\n+\tuint16_t tqps_num = hw->total_tqps_num;\n+\tuint16_t func_id;\n+\tuint16_t tqp_id;\n+\tint num;\n+\tint ret;\n+\tint i;\n+\n+\t/*\n+\t * In current version VF is not supported when PF is taken over by DPDK,\n+\t * so we allocate tqps to PF as much as possible.\n+\t */\n+\ttqp_id = 0;\n+\tnum = DIV_ROUND_UP(hw->total_tqps_num, HNS3_MAX_TQP_NUM_PER_FUNC);\n+\tfor (func_id = 0; func_id < num; func_id++) {\n+\t\tfor (i = 0;\n+\t\t     i < HNS3_MAX_TQP_NUM_PER_FUNC && tqp_id < tqps_num; i++) {\n+\t\t\tret = hns3_map_tqps_to_func(hw, func_id, tqp_id++, i,\n+\t\t\t\t\t\t    true);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_cfg_mac_speed_dup_hw(struct hns3_hw *hw, uint32_t speed, uint8_t duplex)\n+{\n+\tstruct hns3_config_mac_speed_dup_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tint ret;\n+\n+\treq = (struct hns3_config_mac_speed_dup_cmd *)desc.data;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_SPEED_DUP, false);\n+\n+\thns3_set_bit(req->speed_dup, HNS3_CFG_DUPLEX_B, !!duplex ? 1 : 0);\n+\n+\tswitch (speed) {\n+\tcase ETH_SPEED_NUM_10M:\n+\t\thns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,\n+\t\t\t       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10M);\n+\t\tbreak;\n+\tcase ETH_SPEED_NUM_100M:\n+\t\thns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,\n+\t\t\t       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100M);\n+\t\tbreak;\n+\tcase ETH_SPEED_NUM_1G:\n+\t\thns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,\n+\t\t\t       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_1G);\n+\t\tbreak;\n+\tcase ETH_SPEED_NUM_10G:\n+\t\thns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,\n+\t\t\t       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_10G);\n+\t\tbreak;\n+\tcase ETH_SPEED_NUM_25G:\n+\t\thns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,\n+\t\t\t       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_25G);\n+\t\tbreak;\n+\tcase ETH_SPEED_NUM_40G:\n+\t\thns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,\n+\t\t\t       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_40G);\n+\t\tbreak;\n+\tcase ETH_SPEED_NUM_50G:\n+\t\thns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,\n+\t\t\t       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_50G);\n+\t\tbreak;\n+\tcase ETH_SPEED_NUM_100G:\n+\t\thns3_set_field(req->speed_dup, HNS3_CFG_SPEED_M,\n+\t\t\t       HNS3_CFG_SPEED_S, HNS3_CFG_SPEED_100G);\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_INIT_LOG(ERR, \"invalid speed (%u)\", speed);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thns3_set_bit(req->mac_change_fec_en, HNS3_CFG_MAC_SPEED_CHANGE_EN_B, 1);\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret)\n+\t\tPMD_INIT_LOG(ERR, \"mac speed/duplex config cmd failed %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_tx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_priv_buf *priv;\n+\tuint32_t i, total_size;\n+\n+\ttotal_size = pf->pkt_buf_size;\n+\n+\t/* alloc tx buffer for all enabled tc */\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tpriv = 
&buf_alloc->priv_buf[i];\n+\n+\t\tif (hw->hw_tc_map & BIT(i)) {\n+\t\t\tif (total_size < pf->tx_buf_size)\n+\t\t\t\treturn -ENOMEM;\n+\n+\t\t\tpriv->tx_buf_size = pf->tx_buf_size;\n+\t\t} else\n+\t\t\tpriv->tx_buf_size = 0;\n+\n+\t\ttotal_size -= priv->tx_buf_size;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_tx_buffer_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+/* TX buffer size is unit by 128 byte */\n+#define HNS3_BUF_SIZE_UNIT_SHIFT\t7\n+#define HNS3_BUF_SIZE_UPDATE_EN_MSK\tBIT(15)\n+\tstruct hns3_tx_buff_alloc_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tuint32_t buf_size;\n+\tuint32_t i;\n+\tint ret;\n+\n+\treq = (struct hns3_tx_buff_alloc_cmd *)desc.data;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TX_BUFF_ALLOC, 0);\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tbuf_size = buf_alloc->priv_buf[i].tx_buf_size;\n+\n+\t\tbuf_size = buf_size >> HNS3_BUF_SIZE_UNIT_SHIFT;\n+\t\treq->tx_pkt_buff[i] = rte_cpu_to_le_16(buf_size |\n+\t\t\t\t\t\tHNS3_BUF_SIZE_UPDATE_EN_MSK);\n+\t}\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret)\n+\t\tPMD_INIT_LOG(ERR, \"tx buffer alloc cmd failed %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_get_tc_num(struct hns3_hw *hw)\n+{\n+\tint cnt = 0;\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++)\n+\t\tif (hw->hw_tc_map & BIT(i))\n+\t\t\tcnt++;\n+\treturn cnt;\n+}\n+\n+static uint32_t\n+hns3_get_rx_priv_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tstruct hns3_priv_buf *priv;\n+\tuint32_t rx_priv = 0;\n+\tint i;\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tpriv = &buf_alloc->priv_buf[i];\n+\t\tif (priv->enable)\n+\t\t\trx_priv += priv->buf_size;\n+\t}\n+\treturn rx_priv;\n+}\n+\n+static uint32_t\n+hns3_get_tx_buff_alloced(struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tuint32_t total_tx_size = 0;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++)\n+\t\ttotal_tx_size += buf_alloc->priv_buf[i].tx_buf_size;\n+\n+\treturn total_tx_size;\n+}\n+\n+/* Get the number of pfc enabled TCs, which have private buffer */\n+static int\n+hns3_get_pfc_priv_num(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tstruct hns3_priv_buf *priv;\n+\tint cnt = 0;\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tpriv = &buf_alloc->priv_buf[i];\n+\t\tif ((hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)\n+\t\t\tcnt++;\n+\t}\n+\n+\treturn cnt;\n+}\n+\n+/* Get the number of pfc disabled TCs, which have private buffer */\n+static int\n+hns3_get_no_pfc_priv_num(struct hns3_hw *hw,\n+\t\t\t struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tstruct hns3_priv_buf *priv;\n+\tint cnt = 0;\n+\tuint8_t i;\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tpriv = &buf_alloc->priv_buf[i];\n+\t\tif (hw->hw_tc_map & BIT(i) &&\n+\t\t    !(hw->dcb_info.hw_pfc_map & BIT(i)) && priv->enable)\n+\t\t\tcnt++;\n+\t}\n+\n+\treturn cnt;\n+}\n+\n+static bool\n+hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,\n+\t\t  uint32_t rx_all)\n+{\n+\tuint32_t shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tuint32_t shared_buf, aligned_mps;\n+\tuint32_t rx_priv;\n+\tuint8_t tc_num;\n+\tuint8_t i;\n+\n+\ttc_num = hns3_get_tc_num(hw);\n+\taligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);\n+\n+\tif (hns3_dev_dcb_supported(hw))\n+\t\tshared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +\n+\t\t\t\t\tpf->dv_buf_size;\n+\telse\n+\t\tshared_buf_min = aligned_mps + 
HNS3_NON_DCB_ADDITIONAL_BUF\n+\t\t\t\t\t+ pf->dv_buf_size;\n+\n+\tshared_buf_tc = tc_num * aligned_mps + aligned_mps;\n+\tshared_std = roundup(max_t(uint32_t, shared_buf_min, shared_buf_tc),\n+\t\t\t     HNS3_BUF_SIZE_UNIT);\n+\n+\trx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);\n+\tif (rx_all < rx_priv + shared_std)\n+\t\treturn false;\n+\n+\tshared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);\n+\tbuf_alloc->s_buf.buf_size = shared_buf;\n+\tif (hns3_dev_dcb_supported(hw)) {\n+\t\tbuf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;\n+\t\tbuf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high\n+\t\t\t- roundup(aligned_mps / HNS3_BUF_DIV_BY,\n+\t\t\t\t  HNS3_BUF_SIZE_UNIT);\n+\t} else {\n+\t\tbuf_alloc->s_buf.self.high =\n+\t\t\taligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;\n+\t\tbuf_alloc->s_buf.self.low = aligned_mps;\n+\t}\n+\n+\tif (hns3_dev_dcb_supported(hw)) {\n+\t\thi_thrd = shared_buf - pf->dv_buf_size;\n+\n+\t\tif (tc_num <= NEED_RESERVE_TC_NUM)\n+\t\t\thi_thrd = hi_thrd * BUF_RESERVE_PERCENT\n+\t\t\t\t\t/ BUF_MAX_PERCENT;\n+\n+\t\tif (tc_num)\n+\t\t\thi_thrd = hi_thrd / tc_num;\n+\n+\t\thi_thrd = max_t(uint32_t, hi_thrd,\n+\t\t\t\tHNS3_BUF_MUL_BY * aligned_mps);\n+\t\thi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);\n+\t\tlo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;\n+\t} else {\n+\t\thi_thrd = aligned_mps + HNS3_NON_DCB_ADDITIONAL_BUF;\n+\t\tlo_thrd = aligned_mps;\n+\t}\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tbuf_alloc->s_buf.tc_thrd[i].low = lo_thrd;\n+\t\tbuf_alloc->s_buf.tc_thrd[i].high = hi_thrd;\n+\t}\n+\n+\treturn true;\n+}\n+\n+static bool\n+hns3_rx_buf_calc_all(struct hns3_hw *hw, bool max,\n+\t\t     struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_priv_buf *priv;\n+\tuint32_t aligned_mps;\n+\tuint32_t rx_all;\n+\tuint8_t i;\n+\n+\trx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);\n+\taligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tpriv = &buf_alloc->priv_buf[i];\n+\n+\t\tpriv->enable = 0;\n+\t\tpriv->wl.low = 0;\n+\t\tpriv->wl.high = 0;\n+\t\tpriv->buf_size = 0;\n+\n+\t\tif (!(hw->hw_tc_map & BIT(i)))\n+\t\t\tcontinue;\n+\n+\t\tpriv->enable = 1;\n+\t\tif (hw->dcb_info.hw_pfc_map & BIT(i)) {\n+\t\t\tpriv->wl.low = max ? aligned_mps : HNS3_BUF_SIZE_UNIT;\n+\t\t\tpriv->wl.high = roundup(priv->wl.low + aligned_mps,\n+\t\t\t\t\t\tHNS3_BUF_SIZE_UNIT);\n+\t\t} else {\n+\t\t\tpriv->wl.low = 0;\n+\t\t\tpriv->wl.high = max ? 
(aligned_mps * HNS3_BUF_MUL_BY) :\n+\t\t\t\t\taligned_mps;\n+\t\t}\n+\n+\t\tpriv->buf_size = priv->wl.high + pf->dv_buf_size;\n+\t}\n+\n+\treturn hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);\n+}\n+\n+static bool\n+hns3_drop_nopfc_buf_till_fit(struct hns3_hw *hw,\n+\t\t\t     struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_priv_buf *priv;\n+\tint no_pfc_priv_num;\n+\tuint32_t rx_all;\n+\tuint8_t mask;\n+\tint i;\n+\n+\trx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);\n+\tno_pfc_priv_num = hns3_get_no_pfc_priv_num(hw, buf_alloc);\n+\n+\t/* let the last to be cleared first */\n+\tfor (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {\n+\t\tpriv = &buf_alloc->priv_buf[i];\n+\t\tmask = BIT((uint8_t)i);\n+\n+\t\tif (hw->hw_tc_map & mask &&\n+\t\t    !(hw->dcb_info.hw_pfc_map & mask)) {\n+\t\t\t/* Clear the no pfc TC private buffer */\n+\t\t\tpriv->wl.low = 0;\n+\t\t\tpriv->wl.high = 0;\n+\t\t\tpriv->buf_size = 0;\n+\t\t\tpriv->enable = 0;\n+\t\t\tno_pfc_priv_num--;\n+\t\t}\n+\n+\t\tif (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||\n+\t\t    no_pfc_priv_num == 0)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);\n+}\n+\n+static bool\n+hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw,\n+\t\t\t   struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tstruct hns3_priv_buf *priv;\n+\tuint32_t rx_all;\n+\tint pfc_priv_num;\n+\tuint8_t mask;\n+\tint i;\n+\n+\trx_all = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);\n+\tpfc_priv_num = hns3_get_pfc_priv_num(hw, buf_alloc);\n+\n+\t/* let the last to be cleared first */\n+\tfor (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) {\n+\t\tpriv = &buf_alloc->priv_buf[i];\n+\t\tmask = BIT((uint8_t)i);\n+\n+\t\tif (hw->hw_tc_map & mask &&\n+\t\t    hw->dcb_info.hw_pfc_map & mask) {\n+\t\t\t/* Reduce the number of pfc TC with private buffer */\n+\t\t\tpriv->wl.low = 0;\n+\t\t\tpriv->enable = 0;\n+\t\t\tpriv->wl.high = 0;\n+\t\t\tpriv->buf_size = 0;\n+\t\t\tpfc_priv_num--;\n+\t\t}\n+\t\tif (hns3_is_rx_buf_ok(hw, buf_alloc, rx_all) ||\n+\t\t    pfc_priv_num == 0)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn hns3_is_rx_buf_ok(hw, buf_alloc, rx_all);\n+}\n+\n+static bool\n+hns3_only_alloc_priv_buff(struct hns3_hw *hw,\n+\t\t\t  struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+#define COMPENSATE_BUFFER\t0x3C00\n+#define COMPENSATE_HALF_MPS_NUM\t5\n+#define PRIV_WL_GAP\t\t0x1800\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tuint32_t tc_num = hns3_get_tc_num(hw);\n+\tuint32_t half_mps = pf->mps >> 1;\n+\tstruct hns3_priv_buf *priv;\n+\tuint32_t min_rx_priv;\n+\tuint32_t rx_priv;\n+\tuint8_t i;\n+\n+\trx_priv = pf->pkt_buf_size - hns3_get_tx_buff_alloced(buf_alloc);\n+\tif (tc_num)\n+\t\trx_priv = rx_priv / tc_num;\n+\n+\tif (tc_num <= NEED_RESERVE_TC_NUM)\n+\t\trx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;\n+\n+\t/*\n+\t * Minimum value of private buffer in rx direction (min_rx_priv) is\n+\t * equal to \"DV + 2.5 * MPS + 15KB\". 
Driver only allocates rx private\n+\t * buffer if rx_priv is greater than min_rx_priv.\n+\t */\n+\tmin_rx_priv = pf->dv_buf_size + COMPENSATE_BUFFER +\n+\t\t\tCOMPENSATE_HALF_MPS_NUM * half_mps;\n+\tmin_rx_priv = roundup(min_rx_priv, HNS3_BUF_SIZE_UNIT);\n+\trx_priv = rounddown(rx_priv, HNS3_BUF_SIZE_UNIT);\n+\n+\tif (rx_priv < min_rx_priv)\n+\t\treturn false;\n+\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tpriv = &buf_alloc->priv_buf[i];\n+\n+\t\tpriv->enable = 0;\n+\t\tpriv->wl.low = 0;\n+\t\tpriv->wl.high = 0;\n+\t\tpriv->buf_size = 0;\n+\n+\t\tif (!(hw->hw_tc_map & BIT(i)))\n+\t\t\tcontinue;\n+\n+\t\tpriv->enable = 1;\n+\t\tpriv->buf_size = rx_priv;\n+\t\tpriv->wl.high = rx_priv - pf->dv_buf_size;\n+\t\tpriv->wl.low = priv->wl.high - PRIV_WL_GAP;\n+\t}\n+\n+\tbuf_alloc->s_buf.buf_size = 0;\n+\n+\treturn true;\n+}\n+\n+/*\n+ * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs\n+ * @hw: pointer to struct hns3_hw\n+ * @buf_alloc: pointer to buffer calculation data\n+ * @return: 0: calculate sucessful, negative: fail\n+ */\n+static int\n+hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\t/* When DCB is not supported, rx private buffer is not allocated. */\n+\tif (!hns3_dev_dcb_supported(hw)) {\n+\t\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\t\tstruct hns3_pf *pf = &hns->pf;\n+\t\tuint32_t rx_all = pf->pkt_buf_size;\n+\n+\t\trx_all -= hns3_get_tx_buff_alloced(buf_alloc);\n+\t\tif (!hns3_is_rx_buf_ok(hw, buf_alloc, rx_all))\n+\t\t\treturn -ENOMEM;\n+\n+\t\treturn 0;\n+\t}\n+\n+\t/*\n+\t * Try to allocate privated packet buffer for all TCs without share\n+\t * buffer.\n+\t */\n+\tif (hns3_only_alloc_priv_buff(hw, buf_alloc))\n+\t\treturn 0;\n+\n+\t/*\n+\t * Try to allocate privated packet buffer for all TCs with share\n+\t * buffer.\n+\t */\n+\tif (hns3_rx_buf_calc_all(hw, true, buf_alloc))\n+\t\treturn 0;\n+\n+\t/*\n+\t * For different application scenes, the enabled port number, TC number\n+\t * and no_drop TC number are different. 
In order to obtain the better\n+\t * performance, software could allocate the buffer size and configure\n+\t * the waterline by tring to decrease the private buffer size according\n+\t * to the order, namely, waterline of valided tc, pfc disabled tc, pfc\n+\t * enabled tc.\n+\t */\n+\tif (hns3_rx_buf_calc_all(hw, false, buf_alloc))\n+\t\treturn 0;\n+\n+\tif (hns3_drop_nopfc_buf_till_fit(hw, buf_alloc))\n+\t\treturn 0;\n+\n+\tif (hns3_drop_pfc_buf_till_fit(hw, buf_alloc))\n+\t\treturn 0;\n+\n+\treturn -ENOMEM;\n+}\n+\n+static int\n+hns3_rx_priv_buf_alloc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tstruct hns3_rx_priv_buff_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tuint32_t buf_size;\n+\tint ret;\n+\tint i;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_PRIV_BUFF_ALLOC, false);\n+\treq = (struct hns3_rx_priv_buff_cmd *)desc.data;\n+\n+\t/* Alloc private buffer TCs */\n+\tfor (i = 0; i < HNS3_MAX_TC_NUM; i++) {\n+\t\tstruct hns3_priv_buf *priv = &buf_alloc->priv_buf[i];\n+\n+\t\treq->buf_num[i] =\n+\t\t\trte_cpu_to_le_16(priv->buf_size >> HNS3_BUF_UNIT_S);\n+\t\treq->buf_num[i] |= rte_cpu_to_le_16(1 << HNS3_TC0_PRI_BUF_EN_B);\n+\t}\n+\n+\tbuf_size = buf_alloc->s_buf.buf_size;\n+\treq->shared_buf = rte_cpu_to_le_16((buf_size >> HNS3_BUF_UNIT_S) |\n+\t\t\t\t\t   (1 << HNS3_TC0_PRI_BUF_EN_B));\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret)\n+\t\tPMD_INIT_LOG(ERR, \"rx private buffer alloc cmd failed %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_rx_priv_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+#define HNS3_RX_PRIV_WL_ALLOC_DESC_NUM 2\n+\tstruct hns3_rx_priv_wl_buf *req;\n+\tstruct hns3_priv_buf *priv;\n+\tstruct hns3_cmd_desc desc[HNS3_RX_PRIV_WL_ALLOC_DESC_NUM];\n+\tint i, j;\n+\tint ret;\n+\n+\tfor (i = 0; i < HNS3_RX_PRIV_WL_ALLOC_DESC_NUM; i++) {\n+\t\thns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_PRIV_WL_ALLOC,\n+\t\t\t\t\t  false);\n+\t\treq = (struct hns3_rx_priv_wl_buf *)desc[i].data;\n+\n+\t\t/* The first descriptor set the NEXT bit to 1 */\n+\t\tif (i == 0)\n+\t\t\tdesc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);\n+\t\telse\n+\t\t\tdesc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);\n+\n+\t\tfor (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {\n+\t\t\tuint32_t idx = i * HNS3_TC_NUM_ONE_DESC + j;\n+\n+\t\t\tpriv = &buf_alloc->priv_buf[idx];\n+\t\t\treq->tc_wl[j].high = rte_cpu_to_le_16(priv->wl.high >>\n+\t\t\t\t\t\t\tHNS3_BUF_UNIT_S);\n+\t\t\treq->tc_wl[j].high |=\n+\t\t\t\trte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));\n+\t\t\treq->tc_wl[j].low = rte_cpu_to_le_16(priv->wl.low >>\n+\t\t\t\t\t\t\tHNS3_BUF_UNIT_S);\n+\t\t\treq->tc_wl[j].low |=\n+\t\t\t\trte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));\n+\t\t}\n+\t}\n+\n+\t/* Send 2 descriptor at one time */\n+\tret = hns3_cmd_send(hw, desc, HNS3_RX_PRIV_WL_ALLOC_DESC_NUM);\n+\tif (ret)\n+\t\tPMD_INIT_LOG(ERR, \"rx private waterline config cmd failed %d\",\n+\t\t\t     ret);\n+\treturn ret;\n+}\n+\n+static int\n+hns3_common_thrd_config(struct hns3_hw *hw,\n+\t\t\tstruct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+#define HNS3_RX_COM_THRD_ALLOC_DESC_NUM 2\n+\tstruct hns3_shared_buf *s_buf = &buf_alloc->s_buf;\n+\tstruct hns3_rx_com_thrd *req;\n+\tstruct hns3_cmd_desc desc[HNS3_RX_COM_THRD_ALLOC_DESC_NUM];\n+\tstruct hns3_tc_thrd *tc;\n+\tint tc_idx;\n+\tint i, j;\n+\tint ret;\n+\n+\tfor (i = 0; i < HNS3_RX_COM_THRD_ALLOC_DESC_NUM; i++) {\n+\t\thns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_RX_COM_THRD_ALLOC,\n+\t\t\t\t\t  false);\n+\t\treq = (struct hns3_rx_com_thrd 
*)&desc[i].data;\n+\n+\t\t/* The first descriptor set the NEXT bit to 1 */\n+\t\tif (i == 0)\n+\t\t\tdesc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);\n+\t\telse\n+\t\t\tdesc[i].flag &= ~rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);\n+\n+\t\tfor (j = 0; j < HNS3_TC_NUM_ONE_DESC; j++) {\n+\t\t\ttc_idx = i * HNS3_TC_NUM_ONE_DESC + j;\n+\t\t\ttc = &s_buf->tc_thrd[tc_idx];\n+\n+\t\t\treq->com_thrd[j].high =\n+\t\t\t\trte_cpu_to_le_16(tc->high >> HNS3_BUF_UNIT_S);\n+\t\t\treq->com_thrd[j].high |=\n+\t\t\t\t rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));\n+\t\t\treq->com_thrd[j].low =\n+\t\t\t\trte_cpu_to_le_16(tc->low >> HNS3_BUF_UNIT_S);\n+\t\t\treq->com_thrd[j].low |=\n+\t\t\t\t rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));\n+\t\t}\n+\t}\n+\n+\t/* Send 2 descriptors at one time */\n+\tret = hns3_cmd_send(hw, desc, HNS3_RX_COM_THRD_ALLOC_DESC_NUM);\n+\tif (ret)\n+\t\tPMD_INIT_LOG(ERR, \"common threshold config cmd failed %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_common_wl_config(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)\n+{\n+\tstruct hns3_shared_buf *buf = &buf_alloc->s_buf;\n+\tstruct hns3_rx_com_wl *req;\n+\tstruct hns3_cmd_desc desc;\n+\tint ret;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RX_COM_WL_ALLOC, false);\n+\n+\treq = (struct hns3_rx_com_wl *)desc.data;\n+\treq->com_wl.high = rte_cpu_to_le_16(buf->self.high >> HNS3_BUF_UNIT_S);\n+\treq->com_wl.high |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));\n+\n+\treq->com_wl.low = rte_cpu_to_le_16(buf->self.low >> HNS3_BUF_UNIT_S);\n+\treq->com_wl.low |= rte_cpu_to_le_16(BIT(HNS3_RX_PRIV_EN_B));\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret)\n+\t\tPMD_INIT_LOG(ERR, \"common waterline config cmd failed %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+int\n+hns3_buffer_alloc(struct hns3_hw *hw)\n+{\n+\tstruct hns3_pkt_buf_alloc pkt_buf;\n+\tint ret;\n+\n+\tmemset(&pkt_buf, 0, sizeof(pkt_buf));\n+\tret = hns3_tx_buffer_calc(hw, &pkt_buf);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR,\n+\t\t\t     \"could not calc tx buffer size for all TCs %d\",\n+\t\t\t     ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_tx_buffer_alloc(hw, &pkt_buf);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"could not alloc tx buffers %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_rx_buffer_calc(hw, &pkt_buf);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR,\n+\t\t\t     \"could not calc rx priv buffer size for all TCs %d\",\n+\t\t\t     ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_rx_priv_buf_alloc(hw, &pkt_buf);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"could not alloc rx priv buffer %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tif (hns3_dev_dcb_supported(hw)) {\n+\t\tret = hns3_rx_priv_wl_config(hw, &pkt_buf);\n+\t\tif (ret) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t     \"could not configure rx private waterline %d\",\n+\t\t\t\t     ret);\n+\t\t\treturn ret;\n+\t\t}\n+\n+\t\tret = hns3_common_thrd_config(hw, &pkt_buf);\n+\t\tif (ret) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t     \"could not configure common threshold %d\",\n+\t\t\t\t     ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\tret = hns3_common_wl_config(hw, &pkt_buf);\n+\tif (ret)\n+\t\tPMD_INIT_LOG(ERR, \"could not configure common waterline %d\",\n+\t\t\t     ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_mac_init(struct hns3_hw *hw)\n+{\n+\tstruct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);\n+\tstruct hns3_mac *mac = &hw->mac;\n+\tstruct hns3_pf *pf = &hns->pf;\n+\tint ret;\n+\n+\tpf->support_sfp_query = true;\n+\tmac->link_duplex = ETH_LINK_FULL_DUPLEX;\n+\tret = hns3_cfg_mac_speed_dup_hw(hw, 
mac->link_speed, mac->link_duplex);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Config mac speed dup fail ret = %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tmac->link_status = ETH_LINK_DOWN;\n+\n+\treturn hns3_config_mtu(hw, pf->mps);\n+}\n+\n+static int\n+hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)\n+{\n+#define HNS3_ETHERTYPE_SUCCESS_ADD\t\t0\n+#define HNS3_ETHERTYPE_ALREADY_ADD\t\t1\n+#define HNS3_ETHERTYPE_MGR_TBL_OVERFLOW\t\t2\n+#define HNS3_ETHERTYPE_KEY_CONFLICT\t\t3\n+\tint return_status;\n+\n+\tif (cmdq_resp) {\n+\t\tPMD_INIT_LOG(ERR,\n+\t\t\t     \"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\\n\",\n+\t\t\t     cmdq_resp);\n+\t\treturn -EIO;\n+\t}\n+\n+\tswitch (resp_code) {\n+\tcase HNS3_ETHERTYPE_SUCCESS_ADD:\n+\tcase HNS3_ETHERTYPE_ALREADY_ADD:\n+\t\treturn_status = 0;\n+\t\tbreak;\n+\tcase HNS3_ETHERTYPE_MGR_TBL_OVERFLOW:\n+\t\tPMD_INIT_LOG(ERR,\n+\t\t\t     \"add mac ethertype failed for manager table overflow.\");\n+\t\treturn_status = -EIO;\n+\t\tbreak;\n+\tcase HNS3_ETHERTYPE_KEY_CONFLICT:\n+\t\tPMD_INIT_LOG(ERR, \"add mac ethertype failed for key conflict.\");\n+\t\treturn_status = -EIO;\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_INIT_LOG(ERR,\n+\t\t\t     \"add mac ethertype failed for undefined, code=%d.\",\n+\t\t\t     resp_code);\n+\t\treturn_status = -EIO;\n+\t}\n+\n+\treturn return_status;\n+}\n+\n+static int\n+hns3_add_mgr_tbl(struct hns3_hw *hw,\n+\t\t const struct hns3_mac_mgr_tbl_entry_cmd *req)\n+{\n+\tstruct hns3_cmd_desc desc;\n+\tuint8_t resp_code;\n+\tuint16_t retval;\n+\tint ret;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_ETHTYPE_ADD, false);\n+\tmemcpy(desc.data, req, sizeof(struct hns3_mac_mgr_tbl_entry_cmd));\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR,\n+\t\t\t     \"add mac ethertype failed for cmd_send, ret =%d.\",\n+\t\t\t     ret);\n+\t\treturn ret;\n+\t}\n+\n+\tresp_code = (rte_le_to_cpu_32(desc.data[0]) >> 8) & 0xff;\n+\tretval = rte_le_to_cpu_16(desc.retval);\n+\n+\treturn hns3_get_mac_ethertype_cmd_status(retval, resp_code);\n+}\n+\n+static void\n+hns3_prepare_mgr_tbl(struct hns3_mac_mgr_tbl_entry_cmd *mgr_table,\n+\t\t     int *table_item_num)\n+{\n+\tstruct hns3_mac_mgr_tbl_entry_cmd *tbl;\n+\n+\t/*\n+\t * In current version, we add one item in management table as below:\n+\t * 0x0180C200000E -- LLDP MC address\n+\t */\n+\ttbl = mgr_table;\n+\ttbl->flags = HNS3_MAC_MGR_MASK_VLAN_B;\n+\ttbl->ethter_type = rte_cpu_to_le_16(HNS3_MAC_ETHERTYPE_LLDP);\n+\ttbl->mac_addr_hi32 = rte_cpu_to_le_32(htonl(0x0180C200));\n+\ttbl->mac_addr_lo16 = rte_cpu_to_le_16(htons(0x000E));\n+\ttbl->i_port_bitmap = 0x1;\n+\t*table_item_num = 1;\n+}\n+\n+static int\n+hns3_init_mgr_tbl(struct hns3_hw *hw)\n+{\n+#define HNS_MAC_MGR_TBL_MAX_SIZE\t16\n+\tstruct hns3_mac_mgr_tbl_entry_cmd mgr_table[HNS_MAC_MGR_TBL_MAX_SIZE];\n+\tint table_item_num;\n+\tint ret;\n+\tint i;\n+\n+\thns3_prepare_mgr_tbl(mgr_table, &table_item_num);\n+\tfor (i = 0; i < table_item_num; i++) {\n+\t\tret = hns3_add_mgr_tbl(hw, &mgr_table[i]);\n+\t\tif (ret) {\n+\t\t\tPMD_INIT_LOG(ERR, \"add mac ethertype failed, ret =%d\",\n+\t\t\t\t     ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+hns3_promisc_param_init(struct hns3_promisc_param *param, bool en_uc,\n+\t\t\tbool en_mc, bool en_bc, int vport_id)\n+{\n+\tif (!param)\n+\t\treturn;\n+\n+\tmemset(param, 0, sizeof(struct hns3_promisc_param));\n+\tif (en_uc)\n+\t\tparam->enable = HNS3_PROMISC_EN_UC;\n+\tif (en_mc)\n+\t\tparam->enable |= 
HNS3_PROMISC_EN_MC;\n+\tif (en_bc)\n+\t\tparam->enable |= HNS3_PROMISC_EN_BC;\n+\tparam->vf_id = vport_id;\n+}\n+\n+static int\n+hns3_cmd_set_promisc_mode(struct hns3_hw *hw, struct hns3_promisc_param *param)\n+{\n+\tstruct hns3_promisc_cfg_cmd *req;\n+\tstruct hns3_cmd_desc desc;\n+\tint ret;\n+\n+\thns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PROMISC_MODE, false);\n+\n+\treq = (struct hns3_promisc_cfg_cmd *)desc.data;\n+\treq->vf_id = param->vf_id;\n+\treq->flag = (param->enable << HNS3_PROMISC_EN_B) |\n+\t    HNS3_PROMISC_TX_EN_B | HNS3_PROMISC_RX_EN_B;\n+\n+\tret = hns3_cmd_send(hw, &desc, 1);\n+\tif (ret)\n+\t\tPMD_INIT_LOG(ERR, \"Set promisc mode fail, status is %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+static int\n+hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)\n+{\n+\tstruct hns3_promisc_param param;\n+\tbool en_bc_pmc = true;\n+\tuint8_t vf_id;\n+\tint ret;\n+\n+\t/*\n+\t * In current version VF is not supported when PF is taken over by DPDK,\n+\t * the PF-related vf_id is 0, just need to configure parameters for\n+\t * vf_id 0.\n+\t */\n+\tvf_id = 0;\n+\n+\thns3_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc, vf_id);\n+\tret = hns3_cmd_set_promisc_mode(hw, &param);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\treturn 0;\n+}\n+\n+static int\n+hns3_init_hardware(struct hns3_adapter *hns)\n+{\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tint ret;\n+\n+\tret = hns3_map_tqp(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to map tqp: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_init_umv_space(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init umv space: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\tret = hns3_mac_init(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init MAC: %d\", ret);\n+\t\tgoto err_mac_init;\n+\t}\n+\n+\tret = hns3_init_mgr_tbl(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init manager table: %d\", ret);\n+\t\tgoto err_mac_init;\n+\t}\n+\n+\tret = hns3_set_promisc_mode(hw, false, false);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to set promisc mode: %d\", ret);\n+\t\tgoto err_mac_init;\n+\t}\n+\n+\tret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to config tso: %d\", ret);\n+\t\tgoto err_mac_init;\n+\t}\n+\n+\tret = hns3_config_gro(hw, false);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to config gro: %d\", ret);\n+\t\tgoto err_mac_init;\n+\t}\n+\treturn 0;\n+\n+err_mac_init:\n+\thns3_uninit_umv_space(hw);\n+\treturn ret;\n+}\n+\n+static int\n hns3_init_pf(struct rte_eth_dev *eth_dev)\n {\n \tstruct rte_device *dev = eth_dev->device;\n@@ -67,8 +1526,24 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)\n \t\tgoto err_cmd_init;\n \t}\n \n+\t/* Get configuration */\n+\tret = hns3_get_configuration(hw);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to fetch configuration: %d\", ret);\n+\t\tgoto err_get_config;\n+\t}\n+\n+\tret = hns3_init_hardware(hns);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to init hardware: %d\", ret);\n+\t\tgoto err_get_config;\n+\t}\n+\n \treturn 0;\n \n+err_get_config:\n+\thns3_cmd_uninit(hw);\n+\n err_cmd_init:\n \thns3_cmd_destroy_queue(hw);\n \n@@ -88,6 +1563,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)\n \n \tPMD_INIT_FUNC_TRACE();\n \n+\thns3_uninit_umv_space(hw);\n \thns3_cmd_uninit(hw);\n \thns3_cmd_destroy_queue(hw);\n \thw->io_base = NULL;\n@@ -128,6 +1604,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)\n \n \thns->is_vf = false;\n \thw->data = eth_dev->data;\n+\thns->pf.mps = 
HNS3_DEFAULT_FRAME_LEN;\n \n \tret = hns3_init_pf(eth_dev);\n \tif (ret) {\n@@ -135,10 +1612,30 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)\n \t\tgoto err_init_pf;\n \t}\n \n+\t/* Allocate memory for storing MAC addresses */\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"hns3-mac\",\n+\t\t\t\t\t       sizeof(struct rte_ether_addr) *\n+\t\t\t\t\t       HNS3_UC_MACADDR_NUM, 0);\n+\tif (eth_dev->data->mac_addrs == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate %ld bytes needed to store\"\n+\t\t\t     \" MAC addresses\",\n+\t\t\t     sizeof(struct rte_ether_addr) *\n+\t\t\t     HNS3_UC_MACADDR_NUM);\n+\t\tret = -ENOMEM;\n+\t\tgoto err_rte_zmalloc;\n+\t}\n+\n+\trte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,\n+\t\t\t    &eth_dev->data->mac_addrs[0]);\n+\n \thw->adapter_state = HNS3_NIC_INITIALIZED;\n+\thns3_info(hw, \"hns3 dev initialization successful!\");\n \n \treturn 0;\n \n+err_rte_zmalloc:\n+\thns3_uninit_pf(eth_dev);\n+\n err_init_pf:\n \teth_dev->dev_ops = NULL;\n \treturn ret;\ndiff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h\nindex 84fcf34..d5f62fe 100644\n--- a/drivers/net/hns3/hns3_ethdev.h\n+++ b/drivers/net/hns3/hns3_ethdev.h\n@@ -606,4 +606,7 @@ hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)\n \treturn __sync_fetch_and_and(addr, ~mask) & mask;\n }\n \n+int hns3_buffer_alloc(struct hns3_hw *hw);\n+int hns3_config_gro(struct hns3_hw *hw, bool en);\n+\n #endif /* _HNS3_ETHDEV_H_ */\n",
    "prefixes": [
        "05/22"
    ]
}
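
Several fields in the response chain into further API resources: "mbox" serves the raw patch in mbox form (suitable for git am), and "checks" lists the per-context CI results behind the aggregate "check": "fail". A short sketch of following those links, again assuming the Python requests library; field names are taken from the response above and the output filename is arbitrary.

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/57860/").json()

# Fetch the raw mbox so the patch can be applied locally with "git am".
with open("hns3-05-22.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# Expand the aggregate check ("fail" above) into the individual CI contexts.
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])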