get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update; all writable fields are replaced).
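
As an illustration only (not part of the Patchwork API reference), the sketch below shows one way to read this endpoint from Python using the third-party requests library, assuming it is installed. The field names used ("name", "state", "mbox") appear in the JSON response reproduced below; the saved file name is hypothetical.

    import requests

    BASE_URL = "http://patches.dpdk.org/api"

    # Ask for JSON explicitly; the page below was rendered with ?format=api.
    resp = requests.get(f"{BASE_URL}/patches/53564/",
                        headers={"Accept": "application/json"})
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields shown in the response body below.
    print(patch["name"])    # patch subject
    print(patch["state"])   # e.g. "superseded"

    # The "mbox" URL serves the raw patch; save it for use with git am.
    mbox = requests.get(patch["mbox"])
    mbox.raise_for_status()
    with open("53564.mbox", "wb") as f:
        f.write(mbox.content)

Updating a patch with PATCH or PUT follows the same pattern but requires an authenticated request with appropriate permissions on the project.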

GET /api/patches/53564/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 53564,
    "url": "http://patches.dpdk.org/api/patches/53564/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1558426862-187176-1-git-send-email-xuanziyang2@huawei.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1558426862-187176-1-git-send-email-xuanziyang2@huawei.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1558426862-187176-1-git-send-email-xuanziyang2@huawei.com",
    "date": "2019-05-21T08:21:02",
    "name": "[06/11] net/hinic/base: add code for nic business",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8be13300590210ecce842406febd7d8a4a65b583",
    "submitter": {
        "id": 1321,
        "url": "http://patches.dpdk.org/api/people/1321/?format=api",
        "name": "Ziyang Xuan",
        "email": "xuanziyang2@huawei.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1558426862-187176-1-git-send-email-xuanziyang2@huawei.com/mbox/",
    "series": [
        {
            "id": 4727,
            "url": "http://patches.dpdk.org/api/series/4727/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4727",
            "date": "2019-05-21T08:13:07",
            "name": "A new net PMD - hinic",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/4727/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/53564/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/53564/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 8730F58EC;\n\tTue, 21 May 2019 10:10:27 +0200 (CEST)",
            "from huawei.com (szxga04-in.huawei.com [45.249.212.190])\n\tby dpdk.org (Postfix) with ESMTP id 2141C4CA7\n\tfor <dev@dpdk.org>; Tue, 21 May 2019 10:10:25 +0200 (CEST)",
            "from DGGEMS403-HUB.china.huawei.com (unknown [172.30.72.60])\n\tby Forcepoint Email with ESMTP id 87731FEB46DC66F4FFF0\n\tfor <dev@dpdk.org>; Tue, 21 May 2019 16:10:23 +0800 (CST)",
            "from tester_149.localdomain (10.175.119.39) by\n\tDGGEMS403-HUB.china.huawei.com (10.3.19.203) with Microsoft SMTP\n\tServer id 14.3.439.0; Tue, 21 May 2019 16:10:16 +0800"
        ],
        "From": "Ziyang Xuan <xuanziyang2@huawei.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<ferruh.yigit@intel.com>, <cloud.wangxiaoyun@huawei.com>,\n\t<zhouguoyang@huawei.com>, <rami.rosen@huawei.com>, Ziyang Xuan\n\t<xuanziyang2@huawei.com>",
        "Date": "Tue, 21 May 2019 16:21:02 +0800",
        "Message-ID": "<1558426862-187176-1-git-send-email-xuanziyang2@huawei.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.175.119.39]",
        "X-CFilter-Loop": "Reflected",
        "Subject": "[dpdk-dev] [PATCH 06/11] net/hinic/base: add code for nic business",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add code for nic business, including qps structures, qps configuration,\nwqs configuration for qps, nic business configuration functionalities.\n\nSigned-off-by: Ziyang Xuan <xuanziyang2@huawei.com>\n---\n drivers/net/hinic/base/hinic_pmd_nic.h    |   85 ++\n drivers/net/hinic/base/hinic_pmd_niccfg.c | 1408 +++++++++++++++++++++++++++++\n drivers/net/hinic/base/hinic_pmd_niccfg.h |  333 +++++++\n drivers/net/hinic/base/hinic_pmd_nicio.c  |  920 +++++++++++++++++++\n drivers/net/hinic/base/hinic_pmd_nicio.h  |   53 ++\n drivers/net/hinic/base/hinic_pmd_qp.c     |   26 +\n drivers/net/hinic/base/hinic_pmd_qp.h     |   76 ++\n drivers/net/hinic/base/hinic_pmd_wq.c     |  164 ++++\n drivers/net/hinic/base/hinic_pmd_wq.h     |   52 ++\n 9 files changed, 3117 insertions(+)\n create mode 100644 drivers/net/hinic/base/hinic_pmd_nic.h\n create mode 100644 drivers/net/hinic/base/hinic_pmd_niccfg.c\n create mode 100644 drivers/net/hinic/base/hinic_pmd_niccfg.h\n create mode 100644 drivers/net/hinic/base/hinic_pmd_nicio.c\n create mode 100644 drivers/net/hinic/base/hinic_pmd_nicio.h\n create mode 100644 drivers/net/hinic/base/hinic_pmd_qp.c\n create mode 100644 drivers/net/hinic/base/hinic_pmd_qp.h\n create mode 100644 drivers/net/hinic/base/hinic_pmd_wq.c\n create mode 100644 drivers/net/hinic/base/hinic_pmd_wq.h",
    "diff": "diff --git a/drivers/net/hinic/base/hinic_pmd_nic.h b/drivers/net/hinic/base/hinic_pmd_nic.h\nnew file mode 100644\nindex 0000000..7bea294\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_nic.h\n@@ -0,0 +1,85 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_PMD_NIC_H_\n+#define _HINIC_PMD_NIC_H_\n+\n+#define HINIC_FLUSH_QUEUE_TIMEOUT 3000\n+\n+struct hinic_hwdev;\n+struct hinic_wq;\n+\n+struct hinic_sq {\n+\tstruct hinic_wq\t\t*wq;\n+\tvolatile u16\t\t*cons_idx_addr;\n+\tvoid __iomem\t\t*db_addr;\n+\n+\tu16\tq_id;\n+\tu16\towner;\n+\tu16\tsq_depth;\n+};\n+\n+struct hinic_rq {\n+\tstruct hinic_wq\t\t*wq;\n+\tvolatile u16\t\t*pi_virt_addr;\n+\tdma_addr_t\t\tpi_dma_addr;\n+\n+\tu16\t\t\tirq_id;\n+\tu16\t\t\tmsix_entry_idx;\n+\tu16\t\t\tq_id;\n+\tu16\t\t\trq_depth;\n+};\n+\n+struct hinic_qp {\n+\tstruct hinic_sq\t\tsq;\n+\tstruct hinic_rq\t\trq;\n+};\n+\n+struct vf_data_storage {\n+\tu8 vf_mac_addr[ETH_ALEN];\n+\tbool registered;\n+\tbool pf_set_mac;\n+\tu16 pf_vlan;\n+\tu8 pf_qos;\n+\n+\tbool link_forced;\n+\tbool link_up;\t\t/* only valid if VF link is forced */\n+};\n+\n+struct hinic_nic_io {\n+\tstruct hinic_hwdev\t*hwdev;\n+\n+\tu16\t\t\tglobal_qpn;\n+\tu8\t\t\tlink_status;\n+\n+\tstruct hinic_wq\t\t*sq_wq;\n+\tstruct hinic_wq\t\t*rq_wq;\n+\n+\tu16\t\t\tmax_qps;\n+\tu16\t\t\tnum_qps;\n+\n+\tu16\t\t\tnum_sqs;\n+\tu16\t\t\tnum_rqs;\n+\n+\tu16\t\t\tsq_depth;\n+\tu16\t\t\trq_depth;\n+\n+\tu16\t\t\trq_buf_size;\n+\tu16\t\t\tvhd_mode;\n+\n+\tstruct hinic_qp\t\t*qps;\n+\t/* sq ci mem base addr of the function*/\n+\tvoid\t\t\t*ci_vaddr_base;\n+\tdma_addr_t\t\tci_dma_base;\n+\n+\tstruct hinic_event\tevent;\n+\tvoid\t\t\t*event_handle;\n+\n+\tu16\t\t\tmax_vfs;\n+\tu16\t\t\tnum_vfs;\n+\tu8\t\t\tvf_link_mode;\n+\tstruct vf_data_storage\t*vf_infos;\n+};\n+\n+#endif /* _HINIC_PMD_NIC_H_ */\ndiff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c\nnew file mode 100644\nindex 0000000..6da2172\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c\n@@ -0,0 +1,1408 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#include \"hinic_pmd_dpdev.h\"\n+\n+#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in,\t\t\\\n+\t\t\t       in_size, buf_out, out_size)\t\\\n+\thinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, cmd,\t\\\n+\t\t\tbuf_in, in_size,\t\t\t\\\n+\t\t\tbuf_out, out_size, 0)\n+\n+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz)\n+{\n+\tstruct hinic_function_table function_table;\n+\tu16 out_size = sizeof(function_table);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&function_table, 0, sizeof(function_table));\n+\tfunction_table.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tfunction_table.func_id = hinic_global_func_id(hwdev);\n+\tfunction_table.mtu = 0x3FFF;\t/* default, max mtu */\n+\tfunction_table.rx_wqe_buf_size = rx_buf_sz;\n+\n+\terr = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,\n+\t\t\t\t     HINIC_PORT_CMD_INIT_FUNC,\n+\t\t\t\t     &function_table, sizeof(function_table),\n+\t\t\t\t     &function_table, &out_size, 0);\n+\tif (err || function_table.mgmt_msg_head.status || !out_size) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to init func table, ret = %d\",\n+\t\t\tfunction_table.mgmt_msg_head.status);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_get_base_qpn - get global number of queue\n+ * 
@hwdev: the hardware interface of a nic device\n+ * @global_qpn: vat page size\n+ * @return\n+ *   0 on success,\n+ *   negative error value otherwise.\n+ **/\n+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn)\n+{\n+\tstruct hinic_cmd_qpn cmd_qpn;\n+\tu16 out_size = sizeof(cmd_qpn);\n+\tint err;\n+\n+\tif (!hwdev || !global_qpn) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or global_qpn is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&cmd_qpn, 0, sizeof(cmd_qpn));\n+\tcmd_qpn.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tcmd_qpn.func_id = hinic_global_func_id(hwdev);\n+\n+\terr = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,\n+\t\t\t\t     HINIC_PORT_CMD_GET_GLOBAL_QPN,\n+\t\t\t\t     &cmd_qpn, sizeof(cmd_qpn), &cmd_qpn,\n+\t\t\t\t     &out_size, 0);\n+\tif (err || !out_size || cmd_qpn.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to get base qpn, status(%d)\",\n+\t\t\tcmd_qpn.mgmt_msg_head.status);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t*global_qpn = cmd_qpn.base_qpn;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_set_mac - Init mac_vlan table in NIC.\n+ * @hwdev: the hardware interface of a nic device\n+ * @mac_addr: mac address\n+ * @vlan_id: set 0 for mac_vlan table initialization\n+ * @func_id: global function id of NIC\n+ * @return\n+ *   0 on success and stats is filled,\n+ *   negative error value otherwise.\n+ */\n+int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_port_mac_set mac_info;\n+\tu16 out_size = sizeof(mac_info);\n+\tint err;\n+\n+\tif (!hwdev || !mac_addr) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or mac_addr is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&mac_info, 0, sizeof(mac_info));\n+\tmac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tmac_info.func_id = func_id;\n+\tmac_info.vlan_id = vlan_id;\n+\tmemmove(mac_info.mac, mac_addr, ETH_ALEN);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,\n+\t\t\t\t     sizeof(mac_info), &mac_info, &out_size);\n+\tif (err || !out_size || (mac_info.mgmt_msg_head.status &&\n+\t    mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, mac_info.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) {\n+\t\tPMD_DRV_LOG(WARNING, \"PF has already set vf mac, Ignore set operation.\");\n+\t\treturn HINIC_PF_SET_VF_ALREADY;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_del_mac - Uninit mac_vlan table in NIC.\n+ * @hwdev: the hardware interface of a nic device\n+ * @mac_addr: mac address\n+ * @vlan_id: set 0 for mac_vlan table initialization\n+ * @func_id: global function id of NIC\n+ * @return\n+ *   0 on success and stats is filled,\n+ *   negative error value otherwise.\n+ */\n+int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id,\n+\t\t  u16 func_id)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_port_mac_set mac_info;\n+\tu16 out_size = sizeof(mac_info);\n+\tint err;\n+\n+\tif (!hwdev || !mac_addr) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or mac_addr is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (vlan_id >= VLAN_N_VID) {\n+\t\tPMD_DRV_LOG(ERR, \"Invalid VLAN number\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&mac_info, 0, sizeof(mac_info));\n+\tmac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tmac_info.func_id = func_id;\n+\tmac_info.vlan_id = vlan_id;\n+\tmemmove(mac_info.mac, 
mac_addr, ETH_ALEN);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_MAC, &mac_info,\n+\t\t\t\t     sizeof(mac_info), &mac_info, &out_size);\n+\tif (err || !out_size || (mac_info.mgmt_msg_head.status &&\n+\t\tmac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, mac_info.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) {\n+\t\tPMD_DRV_LOG(WARNING, \"PF has already set vf mac, Ignore delete operation.\");\n+\t\treturn HINIC_PF_SET_VF_ALREADY;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_get_default_mac(void *hwdev, u8 *mac_addr)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_port_mac_set mac_info;\n+\tu16 out_size = sizeof(mac_info);\n+\tint err;\n+\n+\tif (!hwdev || !mac_addr) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or mac_addr is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&mac_info, 0, sizeof(mac_info));\n+\tmac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tmac_info.func_id = hinic_global_func_id(hwdev);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MAC,\n+\t\t\t\t     &mac_info, sizeof(mac_info),\n+\t\t\t\t     &mac_info, &out_size);\n+\tif (err || !out_size || mac_info.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, mac_info.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemmove(mac_addr, mac_info.mac, ETH_ALEN);\n+\n+\treturn 0;\n+}\n+\n+int hinic_set_port_mtu(void *hwdev, u32 new_mtu)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_mtu mtu_info;\n+\tu16 out_size = sizeof(mtu_info);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&mtu_info, 0, sizeof(mtu_info));\n+\tmtu_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tmtu_info.func_id = hinic_global_func_id(hwdev);\n+\tmtu_info.mtu = new_mtu;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CHANGE_MTU,\n+\t\t\t\t     &mtu_info, sizeof(mtu_info),\n+\t\t\t\t     &mtu_info, &out_size);\n+\tif (err || !out_size || mtu_info.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, mtu_info.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_get_link_status(void *hwdev, u8 *link_state)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_get_link get_link;\n+\tu16 out_size = sizeof(get_link);\n+\tint err;\n+\n+\tif (!hwdev || !link_state) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or link_state is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&get_link, 0, sizeof(get_link));\n+\tget_link.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tget_link.func_id = hinic_global_func_id(hwdev);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,\n+\t\t\t\t     &get_link, sizeof(get_link),\n+\t\t\t\t     &get_link, &out_size);\n+\tif (err || !out_size || get_link.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, get_link.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t*link_state = get_link.link_status;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_set_vport_enable - Notify firmware that driver is ready or not.\n+ * @hwdev: the hardware interface 
of a nic device\n+ * @enable: 1: driver is ready; 0: driver is not ok.\n+ * Return: 0 on success and state is filled, negative error value otherwise.\n+ **/\n+int hinic_set_vport_enable(void *hwdev, bool enable)\n+{\n+\tstruct hinic_hwdev *hardware_dev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_vport_state en_state;\n+\tu16 out_size = sizeof(en_state);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&en_state, 0, sizeof(en_state));\n+\ten_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\ten_state.func_id = hinic_global_func_id(hwdev);\n+\ten_state.state = (enable ? 1 : 0);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VPORT_ENABLE,\n+\t\t\t\t     &en_state, sizeof(en_state),\n+\t\t\t\t     &en_state, &out_size);\n+\tif (err || !out_size || en_state.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, en_state.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_set_port_enable - open MAG to receive packets.\n+ * @hwdev: the hardware interface of a nic device\n+ * @enable: 1: open MAG; 0: close MAG.\n+ * @return\n+ *   0 on success and stats is filled,\n+ *   negative error value otherwise.\n+ */\n+int hinic_set_port_enable(void *hwdev, bool enable)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_port_state en_state;\n+\tu16 out_size = sizeof(en_state);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&en_state, 0, sizeof(en_state));\n+\ten_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\ten_state.state = (enable ? HINIC_PORT_ENABLE : HINIC_PORT_DISABLE);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_ENABLE,\n+\t\t\t\t     &en_state, sizeof(en_state),\n+\t\t\t\t     &en_state, &out_size);\n+\tif (err || !out_size || en_state.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set phy port state, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, en_state.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_port_info port_msg;\n+\tu16 out_size = sizeof(port_msg);\n+\tint err;\n+\n+\tif (!hwdev || !port_info) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or port_info is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&port_msg, 0, sizeof(port_msg));\n+\tport_msg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tport_msg.func_id = hinic_global_func_id(hwdev);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_INFO,\n+\t\t\t\t     &port_msg, sizeof(port_msg),\n+\t\t\t\t     &port_msg, &out_size);\n+\tif (err || !out_size || port_msg.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to get port info, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, port_msg.mgmt_msg_head.status, out_size);\n+\t\treturn err;\n+\t}\n+\n+\tport_info->autoneg_cap = port_msg.autoneg_cap;\n+\tport_info->autoneg_state = port_msg.autoneg_state;\n+\tport_info->duplex = port_msg.duplex;\n+\tport_info->port_type = port_msg.port_type;\n+\tport_info->speed = port_msg.speed;\n+\n+\treturn 0;\n+}\n+\n+int hinic_set_pause_config(void *hwdev, struct nic_pause_config nic_pause)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct 
hinic_pause_config pause_info;\n+\tu16 out_size = sizeof(pause_info);\n+\tint err;\n+\n+\tif (!nic_hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Nic_hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&pause_info, 0, sizeof(pause_info));\n+\tpause_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tpause_info.func_id = hinic_global_func_id(hwdev);\n+\tpause_info.auto_neg = nic_pause.auto_neg;\n+\tpause_info.rx_pause = nic_pause.rx_pause;\n+\tpause_info.tx_pause = nic_pause.tx_pause;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,\n+\t\t\t\t     &pause_info, sizeof(pause_info),\n+\t\t\t\t     &pause_info, &out_size);\n+\tif (err || !out_size || pause_info.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, pause_info.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw,\n+\t\t      u8 *pgid, u8 *up_bw, u8 *prio)\n+{\n+\tstruct hinic_up_ets_cfg ets;\n+\tu16 out_size = sizeof(ets);\n+\tu16 up_bw_t = 0;\n+\tu8 pg_bw_t = 0;\n+\tint i, err;\n+\n+\tif (!hwdev || !up_tc || !pg_bw || !pgid || !up_bw || !prio) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev, up_tc, pg_bw, pgid, up_bw or prio is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tfor (i = 0; i < HINIC_DCB_TC_MAX; i++) {\n+\t\tup_bw_t += *(up_bw + i);\n+\t\tpg_bw_t += *(pg_bw + i);\n+\n+\t\tif (*(up_tc + i) > HINIC_DCB_TC_MAX) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\"Invalid up %d mapping tc: %d\", i,\n+\t\t\t\t*(up_tc + i));\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\tif (pg_bw_t != 100 || (up_bw_t % 100) != 0) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Invalid pg_bw: %d or up_bw: %d\", pg_bw_t, up_bw_t);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&ets, 0, sizeof(ets));\n+\tets.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tets.port_id = 0;    /* reserved */\n+\tmemcpy(ets.up_tc, up_tc, HINIC_DCB_TC_MAX);\n+\tmemcpy(ets.pg_bw, pg_bw, HINIC_DCB_UP_MAX);\n+\tmemcpy(ets.pgid, pgid, HINIC_DCB_UP_MAX);\n+\tmemcpy(ets.up_bw, up_bw, HINIC_DCB_UP_MAX);\n+\tmemcpy(ets.prio, prio, HINIC_DCB_UP_MAX);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ETS,\n+\t\t\t\t     &ets, sizeof(ets), &ets, &out_size);\n+\tif (err || ets.mgmt_msg_head.status || !out_size) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to set ets, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, ets.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats)\n+{\n+\tstruct hinic_port_stats_info vport_stats_cmd;\n+\tstruct hinic_cmd_vport_stats vport_stats_rsp;\n+\tu16 out_size = sizeof(vport_stats_rsp);\n+\tint err;\n+\n+\tif (!hwdev || !stats) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or stats is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&vport_stats_rsp, 0, sizeof(vport_stats_rsp));\n+\tmemset(&vport_stats_cmd, 0, sizeof(vport_stats_cmd));\n+\tvport_stats_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tvport_stats_cmd.stats_version = HINIC_PORT_STATS_VERSION;\n+\tvport_stats_cmd.func_id = hinic_global_func_id(hwdev);\n+\tvport_stats_cmd.stats_size = sizeof(vport_stats_rsp);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT,\n+\t\t\t\t     &vport_stats_cmd, sizeof(vport_stats_cmd),\n+\t\t\t\t     &vport_stats_rsp, &out_size);\n+\tif (err || !out_size || vport_stats_rsp.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Get vport stats from fw failed, err: %d, status: 0x%x, out size: 
0x%x\",\n+\t\t\terr, vport_stats_rsp.mgmt_msg_head.status, out_size);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tmemcpy(stats, &vport_stats_rsp.stats, sizeof(*stats));\n+\n+\treturn 0;\n+}\n+\n+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats)\n+{\n+\tstruct hinic_port_stats_info port_stats_cmd;\n+\tstruct hinic_port_stats port_stats_rsp;\n+\tu16 out_size = sizeof(port_stats_rsp);\n+\tint err;\n+\n+\tif (!hwdev || !stats) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or stats is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&port_stats_rsp, 0, sizeof(port_stats_rsp));\n+\tmemset(&port_stats_cmd, 0, sizeof(port_stats_cmd));\n+\tport_stats_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tport_stats_cmd.stats_version = HINIC_PORT_STATS_VERSION;\n+\tport_stats_cmd.stats_size = sizeof(port_stats_rsp);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS,\n+\t\t\t\t     &port_stats_cmd, sizeof(port_stats_cmd),\n+\t\t\t\t     &port_stats_rsp, &out_size);\n+\tif (err || !out_size || port_stats_rsp.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, port_stats_rsp.mgmt_msg_head.status, out_size);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tmemcpy(stats, &port_stats_rsp.stats, sizeof(*stats));\n+\n+\treturn 0;\n+}\n+\n+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct nic_rss_context_tbl *ctx_tbl;\n+\tstruct hinic_cmd_buf *cmd_buf;\n+\tu32 ctx = 0;\n+\tu64 out_param;\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tcmd_buf = hinic_alloc_cmd_buf(hwdev);\n+\tif (!cmd_buf) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate cmd buf\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tctx |= HINIC_RSS_TYPE_SET(1, VALID) |\n+\t\tHINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |\n+\t\tHINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |\n+\t\tHINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |\n+\t\tHINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |\n+\t\tHINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |\n+\t\tHINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |\n+\t\tHINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |\n+\t\tHINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);\n+\n+\tcmd_buf->size = sizeof(struct nic_rss_context_tbl);\n+\n+\tctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf;\n+\tctx_tbl->group_index = cpu_to_be32(tmpl_idx);\n+\tctx_tbl->offset = 0;\n+\tctx_tbl->size = sizeof(u32);\n+\tctx_tbl->size = cpu_to_be32(ctx_tbl->size);\n+\tctx_tbl->rsvd = 0;\n+\tctx_tbl->ctx = cpu_to_be32(ctx);\n+\n+\t/* cfg the rss context table by command queue */\n+\terr = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,\n+\t\t\t\t     HINIC_MOD_L2NIC,\n+\t\t\t\t     HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,\n+\t\t\t\t     cmd_buf, &out_param, 0);\n+\n+\thinic_free_cmd_buf(hwdev, cmd_buf);\n+\n+\tif (err || out_param != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set rss context table\");\n+\t\treturn -EFAULT;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type *rss_type)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_rss_context_table ctx_tbl;\n+\tu16 out_size = sizeof(ctx_tbl);\n+\tint err;\n+\n+\tif (!hwdev || !rss_type) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or rss_type is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tctx_tbl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tctx_tbl.func_id 
= hinic_global_func_id(hwdev);\n+\tctx_tbl.template_id = (u8)tmpl_idx;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL,\n+\t\t\t\t     &ctx_tbl, sizeof(ctx_tbl),\n+\t\t\t\t     &ctx_tbl, &out_size);\n+\tif (err || !out_size || ctx_tbl.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, ctx_tbl.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4);\n+\trss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6);\n+\trss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT);\n+\trss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4);\n+\trss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6);\n+\trss_type->tcp_ipv6_ext =\n+\t\t\tHINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6_EXT);\n+\trss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4);\n+\trss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6);\n+\n+\treturn 0;\n+}\n+\n+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_rss_template_key temp_key;\n+\tu16 out_size = sizeof(temp_key);\n+\tint err;\n+\n+\tif (!hwdev || !temp) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or temp is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&temp_key, 0, sizeof(temp_key));\n+\ttemp_key.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\ttemp_key.func_id = hinic_global_func_id(hwdev);\n+\ttemp_key.template_id = (u8)tmpl_idx;\n+\tmemcpy(temp_key.key, temp, HINIC_RSS_KEY_SIZE);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL,\n+\t\t\t\t     &temp_key, sizeof(temp_key),\n+\t\t\t\t     &temp_key, &out_size);\n+\tif (err || !out_size || temp_key.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, temp_key.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_rss_template_key temp_key;\n+\tu16 out_size = sizeof(temp_key);\n+\tint err;\n+\n+\tif (!hwdev || !temp) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or temp is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&temp_key, 0, sizeof(temp_key));\n+\ttemp_key.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\ttemp_key.func_id = hinic_global_func_id(hwdev);\n+\ttemp_key.template_id = (u8)tmpl_idx;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,\n+\t\t\t\t     &temp_key, sizeof(temp_key),\n+\t\t\t\t     &temp_key, &out_size);\n+\tif (err || !out_size || temp_key.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to get hash key, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, temp_key.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE);\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_rss_set_hash_engine - Init rss hash function .\n+ * @hwdev: the hardware interface of a nic device\n+ * @tmpl_idx: index of rss template from NIC.\n+ * @type: hash function, such as Toeplitz or XOR.\n+ * @return\n+ *   0 on success and stats is filled,\n+ *   negative error value otherwise.\n+ */\n+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev 
*)hwdev;\n+\tstruct hinic_rss_engine_type hash_type;\n+\tu16 out_size = sizeof(hash_type);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&hash_type, 0, sizeof(hash_type));\n+\thash_type.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\thash_type.func_id = hinic_global_func_id(hwdev);\n+\thash_type.hash_engine = type;\n+\thash_type.template_id = tmpl_idx;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,\n+\t\t\t\t     &hash_type, sizeof(hash_type),\n+\t\t\t\t     &hash_type, &out_size);\n+\tif (err || !out_size || hash_type.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, hash_type.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct nic_rss_indirect_tbl *indir_tbl;\n+\tstruct hinic_cmd_buf *cmd_buf;\n+\tint i;\n+\tu32 *temp;\n+\tu32 indir_size;\n+\tu64 out_param;\n+\tint err;\n+\n+\tif (!hwdev || !indir_table) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or indir_table is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tcmd_buf = hinic_alloc_cmd_buf(hwdev);\n+\tif (!cmd_buf) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate cmd buf\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tcmd_buf->size = sizeof(struct nic_rss_indirect_tbl);\n+\tindir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf;\n+\tindir_tbl->group_index = cpu_to_be32(tmpl_idx);\n+\n+\tfor (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {\n+\t\tindir_tbl->entry[i] = (u8)(*(indir_table + i));\n+\n+\t\tif (0x3 == (i & 0x3)) {\n+\t\t\ttemp = (u32 *)&indir_tbl->entry[i - 3];\n+\t\t\t*temp = cpu_to_be32(*temp);\n+\t\t}\n+\t}\n+\n+\t/* configure the rss indirect table by command queue */\n+\tindir_size = HINIC_RSS_INDIR_SIZE / 2;\n+\tindir_tbl->offset = 0;\n+\tindir_tbl->size = cpu_to_be32(indir_size);\n+\n+\terr = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,\n+\t\t\t\t     HINIC_MOD_L2NIC,\n+\t\t\t\t     HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,\n+\t\t\t\t     cmd_buf, &out_param, 0);\n+\tif (err || out_param != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set rss indir table\");\n+\t\terr = -EFAULT;\n+\t\tgoto free_buf;\n+\t}\n+\n+\tindir_tbl->offset = cpu_to_be32(indir_size);\n+\tindir_tbl->size = cpu_to_be32(indir_size);\n+\tmemcpy(indir_tbl->entry, &indir_tbl->entry[indir_size], indir_size);\n+\n+\terr = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,\n+\t\t\t\t     HINIC_MOD_L2NIC,\n+\t\t\t\t     HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,\n+\t\t\t\t     cmd_buf, &out_param, 0);\n+\tif (err || out_param != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set rss indir table\");\n+\t\terr = -EFAULT;\n+\t}\n+\n+free_buf:\n+\thinic_free_cmd_buf(hwdev, cmd_buf);\n+\n+\treturn err;\n+}\n+\n+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_rss_indir_table rss_cfg;\n+\tu16 out_size = sizeof(rss_cfg);\n+\tint err = 0, i;\n+\n+\tif (!hwdev || !indir_table) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or indir_table is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&rss_cfg, 0, sizeof(rss_cfg));\n+\trss_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\trss_cfg.func_id = hinic_global_func_id(hwdev);\n+\trss_cfg.template_id = (u8)tmpl_idx;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev,\n+\t\t\t\t     
HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,\n+\t\t\t\t     &rss_cfg, sizeof(rss_cfg), &rss_cfg,\n+\t\t\t\t     &out_size);\n+\tif (err || !out_size || rss_cfg.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, rss_cfg.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE);\n+\tfor (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)\n+\t\tindir_table[i] = rss_cfg.indir[i];\n+\n+\treturn 0;\n+}\n+\n+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_rss_config rss_cfg;\n+\tu16 out_size = sizeof(rss_cfg);\n+\tint err;\n+\n+\t/* micro code required: number of TC should be power of 2 */\n+\tif (!hwdev || !prio_tc || (tc_num & (tc_num - 1))) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or prio_tc is NULL, or tc_num: %u Not power of 2\",\n+\t\t\ttc_num);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&rss_cfg, 0, sizeof(rss_cfg));\n+\trss_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\trss_cfg.func_id = hinic_global_func_id(hwdev);\n+\trss_cfg.rss_en = rss_en;\n+\trss_cfg.template_id = tmpl_idx;\n+\trss_cfg.rq_priority_number = tc_num ? (u8)ilog2(tc_num) : 0;\n+\n+\tmemcpy(rss_cfg.prio_tc, prio_tc, HINIC_DCB_UP_MAX);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_CFG,\n+\t\t\t\t     &rss_cfg, sizeof(rss_cfg), &rss_cfg,\n+\t\t\t\t     &out_size);\n+\tif (err || !out_size || rss_cfg.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, rss_cfg.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_rss_template_alloc - get rss template id from the chip,\n+ *\t\t\t      all functions share 96 templates.\n+ * @hwdev: the pointer to the private hardware device object\n+ * @tmpl_idx: index of rss template from chip.\n+ * Return: 0 on success and stats is filled, negative error value otherwise.\n+ **/\n+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_rss_template_mgmt template_mgmt;\n+\tu16 out_size = sizeof(template_mgmt);\n+\tint err;\n+\n+\tif (!hwdev || !tmpl_idx) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev or tmpl_idx is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&template_mgmt, 0, sizeof(template_mgmt));\n+\ttemplate_mgmt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\ttemplate_mgmt.func_id = hinic_global_func_id(hwdev);\n+\ttemplate_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,\n+\t\t\t\t     &template_mgmt, sizeof(template_mgmt),\n+\t\t\t\t     &template_mgmt, &out_size);\n+\tif (err || !out_size || template_mgmt.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, template_mgmt.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t*tmpl_idx = template_mgmt.template_id;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_rss_template_alloc - free rss template id to the chip\n+ * @hwdev: the hardware interface of a nic device\n+ * @tmpl_idx: index of rss template from NIC.\n+ * Return: 0 on success and stats is filled, negative error value otherwise.\n+ **/\n+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct 
hinic_rss_template_mgmt template_mgmt;\n+\tu16 out_size = sizeof(template_mgmt);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&template_mgmt, 0, sizeof(template_mgmt));\n+\ttemplate_mgmt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\ttemplate_mgmt.func_id = hinic_global_func_id(hwdev);\n+\ttemplate_mgmt.template_id = tmpl_idx;\n+\ttemplate_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,\n+\t\t\t\t     &template_mgmt, sizeof(template_mgmt),\n+\t\t\t\t     &template_mgmt, &out_size);\n+\tif (err || !out_size || template_mgmt.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, template_mgmt.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_set_rx_vhd_mode - change rx buffer size after initialization,\n+ * @hwdev: the hardware interface of a nic device\n+ * @mode: not needed.\n+ * @rx_buf_sz: receive buffer size.\n+ * @return\n+ *   0 on success and stats is filled,\n+ *   negative error value otherwise.\n+ */\n+int hinic_set_rx_vhd_mode(void *hwdev, u16 mode, u16 rx_buf_sz)\n+{\n+\tstruct hinic_set_vhd_mode vhd_mode_cfg;\n+\tu16 out_size = sizeof(vhd_mode_cfg);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&vhd_mode_cfg, 0, sizeof(vhd_mode_cfg));\n+\n+\tvhd_mode_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tvhd_mode_cfg.func_id = hinic_global_func_id(hwdev);\n+\tvhd_mode_cfg.vhd_type = mode;\n+\tvhd_mode_cfg.rx_wqe_buffer_size = rx_buf_sz;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VHD_CFG,\n+\t\t\t\t     &vhd_mode_cfg, sizeof(vhd_mode_cfg),\n+\t\t\t\t     &vhd_mode_cfg, &out_size);\n+\tif (err || !out_size || vhd_mode_cfg.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to set vhd mode, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, vhd_mode_cfg.mgmt_msg_head.status, out_size);\n+\n+\t\treturn -EIO;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_set_rx_mode(void *hwdev, u32 enable)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_rx_mode_config rx_mode_cfg;\n+\tu16 out_size = sizeof(rx_mode_cfg);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&rx_mode_cfg, 0, sizeof(rx_mode_cfg));\n+\trx_mode_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\trx_mode_cfg.func_id = hinic_global_func_id(hwdev);\n+\trx_mode_cfg.rx_mode = enable;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_MODE,\n+\t\t\t\t     &rx_mode_cfg, sizeof(rx_mode_cfg),\n+\t\t\t\t     &rx_mode_cfg, &out_size);\n+\tif (err || !out_size || rx_mode_cfg.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, rx_mode_cfg.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_set_rx_csum_offload(void *hwdev, u32 en)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_checksum_offload rx_csum_cfg;\n+\tu16 out_size = sizeof(rx_csum_cfg);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&rx_csum_cfg, 0, sizeof(rx_csum_cfg));\n+\trx_csum_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\trx_csum_cfg.func_id = 
hinic_global_func_id(hwdev);\n+\trx_csum_cfg.rx_csum_offload = en;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,\n+\t\t\t\t     &rx_csum_cfg, sizeof(rx_csum_cfg),\n+\t\t\t\t     &rx_csum_cfg, &out_size);\n+\tif (err || !out_size || rx_csum_cfg.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, rx_csum_cfg.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_set_tx_tso(void *hwdev, u8 tso_en)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_tso_config tso_cfg;\n+\tu16 out_size = sizeof(tso_cfg);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&tso_cfg, 0, sizeof(tso_cfg));\n+\ttso_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\ttso_cfg.func_id = hinic_global_func_id(hwdev);\n+\ttso_cfg.tso_en = tso_en;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_TSO,\n+\t\t\t\t     &tso_cfg, sizeof(tso_cfg), &tso_cfg,\n+\t\t\t\t     &out_size);\n+\tif (err || !out_size || tso_cfg.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set tso, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, tso_cfg.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_lro_config lro_cfg;\n+\tu16 out_size = sizeof(lro_cfg);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&lro_cfg, 0, sizeof(lro_cfg));\n+\tlro_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tlro_cfg.func_id = hinic_global_func_id(hwdev);\n+\tlro_cfg.lro_ipv4_en = ipv4_en;\n+\tlro_cfg.lro_ipv6_en = ipv6_en;\n+\tlro_cfg.lro_max_wqe_num = max_wqe_num;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO,\n+\t\t\t\t     &lro_cfg, sizeof(lro_cfg), &lro_cfg,\n+\t\t\t\t     &out_size);\n+\tif (err || !out_size || lro_cfg.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, lro_cfg.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_set_anti_attack(void *hwdev, bool enable)\n+{\n+\tstruct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_port_anti_attack_rate rate;\n+\tu16 out_size = sizeof(rate);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&rate, 0, sizeof(rate));\n+\trate.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\trate.func_id = hinic_global_func_id(hwdev);\n+\trate.enable = enable;\n+\trate.cir = ANTI_ATTACK_DEFAULT_CIR;\n+\trate.xir = ANTI_ATTACK_DEFAULT_XIR;\n+\trate.cbs = ANTI_ATTACK_DEFAULT_CBS;\n+\trate.xbs = ANTI_ATTACK_DEFAULT_XBS;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE,\n+\t\t\t\t     &rate, sizeof(rate), &rate,\n+\t\t\t\t     &out_size);\n+\tif (err || !out_size || rate.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"can't %s port Anti-Attack rate limit, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\t(enable ? 
\"enable\" : \"disable\"), err,\n+\t\t\trate.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Set autoneg status and restart port link status */\n+int hinic_reset_port_link_cfg(void *hwdev)\n+{\n+\tstruct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_reset_link_cfg reset_cfg;\n+\tu16 out_size = sizeof(reset_cfg);\n+\tint err;\n+\n+\tmemset(&reset_cfg, 0, sizeof(reset_cfg));\n+\treset_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\treset_cfg.func_id = hinic_global_func_id(hwdev);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RESET_LINK_CFG,\n+\t\t\t\t     &reset_cfg, sizeof(reset_cfg),\n+\t\t\t\t     &reset_cfg, &out_size);\n+\tif (err || !out_size || reset_cfg.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Reset port link configure failed, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, reset_cfg.mgmt_msg_head.status, out_size);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_set_fast_recycle_mode(void *hwdev, u8 mode)\n+{\n+\tstruct hinic_fast_recycled_mode fast_recycled_mode;\n+\tu16 out_size = sizeof(fast_recycled_mode);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&fast_recycled_mode, 0, sizeof(fast_recycled_mode));\n+\tfast_recycled_mode.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tfast_recycled_mode.func_id = hinic_global_func_id(hwdev);\n+\tfast_recycled_mode.fast_recycled_mode = mode;\n+\n+\terr = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,\n+\t\t\t\t     HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET,\n+\t\t\t\t     &fast_recycled_mode,\n+\t\t\t\t     sizeof(fast_recycled_mode),\n+\t\t\t\t     &fast_recycled_mode, &out_size, 0);\n+\tif (err || fast_recycled_mode.mgmt_msg_head.status || !out_size) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to set recycle mode, ret = %d\",\n+\t\t\tfast_recycled_mode.mgmt_msg_head.status);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_config_rx_mode(void *nic_dev, u32 rx_mode_ctrl)\n+{\n+\thinic_nic_dev *hinic_dev;\n+\tstruct hinic_hwdev *nic_hwdev;\n+\tint err;\n+\n+\tif (!nic_dev) {\n+\t\tPMD_DRV_LOG(ERR, \"nic_dev is NULL\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thinic_dev = (hinic_nic_dev *)nic_dev;\n+\tnic_hwdev = (struct hinic_hwdev *)hinic_dev->hwdev;\n+\terr = hinic_set_rx_mode(nic_hwdev, rx_mode_ctrl);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set rx mode\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thinic_dev->rx_mode_status = rx_mode_ctrl;\n+\n+\treturn 0;\n+}\n+\n+void hinic_clear_vport_stats(struct hinic_hwdev *hwdev)\n+{\n+\tstruct hinic_clear_vport_stats clear_vport_stats;\n+\tu16 out_size = sizeof(clear_vport_stats);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tmemset(&clear_vport_stats, 0, sizeof(clear_vport_stats));\n+\tclear_vport_stats.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tclear_vport_stats.func_id = hinic_global_func_id(hwdev);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAN_VPORT_STAT,\n+\t\t\t\t     &clear_vport_stats,\n+\t\t\t\t     sizeof(clear_vport_stats),\n+\t\t\t\t     &clear_vport_stats, &out_size);\n+\tif (err || !out_size || clear_vport_stats.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clear vport statistics, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, clear_vport_stats.mgmt_msg_head.status, out_size);\n+\t}\n+}\n+\n+void hinic_clear_phy_port_stats(struct hinic_hwdev *hwdev)\n+{\n+\tstruct hinic_clear_port_stats 
clear_phy_port_stats;\n+\tu16 out_size = sizeof(clear_phy_port_stats);\n+\tint err;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"Hwdev is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tmemset(&clear_phy_port_stats, 0, sizeof(clear_phy_port_stats));\n+\tclear_phy_port_stats.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tclear_phy_port_stats.func_id = hinic_global_func_id(hwdev);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev,\n+\t\t\t\t     HINIC_PORT_CMD_CLEAR_PORT_STATISTICS,\n+\t\t\t\t     &clear_phy_port_stats,\n+\t\t\t\t     sizeof(clear_phy_port_stats),\n+\t\t\t\t     &clear_phy_port_stats, &out_size);\n+\tif (err || !out_size || clear_phy_port_stats.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clear phy port statistics, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, clear_phy_port_stats.mgmt_msg_head.status,\n+\t\t\tout_size);\n+\t}\n+}\n+\n+int hinic_set_link_status_follow(void *hwdev,\n+\t\t\t\t enum hinic_link_follow_status status)\n+{\n+\tstruct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_set_link_follow follow;\n+\tu16 out_size = sizeof(follow);\n+\tint err;\n+\n+\tif (!hwdev)\n+\t\treturn -EINVAL;\n+\n+\tif (status >= HINIC_LINK_FOLLOW_STATUS_MAX) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Invalid link follow status: %d\", status);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&follow, 0, sizeof(follow));\n+\tfollow.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tfollow.func_id = hinic_global_func_id(hwdev);\n+\tfollow.follow_status = status;\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LINK_FOLLOW,\n+\t\t\t\t     &follow, sizeof(follow),\n+\t\t\t\t     &follow, &out_size);\n+\tif ((follow.mgmt_msg_head.status != HINIC_MGMT_CMD_UNSUPPORTED &&\n+\t     follow.mgmt_msg_head.status) || err || !out_size) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to set link status follow phy port status, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, follow.mgmt_msg_head.status, out_size);\n+\t\treturn -EFAULT;\n+\t}\n+\n+\treturn follow.mgmt_msg_head.status;\n+}\n+\n+int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised)\n+{\n+\tstruct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_link_mode_cmd link_mode;\n+\tu16 out_size = sizeof(link_mode);\n+\tint err;\n+\n+\tif (!hwdev || !supported || !advertised)\n+\t\treturn -EINVAL;\n+\n+\tmemset(&link_mode, 0, sizeof(link_mode));\n+\tlink_mode.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tlink_mode.func_id = hinic_global_func_id(hwdev);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,\n+\t\t\t\t     &link_mode, sizeof(link_mode),\n+\t\t\t\t     &link_mode, &out_size);\n+\tif (err || !out_size || link_mode.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, link_mode.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t*supported = link_mode.supported;\n+\t*advertised = link_mode.advertised;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * hinic_flush_qp_res - Flush tx && rx chip resources in case of set vport fake\n+ * failed when device start.\n+ * @hwdev: the hardware interface of a nic device\n+ * Return: 0 on success, negative error value otherwise.\n+ **/\n+int hinic_flush_qp_res(void *hwdev)\n+{\n+\tstruct hinic_hwdev *dev = (struct hinic_hwdev *)hwdev;\n+\tstruct hinic_clear_qp_resource qp_res;\n+\tu16 out_size = sizeof(qp_res);\n+\tint err;\n+\n+\tmemset(&qp_res, 0, sizeof(qp_res));\n+\tqp_res.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tqp_res.func_id = 
hinic_global_func_id(hwdev);\n+\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAR_QP_RES,\n+\t\t\t\t     &qp_res, sizeof(qp_res), &qp_res,\n+\t\t\t\t     &out_size);\n+\tif (err || !out_size || qp_res.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, qp_res.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver)\n+{\n+\tstruct hinic_hwdev *dev = hwdev;\n+\tstruct hinic_version_info ver_info;\n+\tu16 out_size = sizeof(ver_info);\n+\tint err;\n+\n+\tif (!hwdev || !fw_ver)\n+\t\treturn -EINVAL;\n+\n+\tmemset(&ver_info, 0, sizeof(ver_info));\n+\tver_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,\n+\t\t\t\t     &ver_info, sizeof(ver_info), &ver_info,\n+\t\t\t\t     &out_size);\n+\tif (err || !out_size || ver_info.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\"Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\terr, ver_info.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemcpy(fw_ver->mgmt_ver, ver_info.ver, HINIC_FW_VERSION_NAME);\n+\n+\tmemset(&ver_info, 0, sizeof(ver_info));\n+\tver_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tout_size = sizeof(ver_info);\n+\terr = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_BOOT_VERSION,\n+\t\t\t\t     &ver_info, sizeof(ver_info), &ver_info,\n+\t\t\t\t     &out_size);\n+\tif (err || !out_size || ver_info.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to get boot version,err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, ver_info.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemcpy(fw_ver->boot_ver, ver_info.ver, HINIC_FW_VERSION_NAME);\n+\n+\tmemset(&ver_info, 0, sizeof(ver_info));\n+\tver_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tout_size = sizeof(ver_info);\n+\terr = l2nic_msg_to_mgmt_sync(hwdev,\n+\t\t\t\t     HINIC_PORT_CMD_GET_MICROCODE_VERSION,\n+\t\t\t\t     &ver_info, sizeof(ver_info), &ver_info,\n+\t\t\t\t     &out_size);\n+\tif (err || !out_size || ver_info.mgmt_msg_head.status) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\"Failed to get microcode version, err: %d, status: 0x%x, out size: 0x%x\",\n+\t\t\terr, ver_info.mgmt_msg_head.status, out_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemcpy(fw_ver->microcode_ver, ver_info.ver, HINIC_FW_VERSION_NAME);\n+\n+\treturn 0;\n+}\n+\ndiff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h b/drivers/net/hinic/base/hinic_pmd_niccfg.h\nnew file mode 100644\nindex 0000000..0cc143e\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h\n@@ -0,0 +1,333 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_PMD_NICCFG_H_\n+#define _HINIC_PMD_NICCFG_H_\n+\n+#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)\n+#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)\n+\n+#define HINIC_VLAN_PRIORITY_SHIFT\t13\n+\n+#define HINIC_RSS_INDIR_SIZE\t\t256\n+#define HINIC_DCB_TC_MAX\t\t0x8\n+#define HINIC_DCB_UP_MAX\t\t0x8\n+#define HINIC_DCB_PG_MAX\t\t0x8\n+#define HINIC_RSS_KEY_SIZE\t\t40\n+\n+#define HINIC_MAX_NUM_RQ\t\t64\n+\n+enum hinic_rss_hash_type {\n+\tHINIC_RSS_HASH_ENGINE_TYPE_XOR = 0,\n+\tHINIC_RSS_HASH_ENGINE_TYPE_TOEP,\n+\n+\tHINIC_RSS_HASH_ENGINE_TYPE_MAX,\n+};\n+\n+struct nic_port_info 
{\n+\tu8\tport_type;\n+\tu8\tautoneg_cap;\n+\tu8\tautoneg_state;\n+\tu8\tduplex;\n+\tu8\tspeed;\n+};\n+\n+enum nic_speed_level {\n+\tLINK_SPEED_10MB = 0,\n+\tLINK_SPEED_100MB,\n+\tLINK_SPEED_1GB,\n+\tLINK_SPEED_10GB,\n+\tLINK_SPEED_25GB,\n+\tLINK_SPEED_40GB,\n+\tLINK_SPEED_100GB,\n+\tLINK_SPEED_MAX\n+};\n+\n+enum hinic_link_status {\n+\tHINIC_LINK_DOWN = 0,\n+\tHINIC_LINK_UP\n+};\n+\n+struct nic_pause_config {\n+\tu32 auto_neg;\n+\tu32 rx_pause;\n+\tu32 tx_pause;\n+};\n+\n+struct nic_rss_type {\n+\tu8 tcp_ipv6_ext;\n+\tu8 ipv6_ext;\n+\tu8 tcp_ipv6;\n+\tu8 ipv6;\n+\tu8 tcp_ipv4;\n+\tu8 ipv4;\n+\tu8 udp_ipv6;\n+\tu8 udp_ipv4;\n+};\n+\n+enum hinic_rx_mod {\n+\tHINIC_RX_MODE_UC = 1 << 0,\n+\tHINIC_RX_MODE_MC = 1 << 1,\n+\tHINIC_RX_MODE_BC = 1 << 2,\n+\tHINIC_RX_MODE_MC_ALL = 1 << 3,\n+\tHINIC_RX_MODE_PROMISC = 1 << 4,\n+};\n+\n+enum hinic_link_mode {\n+\tHINIC_10GE_BASE_KR = 0,\n+\tHINIC_40GE_BASE_KR4 = 1,\n+\tHINIC_40GE_BASE_CR4 = 2,\n+\tHINIC_100GE_BASE_KR4 = 3,\n+\tHINIC_100GE_BASE_CR4 = 4,\n+\tHINIC_25GE_BASE_KR_S = 5,\n+\tHINIC_25GE_BASE_CR_S = 6,\n+\tHINIC_25GE_BASE_KR = 7,\n+\tHINIC_25GE_BASE_CR = 8,\n+\tHINIC_GE_BASE_KX = 9,\n+\tHINIC_LINK_MODE_NUMBERS,\n+\n+\tHINIC_SUPPORTED_UNKNOWN = 0xFFFF,\n+};\n+\n+#define HINIC_DEFAULT_RX_MODE\t(HINIC_RX_MODE_UC | HINIC_RX_MODE_MC |\t\\\n+\t\t\t\tHINIC_RX_MODE_BC)\n+\n+#define HINIC_MAX_MTU_SIZE\t\t(9600)\n+#define HINIC_MIN_MTU_SIZE\t\t(256)\n+\n+/* MIN_MTU + ETH_HLEN + CRC (256+14+4) */\n+#define HINIC_MIN_FRAME_SIZE\t\t274\n+\n+/* MAX_MTU + ETH_HLEN + CRC + VLAN(9600+14+4+4) */\n+#define HINIC_MAX_JUMBO_FRAME_SIZE\t(9622)\n+\n+#define HINIC_PORT_DISABLE\t\t0x0\n+#define HINIC_PORT_ENABLE\t\t0x3\n+\n+struct hinic_vport_stats {\n+\tu64 tx_unicast_pkts_vport;\n+\tu64 tx_unicast_bytes_vport;\n+\tu64 tx_multicast_pkts_vport;\n+\tu64 tx_multicast_bytes_vport;\n+\tu64 tx_broadcast_pkts_vport;\n+\tu64 tx_broadcast_bytes_vport;\n+\n+\tu64 rx_unicast_pkts_vport;\n+\tu64 rx_unicast_bytes_vport;\n+\tu64 rx_multicast_pkts_vport;\n+\tu64 rx_multicast_bytes_vport;\n+\tu64 rx_broadcast_pkts_vport;\n+\tu64 rx_broadcast_bytes_vport;\n+\n+\tu64 tx_discard_vport;\n+\tu64 rx_discard_vport;\n+\tu64 tx_err_vport;\n+\tu64 rx_err_vport; /* rx checksum err pkts in ucode */\n+};\n+\n+struct hinic_phy_port_stats {\n+\tu64 mac_rx_total_pkt_num;\n+\tu64 mac_rx_total_oct_num;\n+\tu64 mac_rx_bad_pkt_num;\n+\tu64 mac_rx_bad_oct_num;\n+\tu64 mac_rx_good_pkt_num;\n+\tu64 mac_rx_good_oct_num;\n+\tu64 mac_rx_uni_pkt_num;\n+\tu64 mac_rx_multi_pkt_num;\n+\tu64 mac_rx_broad_pkt_num;\n+\n+\tu64 mac_tx_total_pkt_num;\n+\tu64 mac_tx_total_oct_num;\n+\tu64 mac_tx_bad_pkt_num;\n+\tu64 mac_tx_bad_oct_num;\n+\tu64 mac_tx_good_pkt_num;\n+\tu64 mac_tx_good_oct_num;\n+\tu64 mac_tx_uni_pkt_num;\n+\tu64 mac_tx_multi_pkt_num;\n+\tu64 mac_tx_broad_pkt_num;\n+\n+\tu64 mac_rx_fragment_pkt_num;\n+\tu64 mac_rx_undersize_pkt_num;\n+\tu64 mac_rx_undermin_pkt_num;\n+\tu64 mac_rx_64_oct_pkt_num;\n+\tu64 mac_rx_65_127_oct_pkt_num;\n+\tu64 mac_rx_128_255_oct_pkt_num;\n+\tu64 mac_rx_256_511_oct_pkt_num;\n+\tu64 mac_rx_512_1023_oct_pkt_num;\n+\tu64 mac_rx_1024_1518_oct_pkt_num;\n+\tu64 mac_rx_1519_2047_oct_pkt_num;\n+\tu64 mac_rx_2048_4095_oct_pkt_num;\n+\tu64 mac_rx_4096_8191_oct_pkt_num;\n+\tu64 mac_rx_8192_9216_oct_pkt_num;\n+\tu64 mac_rx_9217_12287_oct_pkt_num;\n+\tu64 mac_rx_12288_16383_oct_pkt_num;\n+\tu64 mac_rx_1519_max_bad_pkt_num;\n+\tu64 mac_rx_1519_max_good_pkt_num;\n+\tu64 mac_rx_oversize_pkt_num;\n+\tu64 mac_rx_jabber_pkt_num;\n+\n+\tu64 mac_rx_mac_pause_num;\n+\tu64 mac_rx_pfc_pkt_num;\n+\tu64 
mac_rx_pfc_pri0_pkt_num;\n+\tu64 mac_rx_pfc_pri1_pkt_num;\n+\tu64 mac_rx_pfc_pri2_pkt_num;\n+\tu64 mac_rx_pfc_pri3_pkt_num;\n+\tu64 mac_rx_pfc_pri4_pkt_num;\n+\tu64 mac_rx_pfc_pri5_pkt_num;\n+\tu64 mac_rx_pfc_pri6_pkt_num;\n+\tu64 mac_rx_pfc_pri7_pkt_num;\n+\tu64 mac_rx_mac_control_pkt_num;\n+\tu64 mac_rx_y1731_pkt_num;\n+\tu64 mac_rx_sym_err_pkt_num;\n+\tu64 mac_rx_fcs_err_pkt_num;\n+\tu64 mac_rx_send_app_good_pkt_num;\n+\tu64 mac_rx_send_app_bad_pkt_num;\n+\n+\tu64 mac_tx_fragment_pkt_num;\n+\tu64 mac_tx_undersize_pkt_num;\n+\tu64 mac_tx_undermin_pkt_num;\n+\tu64 mac_tx_64_oct_pkt_num;\n+\tu64 mac_tx_65_127_oct_pkt_num;\n+\tu64 mac_tx_128_255_oct_pkt_num;\n+\tu64 mac_tx_256_511_oct_pkt_num;\n+\tu64 mac_tx_512_1023_oct_pkt_num;\n+\tu64 mac_tx_1024_1518_oct_pkt_num;\n+\tu64 mac_tx_1519_2047_oct_pkt_num;\n+\tu64 mac_tx_2048_4095_oct_pkt_num;\n+\tu64 mac_tx_4096_8191_oct_pkt_num;\n+\tu64 mac_tx_8192_9216_oct_pkt_num;\n+\tu64 mac_tx_9217_12287_oct_pkt_num;\n+\tu64 mac_tx_12288_16383_oct_pkt_num;\n+\tu64 mac_tx_1519_max_bad_pkt_num;\n+\tu64 mac_tx_1519_max_good_pkt_num;\n+\tu64 mac_tx_oversize_pkt_num;\n+\tu64 mac_trans_jabber_pkt_num;\n+\n+\tu64 mac_tx_mac_pause_num;\n+\tu64 mac_tx_pfc_pkt_num;\n+\tu64 mac_tx_pfc_pri0_pkt_num;\n+\tu64 mac_tx_pfc_pri1_pkt_num;\n+\tu64 mac_tx_pfc_pri2_pkt_num;\n+\tu64 mac_tx_pfc_pri3_pkt_num;\n+\tu64 mac_tx_pfc_pri4_pkt_num;\n+\tu64 mac_tx_pfc_pri5_pkt_num;\n+\tu64 mac_tx_pfc_pri6_pkt_num;\n+\tu64 mac_tx_pfc_pri7_pkt_num;\n+\tu64 mac_tx_mac_control_pkt_num;\n+\tu64 mac_tx_y1731_pkt_num;\n+\tu64 mac_tx_1588_pkt_num;\n+\tu64 mac_tx_err_all_pkt_num;\n+\tu64 mac_tx_from_app_good_pkt_num;\n+\tu64 mac_tx_from_app_bad_pkt_num;\n+\n+\tu64 rx_higig2_ext_pkts_port;\n+\tu64 rx_higig2_message_pkts_port;\n+\tu64 rx_higig2_error_pkts_port;\n+\tu64 rx_higig2_cpu_ctrl_pkts_port;\n+\tu64 rx_higig2_unicast_pkts_port;\n+\tu64 rx_higig2_broadcast_pkts_port;\n+\tu64 rx_higig2_l2_multicast_pkts;\n+\tu64 rx_higig2_l3_multicast_pkts;\n+\n+\tu64 tx_higig2_message_pkts_port;\n+\tu64 tx_higig2_ext_pkts_port;\n+\tu64 tx_higig2_cpu_ctrl_pkts_port;\n+\tu64 tx_higig2_unicast_pkts_port;\n+\tu64 tx_higig2_broadcast_pkts_port;\n+\tu64 tx_higig2_l2_multicast_pkts;\n+\tu64 tx_higig2_l3_multicast_pkts;\n+};\n+\n+enum hinic_link_follow_status {\n+\tHINIC_LINK_FOLLOW_DEFAULT,\n+\tHINIC_LINK_FOLLOW_PORT,\n+\tHINIC_LINK_FOLLOW_SEPARATE,\n+\tHINIC_LINK_FOLLOW_STATUS_MAX,\n+};\n+\n+#define HINIC_FW_VERSION_NAME\t16\n+struct hinic_fw_version {\n+\tu8\tmgmt_ver[HINIC_FW_VERSION_NAME];\n+\tu8\tmicrocode_ver[HINIC_FW_VERSION_NAME];\n+\tu8\tboot_ver[HINIC_FW_VERSION_NAME];\n+};\n+\n+int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id);\n+\n+int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id);\n+\n+int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id,\n+\t\t     u16 func_id);\n+\n+int hinic_get_default_mac(void *hwdev, u8 *mac_addr);\n+\n+int hinic_set_port_mtu(void *hwdev, u32 new_mtu);\n+\n+int hinic_set_vport_enable(void *hwdev, bool enable);\n+\n+int hinic_set_port_enable(void *hwdev, bool enable);\n+\n+int hinic_get_link_status(void *hwdev, u8 *link_state);\n+\n+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info);\n+\n+int hinic_config_rx_mode(void *nic_dev, u32 rx_mode_ctrl);\n+\n+int hinic_set_rx_vhd_mode(void *hwdev, u16 vhd_mode, u16 rx_buf_sz);\n+\n+int hinic_set_pause_config(void *hwdev, struct nic_pause_config nic_pause);\n+\n+int hinic_reset_port_link_cfg(void *hwdev);\n+\n+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 
*pg_bw, u8 *pgid, u8 *up_bw,\n+\t\t      u8 *prio);\n+\n+int hinic_set_anti_attack(void *hwdev, bool enable);\n+\n+/* offload feature */\n+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num);\n+\n+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats);\n+\n+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats);\n+\n+/* rss */\n+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx,\n+\t\t       struct nic_rss_type rss_type);\n+\n+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx,\n+\t\t       struct nic_rss_type *rss_type);\n+\n+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp);\n+\n+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp);\n+\n+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type);\n+\n+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table);\n+\n+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table);\n+\n+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc);\n+\n+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx);\n+\n+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx);\n+\n+int hinic_set_rx_mode(void *hwdev, u32 enable);\n+\n+int hinic_set_rx_csum_offload(void *hwdev, u32 en);\n+\n+int hinic_set_tx_tso(void *hwdev, u8 tso_en);\n+\n+int hinic_set_link_status_follow(void *hwdev,\n+\t\t\t\t enum hinic_link_follow_status status);\n+\n+int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised);\n+\n+int hinic_flush_qp_res(void *hwdev);\n+\n+int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver);\n+\n+#endif /* _HINIC_PMD_NICCFG_H_ */\ndiff --git a/drivers/net/hinic/base/hinic_pmd_nicio.c b/drivers/net/hinic/base/hinic_pmd_nicio.c\nnew file mode 100644\nindex 0000000..7989fc7\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_nicio.c\n@@ -0,0 +1,920 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#include \"hinic_pmd_dpdev.h\"\n+#include \"../hinic_pmd_rx.h\"\n+\n+#define WQ_PREFETCH_MAX\t\t\t6\n+#define WQ_PREFETCH_MIN\t\t\t1\n+#define WQ_PREFETCH_THRESHOLD\t\t256\n+\n+struct hinic_qp_ctxt_header {\n+\tu16\tnum_queues;\n+\tu16\tqueue_type;\n+\tu32\taddr_offset;\n+};\n+\n+struct hinic_sq_ctxt {\n+\tu32\tceq_attr;\n+\n+\tu32\tci_owner;\n+\n+\tu32\twq_pfn_hi;\n+\tu32\twq_pfn_lo;\n+\n+\tu32\tpref_cache;\n+\tu32\tpref_owner;\n+\tu32\tpref_wq_pfn_hi_ci;\n+\tu32\tpref_wq_pfn_lo;\n+\n+\tu32\trsvd8;\n+\tu32\trsvd9;\n+\n+\tu32\twq_block_pfn_hi;\n+\tu32\twq_block_pfn_lo;\n+};\n+\n+struct hinic_rq_ctxt {\n+\tu32\tceq_attr;\n+\n+\tu32\tpi_intr_attr;\n+\n+\tu32\twq_pfn_hi_ci;\n+\tu32\twq_pfn_lo;\n+\n+\tu32\tpref_cache;\n+\tu32\tpref_owner;\n+\n+\tu32\tpref_wq_pfn_hi_ci;\n+\tu32\tpref_wq_pfn_lo;\n+\n+\tu32\tpi_paddr_hi;\n+\tu32\tpi_paddr_lo;\n+\n+\tu32\twq_block_pfn_hi;\n+\tu32\twq_block_pfn_lo;\n+};\n+\n+struct hinic_sq_ctxt_block {\n+\tstruct hinic_qp_ctxt_header\tcmdq_hdr;\n+\tstruct hinic_sq_ctxt\t\tsq_ctxt[HINIC_Q_CTXT_MAX];\n+};\n+\n+struct hinic_rq_ctxt_block {\n+\tstruct hinic_qp_ctxt_header\tcmdq_hdr;\n+\tstruct hinic_rq_ctxt\t\trq_ctxt[HINIC_Q_CTXT_MAX];\n+};\n+\n+struct hinic_clean_queue_ctxt {\n+\tstruct hinic_qp_ctxt_header\tcmdq_hdr;\n+\tu32\t\t\t\tctxt_size;\n+};\n+\n+static void init_sq(struct hinic_sq *sq, struct hinic_wq *wq, u16 q_id,\n+\t\t   volatile void *cons_idx_addr, void __iomem *db_addr)\n+{\n+\tsq->wq = wq;\n+\tsq->q_id = q_id;\n+\tsq->owner = 1;\n+\n+\tsq->cons_idx_addr = (volatile u16 
*)cons_idx_addr;\n+\tsq->db_addr = db_addr;\n+}\n+\n+static int init_rq(struct hinic_rq *rq, void *dev_hdl, struct hinic_wq *wq,\n+\t\t   u16 q_id, __rte_unused u16 rq_msix_idx)\n+{\n+\trq->wq = wq;\n+\trq->q_id = q_id;\n+\n+\trq->pi_virt_addr = (volatile u16 *)dma_zalloc_coherent(dev_hdl,\n+\t\t\t\t\t\t\t       PAGE_SIZE,\n+\t\t\t\t\t\t\t       &rq->pi_dma_addr,\n+\t\t\t\t\t\t\t       GFP_KERNEL);\n+\tif (!rq->pi_virt_addr) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate pi virt addr\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void clean_rq(struct hinic_rq *rq, void *dev_hdl)\n+{\n+\tdma_free_coherent_volatile(dev_hdl, PAGE_SIZE,\n+\t\t\t\t   (volatile void *)rq->pi_virt_addr,\n+\t\t\t\t   rq->pi_dma_addr);\n+}\n+\n+static void hinic_qp_prepare_cmdq_header(\n+\t\t\t\tstruct hinic_qp_ctxt_header *qp_ctxt_hdr,\n+\t\t\t\tenum hinic_qp_ctxt_type ctxt_type,\n+\t\t\t\tu16 num_queues, u16 max_queues, u16 q_id)\n+{\n+\tqp_ctxt_hdr->queue_type = ctxt_type;\n+\tqp_ctxt_hdr->num_queues = num_queues;\n+\n+\tif (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)\n+\t\tqp_ctxt_hdr->addr_offset =\n+\t\t\t\tSQ_CTXT_OFFSET(max_queues, max_queues, q_id);\n+\telse\n+\t\tqp_ctxt_hdr->addr_offset =\n+\t\t\t\tRQ_CTXT_OFFSET(max_queues, max_queues, q_id);\n+\n+\tqp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);\n+\n+\thinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));\n+}\n+\n+static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,\n+\t\t\t   struct hinic_sq_ctxt *sq_ctxt)\n+{\n+\tstruct hinic_wq *wq = sq->wq;\n+\tu64 wq_page_addr;\n+\tu64 wq_page_pfn, wq_block_pfn;\n+\tu32 wq_page_pfn_hi, wq_page_pfn_lo;\n+\tu32 wq_block_pfn_hi, wq_block_pfn_lo;\n+\tu16 pi_start, ci_start;\n+\n+\tci_start = (u16)(wq->cons_idx);\n+\tpi_start = (u16)(wq->prod_idx);\n+\n+\t/* read the first page from the HW table */\n+\twq_page_addr = wq->queue_buf_paddr;\n+\n+\twq_page_pfn = WQ_PAGE_PFN(wq_page_addr);\n+\twq_page_pfn_hi = upper_32_bits(wq_page_pfn);\n+\twq_page_pfn_lo = lower_32_bits(wq_page_pfn);\n+\n+\twq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);\n+\twq_block_pfn_hi = upper_32_bits(wq_block_pfn);\n+\twq_block_pfn_lo = lower_32_bits(wq_block_pfn);\n+\n+\t/* must config as ceq disabled */\n+\tsq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) |\n+\t\t\t\tSQ_CTXT_CEQ_ATTR_SET(0, ARM) |\n+\t\t\t\tSQ_CTXT_CEQ_ATTR_SET(0, CEQ_ID) |\n+\t\t\t\tSQ_CTXT_CEQ_ATTR_SET(0, EN);\n+\n+\tsq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) |\n+\t\t\t\tSQ_CTXT_CI_SET(1, OWNER);\n+\n+\tsq_ctxt->wq_pfn_hi =\n+\t\t\tSQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |\n+\t\t\tSQ_CTXT_WQ_PAGE_SET(pi_start, PI);\n+\n+\tsq_ctxt->wq_pfn_lo = wq_page_pfn_lo;\n+\n+\tsq_ctxt->pref_cache =\n+\t\tSQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |\n+\t\tSQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |\n+\t\tSQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);\n+\n+\tsq_ctxt->pref_owner = 1;\n+\n+\tsq_ctxt->pref_wq_pfn_hi_ci =\n+\t\tSQ_CTXT_PREF_SET(ci_start, CI) |\n+\t\tSQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI);\n+\n+\tsq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;\n+\n+\tsq_ctxt->wq_block_pfn_hi =\n+\t\tSQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);\n+\n+\tsq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;\n+\n+\thinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));\n+}\n+\n+static void hinic_rq_prepare_ctxt(struct hinic_rq *rq,\n+\t\t\tstruct hinic_rq_ctxt *rq_ctxt)\n+{\n+\tstruct hinic_wq *wq = rq->wq;\n+\tu64 wq_page_addr;\n+\tu64 wq_page_pfn, wq_block_pfn;\n+\tu32 wq_page_pfn_hi, wq_page_pfn_lo;\n+\tu32 wq_block_pfn_hi, 
wq_block_pfn_lo;\n+\tu16 pi_start, ci_start;\n+\n+\tci_start = (u16)(wq->cons_idx);\n+\tpi_start = (u16)(wq->prod_idx);\n+\n+\t/* read the first page from the HW table */\n+\twq_page_addr = wq->queue_buf_paddr;\n+\n+\twq_page_pfn = WQ_PAGE_PFN(wq_page_addr);\n+\twq_page_pfn_hi = upper_32_bits(wq_page_pfn);\n+\twq_page_pfn_lo = lower_32_bits(wq_page_pfn);\n+\n+\twq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);\n+\twq_block_pfn_hi = upper_32_bits(wq_block_pfn);\n+\twq_block_pfn_lo = lower_32_bits(wq_block_pfn);\n+\n+\t/* must config as ceq enable but do not generate ceq */\n+\trq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(1, EN) |\n+\t\t\t    RQ_CTXT_CEQ_ATTR_SET(1, OWNER);\n+\n+\trq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |\n+\t\t\t\tRQ_CTXT_PI_SET(rq->msix_entry_idx, INTR) |\n+\t\t\t\tRQ_CTXT_PI_SET(0, CEQ_ARM);\n+\n+\trq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |\n+\t\t\t\tRQ_CTXT_WQ_PAGE_SET(ci_start, CI);\n+\n+\trq_ctxt->wq_pfn_lo = wq_page_pfn_lo;\n+\n+\trq_ctxt->pref_cache =\n+\t\tRQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |\n+\t\tRQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |\n+\t\tRQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);\n+\n+\trq_ctxt->pref_owner = 1;\n+\n+\trq_ctxt->pref_wq_pfn_hi_ci =\n+\t\tRQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |\n+\t\tRQ_CTXT_PREF_SET(ci_start, CI);\n+\n+\trq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;\n+\n+\trq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);\n+\trq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);\n+\n+\trq_ctxt->wq_block_pfn_hi =\n+\t\tRQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);\n+\n+\trq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;\n+\n+\thinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));\n+}\n+\n+static int init_sq_ctxts(struct hinic_nic_io *nic_io)\n+{\n+\tstruct hinic_hwdev *hwdev = nic_io->hwdev;\n+\tstruct hinic_sq_ctxt_block *sq_ctxt_block;\n+\tstruct hinic_sq_ctxt *sq_ctxt;\n+\tstruct hinic_cmd_buf *cmd_buf;\n+\tstruct hinic_qp *qp;\n+\tu64 out_param = EIO;\n+\tu16 q_id, curr_id, global_qpn, max_ctxts, i;\n+\tint err = 0;\n+\n+\tcmd_buf = hinic_alloc_cmd_buf(hwdev);\n+\tif (!cmd_buf) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate cmd buf\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tq_id = 0;\n+\t/* sq and rq number may not equal */\n+\twhile (q_id < nic_io->num_sqs) {\n+\t\tsq_ctxt_block = (struct hinic_sq_ctxt_block *)cmd_buf->buf;\n+\t\tsq_ctxt = sq_ctxt_block->sq_ctxt;\n+\n+\t\tmax_ctxts = (nic_io->num_sqs - q_id) > HINIC_Q_CTXT_MAX ?\n+\t\t\t\tHINIC_Q_CTXT_MAX : (nic_io->num_sqs - q_id);\n+\n+\t\thinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,\n+\t\t\t\t\t     HINIC_QP_CTXT_TYPE_SQ, max_ctxts,\n+\t\t\t\t\t     nic_io->max_qps, q_id);\n+\n+\t\tfor (i = 0; i < max_ctxts; i++) {\n+\t\t\tcurr_id = q_id + i;\n+\t\t\tqp = &nic_io->qps[curr_id];\n+\t\t\tglobal_qpn = nic_io->global_qpn + curr_id;\n+\n+\t\t\thinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);\n+\t\t}\n+\n+\t\tcmd_buf->size = SQ_CTXT_SIZE(max_ctxts);\n+\n+\t\terr = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,\n+\t\t\t\t\t     HINIC_MOD_L2NIC,\n+\t\t\t\t\t     HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,\n+\t\t\t\t\t     cmd_buf, &out_param, 0);\n+\t\tif ((err) || out_param != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to set SQ ctxts, err:%d, out_param:0x%lx\",\n+\t\t\t\terr, out_param);\n+\t\t\terr = -EFAULT;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tq_id += max_ctxts;\n+\t}\n+\n+\thinic_free_cmd_buf(hwdev, cmd_buf);\n+\n+\treturn err;\n+}\n+\n+static int init_rq_ctxts(struct hinic_nic_io *nic_io)\n+{\n+\tstruct hinic_hwdev *hwdev = 
nic_io->hwdev;\n+\tstruct hinic_rq_ctxt_block *rq_ctxt_block;\n+\tstruct hinic_rq_ctxt *rq_ctxt;\n+\tstruct hinic_cmd_buf *cmd_buf;\n+\tstruct hinic_qp *qp;\n+\tu64 out_param = 0;\n+\tu16 q_id, curr_id, max_ctxts, i;\n+\tint err = 0;\n+\n+\tcmd_buf = hinic_alloc_cmd_buf(hwdev);\n+\tif (!cmd_buf) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate cmd buf\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tq_id = 0;\n+\t/* sq and rq number may not equal */\n+\twhile (q_id < nic_io->num_rqs) {\n+\t\trq_ctxt_block = (struct hinic_rq_ctxt_block *)cmd_buf->buf;\n+\t\trq_ctxt = rq_ctxt_block->rq_ctxt;\n+\n+\t\tmax_ctxts = (nic_io->num_rqs - q_id) > HINIC_Q_CTXT_MAX ?\n+\t\t\t\tHINIC_Q_CTXT_MAX : (nic_io->num_rqs - q_id);\n+\n+\t\thinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,\n+\t\t\t\t\t     HINIC_QP_CTXT_TYPE_RQ, max_ctxts,\n+\t\t\t\t\t     nic_io->max_qps, q_id);\n+\n+\t\tfor (i = 0; i < max_ctxts; i++) {\n+\t\t\tcurr_id = q_id + i;\n+\t\t\tqp = &nic_io->qps[curr_id];\n+\n+\t\t\thinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]);\n+\t\t}\n+\n+\t\tcmd_buf->size = RQ_CTXT_SIZE(max_ctxts);\n+\n+\t\terr = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,\n+\t\t\t\t\t     HINIC_MOD_L2NIC,\n+\t\t\t\t\t     HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,\n+\t\t\t\t\t     cmd_buf, &out_param, 0);\n+\n+\t\tif ((err) || out_param != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to set RQ ctxts\");\n+\t\t\terr = -EFAULT;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tq_id += max_ctxts;\n+\t}\n+\n+\thinic_free_cmd_buf(hwdev, cmd_buf);\n+\n+\treturn err;\n+}\n+\n+static int init_qp_ctxts(struct hinic_nic_io *nic_io)\n+{\n+\treturn (init_sq_ctxts(nic_io) || init_rq_ctxts(nic_io));\n+}\n+\n+static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io,\n+\t\t\t\t    enum hinic_qp_ctxt_type ctxt_type)\n+{\n+\tstruct hinic_hwdev *hwdev = nic_io->hwdev;\n+\tstruct hinic_clean_queue_ctxt *ctxt_block;\n+\tstruct hinic_cmd_buf *cmd_buf;\n+\tu64 out_param = 0;\n+\tint err;\n+\n+\tcmd_buf = hinic_alloc_cmd_buf(hwdev);\n+\tif (!cmd_buf) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate cmd buf\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tctxt_block = (struct hinic_clean_queue_ctxt *)cmd_buf->buf;\n+\tctxt_block->cmdq_hdr.num_queues = nic_io->max_qps;\n+\tctxt_block->cmdq_hdr.queue_type = ctxt_type;\n+\tctxt_block->cmdq_hdr.addr_offset = 0;\n+\n+\t/* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */\n+\tctxt_block->ctxt_size = 0x3;\n+\n+\thinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));\n+\n+\tcmd_buf->size = sizeof(*ctxt_block);\n+\n+\terr = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,\n+\t\t\t\t     HINIC_MOD_L2NIC,\n+\t\t\t\t     HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,\n+\t\t\t\t     cmd_buf, &out_param, 0);\n+\n+\tif ((err) || (out_param)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clean queue offload ctxts\");\n+\t\terr = -EFAULT;\n+\t}\n+\n+\thinic_free_cmd_buf(hwdev, cmd_buf);\n+\n+\treturn err;\n+}\n+\n+static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io)\n+{\n+\t/* clean LRO/TSO context space */\n+\treturn (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) ||\n+\t\tclean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ));\n+}\n+\n+static void hinic_get_func_rx_buf_size(hinic_nic_dev *nic_dev)\n+{\n+\tstruct hinic_rxq *rxq;\n+\tu16 q_id;\n+\tu16 buf_size = 0;\n+\n+\tfor (q_id = 0; q_id < nic_dev->num_rq; q_id++) {\n+\t\trxq = nic_dev->rxqs[q_id];\n+\n+\t\tif (rxq == NULL)\n+\t\t\tcontinue;\n+\n+\t\tif (q_id == 0)\n+\t\t\tbuf_size = rxq->buf_len;\n+\n+\t\tbuf_size = buf_size > rxq->buf_len ? 
rxq->buf_len : buf_size;\n+\t}\n+\n+\tnic_dev->nic_io->rq_buf_size = buf_size;\n+}\n+\n+/* init qps ctxt and set sq ci attr and arm all sq and set vat page_size */\n+int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_sq_attr sq_attr;\n+\tu16 q_id;\n+\tint err, rx_buf_sz;\n+\n+\t/* set vat page size to max queue depth page_size */\n+\terr = hinic_set_pagesize(hwdev, HINIC_PAGE_SIZE_DPDK);\n+\tif (err != HINIC_OK) {\n+\t\tPMD_DRV_LOG(ERR, \"Set vat page size: %d failed, rc: %d\",\n+\t\t\tHINIC_PAGE_SIZE_DPDK, err);\n+\t\treturn err;\n+\t}\n+\n+\terr = init_qp_ctxts(nic_io);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Init QP ctxts failed, rc: %d\", err);\n+\t\treturn err;\n+\t}\n+\n+\t/* clean LRO/TSO context space */\n+\terr = clean_qp_offload_ctxt(nic_io);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Clean qp offload ctxts failed, rc: %d\",\n+\t\t\terr);\n+\t\treturn err;\n+\t}\n+\n+\t/* get func rx buf size */\n+\thinic_get_func_rx_buf_size((hinic_nic_dev *)(hwdev->dev_hdl));\n+\trx_buf_sz = nic_io->rq_buf_size;\n+\n+\t/* update rx buf size to function table */\n+\terr = hinic_set_rx_vhd_mode(hwdev, 0, rx_buf_sz);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Set rx vhd mode failed, rc: %d\",\n+\t\t\terr);\n+\t\treturn err;\n+\t}\n+\n+\terr = hinic_set_root_ctxt(hwdev, nic_io->rq_depth,\n+\t\t\t\t  nic_io->sq_depth, rx_buf_sz);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Set root context failed, rc: %d\",\n+\t\t\terr);\n+\t\treturn err;\n+\t}\n+\n+\tfor (q_id = 0; q_id < nic_io->num_sqs; q_id++) {\n+\t\tsq_attr.ci_dma_base =\n+\t\t\tHINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2;\n+\t\t/* performance: sq ci update threshold as 8 */\n+\t\tsq_attr.pending_limit = 1;\n+\t\tsq_attr.coalescing_time = 1;\n+\t\tsq_attr.intr_en = 0;\n+\t\tsq_attr.l2nic_sqn = q_id;\n+\t\tsq_attr.dma_attr_off = 0;\n+\t\terr = hinic_set_ci_table(hwdev, q_id, &sq_attr);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Set ci table failed, rc: %d\",\n+\t\t\t\terr);\n+\t\t\tgoto set_cons_idx_table_err;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+set_cons_idx_table_err:\n+\t(void)hinic_clean_root_ctxt(hwdev);\n+\treturn err;\n+}\n+\n+void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev)\n+{\n+\tint err;\n+\n+\terr = hinic_clean_root_ctxt(hwdev);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clean root ctxt\");\n+}\n+\n+static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tu16 global_qpn, rx_buf_sz;\n+\tint err;\n+\n+\terr = hinic_get_base_qpn(hwdev, &global_qpn);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to get base qpn\");\n+\t\tgoto err_init_nic_hwdev;\n+\t}\n+\n+\tnic_io->global_qpn = global_qpn;\n+\trx_buf_sz = HINIC_IS_VF(hwdev) ? 
RX_BUF_LEN_1_5K : RX_BUF_LEN_16K;\n+\terr = hinic_init_function_table(hwdev, rx_buf_sz);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to init function table\");\n+\t\tgoto err_init_nic_hwdev;\n+\t}\n+\n+\terr = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set fast recycle mode\");\n+\t\tgoto err_init_nic_hwdev;\n+\t}\n+\n+\treturn 0;\n+\n+err_init_nic_hwdev:\n+\treturn err;\n+}\n+\n+static void hinic_free_nic_hwdev(struct hinic_hwdev *hwdev)\n+{\n+\thwdev->nic_io = NULL;\n+}\n+\n+int hinic_rx_tx_flush(struct hinic_hwdev *hwdev)\n+{\n+\treturn hinic_func_rx_tx_flush(hwdev);\n+}\n+\n+int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_wq *wq = &nic_io->sq_wq[q_id];\n+\n+\treturn (wq->delta) - 1;\n+}\n+\n+int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_wq *wq = &nic_io->rq_wq[q_id];\n+\n+\treturn (wq->delta) - 1;\n+}\n+\n+u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_wq *wq = &nic_io->sq_wq[q_id];\n+\n+\treturn (wq->cons_idx) & wq->mask;\n+}\n+\n+void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,\n+\t\t\t int num_wqebbs, u16 owner)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_sq *sq = &nic_io->qps[q_id].sq;\n+\n+\tif (owner != sq->owner)\n+\t\tsq->owner = owner;\n+\n+\tsq->wq->delta += num_wqebbs;\n+\tsq->wq->prod_idx -= num_wqebbs;\n+}\n+\n+void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev,\n+\t\t\t      u16 q_id, int wqebb_cnt)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_sq *sq = &nic_io->qps[q_id].sq;\n+\n+\thinic_put_wqe(sq->wq, wqebb_cnt);\n+}\n+\n+void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_rq *rq = &nic_io->qps[q_id].rq;\n+\n+\treturn hinic_get_wqe(rq->wq, 1, pi);\n+}\n+\n+void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_rq *rq = &nic_io->qps[q_id].rq;\n+\n+\trq->wq->delta += num_wqebbs;\n+\trq->wq->prod_idx -= num_wqebbs;\n+}\n+\n+u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_wq *wq = &nic_io->rq_wq[q_id];\n+\n+\treturn (wq->cons_idx) & wq->mask;\n+}\n+\n+void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_rq *rq = &nic_io->qps[q_id].rq;\n+\n+\thinic_put_wqe(rq->wq, wqe_cnt);\n+}\n+\n+int hinic_create_rq(hinic_nic_dev *nic_dev, u16 q_id, u16 rq_depth)\n+{\n+\tint err;\n+\tstruct hinic_nic_io *nic_io;\n+\tstruct hinic_qp *qp;\n+\tstruct hinic_rq *rq;\n+\tstruct hinic_hwdev *hwdev;\n+\n+\thwdev = nic_dev->hwdev;\n+\tnic_io = hwdev->nic_io;\n+\tqp = &nic_io->qps[q_id];\n+\trq = &qp->rq;\n+\n+\t/* in case of hardware still generate interrupt, do not use msix 0 */\n+\trq->msix_entry_idx = 1;\n+\n+\trq->rq_depth = rq_depth;\n+\tnic_io->rq_depth = rq_depth;\n+\n+\terr = hinic_wq_allocate(hwdev->dev_hdl, &nic_io->rq_wq[q_id],\n+\t\t\t\tHINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate WQ for RQ\");\n+\t\tgoto rq_alloc_err;\n+\t}\n+\n+\terr = init_rq(rq, hwdev->dev_hdl, 
&nic_io->rq_wq[q_id],\n+\t\t      q_id, 0);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to init RQ\");\n+\t\tgoto rq_init_err;\n+\t}\n+\n+\treturn HINIC_OK;\n+\n+rq_init_err:\n+\thinic_wq_free(hwdev->dev_hdl, &nic_io->rq_wq[q_id]);\n+\n+rq_alloc_err:\n+\treturn err;\n+}\n+\n+void hinic_destroy_rq(hinic_nic_dev *nic_dev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io;\n+\tstruct hinic_qp *qp;\n+\tstruct hinic_hwdev *hwdev;\n+\n+\thwdev = nic_dev->hwdev;\n+\tnic_io = hwdev->nic_io;\n+\tqp = &nic_io->qps[q_id];\n+\n+\tif (qp->rq.wq == NULL)\n+\t\treturn;\n+\n+\tclean_rq(&qp->rq, nic_io->hwdev->dev_hdl);\n+\thinic_wq_free(nic_io->hwdev->dev_hdl, qp->rq.wq);\n+\tqp->rq.wq = NULL;\n+}\n+\n+int hinic_create_sq(hinic_nic_dev *nic_dev, u16 q_id, u16 sq_depth)\n+{\n+\tint err;\n+\tstruct hinic_nic_io *nic_io;\n+\tstruct hinic_qp *qp;\n+\tstruct hinic_sq *sq;\n+\tvoid __iomem *db_addr;\n+\tstruct hinic_hwdev *hwdev;\n+\tvolatile u32 *ci_addr;\n+\n+\thwdev = nic_dev->hwdev;\n+\tnic_io = hwdev->nic_io;\n+\tqp = &nic_io->qps[q_id];\n+\tsq = &qp->sq;\n+\n+\tsq->sq_depth = sq_depth;\n+\tnic_io->sq_depth = sq_depth;\n+\n+\t/* alloc wq */\n+\terr = hinic_wq_allocate(nic_io->hwdev->dev_hdl, &nic_io->sq_wq[q_id],\n+\t\t\t\tHINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate WQ for SQ\");\n+\t\treturn err;\n+\t}\n+\n+\t/* alloc sq doorbell space */\n+\terr = hinic_alloc_db_addr(nic_io->hwdev, &db_addr, NULL);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to init db addr\");\n+\t\tgoto alloc_db_err;\n+\t}\n+\n+\t/* clear hardware ci */\n+\tci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id);\n+\t*ci_addr = 0;\n+\n+\t/* init sq qheader */\n+\tinit_sq(sq, &nic_io->sq_wq[q_id], q_id,\n+\t      (volatile void *)ci_addr, db_addr);\n+\n+\treturn HINIC_OK;\n+\n+alloc_db_err:\n+\thinic_wq_free(nic_io->hwdev->dev_hdl, &nic_io->sq_wq[q_id]);\n+\n+\treturn err;\n+}\n+\n+void hinic_destroy_sq(hinic_nic_dev *nic_dev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io;\n+\tstruct hinic_qp *qp;\n+\tstruct hinic_hwdev *hwdev;\n+\n+\thwdev = nic_dev->hwdev;\n+\tnic_io = hwdev->nic_io;\n+\tqp = &nic_io->qps[q_id];\n+\n+\tif (qp->sq.wq == NULL)\n+\t\treturn;\n+\n+\thinic_free_db_addr(nic_io->hwdev, qp->sq.db_addr, NULL);\n+\thinic_wq_free(nic_io->hwdev->dev_hdl, qp->sq.wq);\n+\tqp->sq.wq = NULL;\n+}\n+\n+static int hinic_alloc_nicio(hinic_nic_dev *nic_dev)\n+{\n+\tint err;\n+\tu16 max_qps, num_qp;\n+\tstruct hinic_nic_io *nic_io;\n+\tstruct hinic_hwdev *hwdev = nic_dev->hwdev;\n+\n+\tif (!hwdev) {\n+\t\tPMD_DRV_LOG(ERR, \"hwdev is NULL\");\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tnic_io = hwdev->nic_io;\n+\n+\tmax_qps = hinic_func_max_qnum(hwdev);\n+\tif ((max_qps & (max_qps - 1))) {\n+\t\tPMD_DRV_LOG(ERR, \"wrong number of max_qps: %d\",\n+\t\t\tmax_qps);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tnic_io->max_qps = max_qps;\n+\tnic_io->num_qps = max_qps;\n+\tnum_qp = max_qps;\n+\n+\tnic_io->qps = kzalloc_aligned(num_qp * sizeof(*nic_io->qps),\n+\t\t\t\t      GFP_KERNEL);\n+\tif (!nic_io->qps) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate qps\");\n+\t\terr = -ENOMEM;\n+\t\tgoto alloc_qps_err;\n+\t}\n+\n+\tnic_io->ci_vaddr_base = dma_zalloc_coherent(hwdev->dev_hdl,\n+\t\t\t\t\t\t    CI_TABLE_SIZE(num_qp,\n+\t\t\t\t\t\t    PAGE_SIZE),\n+\t\t\t\t\t\t    &nic_io->ci_dma_base,\n+\t\t\t\t\t\t    GFP_KERNEL);\n+\tif (!nic_io->ci_vaddr_base) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate ci area\");\n+\t\terr = -ENOMEM;\n+\t\tgoto ci_base_err;\n+\t}\n+\n+\tnic_io->sq_wq = 
kzalloc_aligned(num_qp * sizeof(*nic_io->sq_wq),\n+\t\t\t\t\tGFP_KERNEL);\n+\tif (!nic_io->sq_wq) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate sq wq array\");\n+\t\terr = -ENOMEM;\n+\t\tgoto sq_wq_err;\n+\t}\n+\n+\tnic_io->rq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->rq_wq),\n+\t\t\t\t\tGFP_KERNEL);\n+\tif (!nic_io->rq_wq) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate rq wq array\");\n+\t\terr = -ENOMEM;\n+\t\tgoto rq_wq_err;\n+\t}\n+\n+\treturn HINIC_OK;\n+\n+rq_wq_err:\n+\tkfree(nic_io->sq_wq);\n+\n+sq_wq_err:\n+\tdma_free_coherent(hwdev->dev_hdl, CI_TABLE_SIZE(num_qp, PAGE_SIZE),\n+\t\t\t  nic_io->ci_vaddr_base, nic_io->ci_dma_base);\n+\n+ci_base_err:\n+\tkfree(nic_io->qps);\n+\n+alloc_qps_err:\n+\treturn err;\n+}\n+\n+static void hinic_free_nicio(hinic_nic_dev *nic_dev)\n+{\n+\tstruct hinic_hwdev *hwdev = nic_dev->hwdev;\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\n+\t/* nic_io->rq_wq */\n+\tkfree(nic_io->rq_wq);\n+\n+\t/* nic_io->sq_wq */\n+\tkfree(nic_io->sq_wq);\n+\n+\t/* nic_io->ci_vaddr_base */\n+\tdma_free_coherent(hwdev->dev_hdl,\n+\t\t\t  CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE),\n+\t\t\t  nic_io->ci_vaddr_base, nic_io->ci_dma_base);\n+\n+\t/* nic_io->qps */\n+\tkfree(nic_io->qps);\n+}\n+\n+/* alloc nic hwdev and init function table */\n+int hinic_init_nicio(hinic_nic_dev *nic_dev)\n+{\n+\tint rc;\n+\n+\tnic_dev->nic_io =\n+\t\t(struct hinic_nic_io *)rte_zmalloc(\"hinic_nicio\",\n+\t\t\t\t\t\t   sizeof(*nic_dev->nic_io),\n+\t\t\t\t\t\t   RTE_CACHE_LINE_SIZE);\n+\tif (!nic_dev->nic_io) {\n+\t\tPMD_DRV_LOG(ERR, \"Allocate nic_io failed, dev_name: %s\",\n+\t\t\t    nic_dev->proc_dev_name);\n+\t\treturn -ENOMEM;\n+\t}\n+\tnic_dev->nic_io->hwdev = nic_dev->hwdev;\n+\tnic_dev->hwdev->nic_io = nic_dev->nic_io;\n+\n+\t/* alloc root working queue set */\n+\trc = hinic_alloc_nicio(nic_dev);\n+\tif (rc) {\n+\t\tPMD_DRV_LOG(ERR, \"Allocate nic_io failed, dev_name: %s\",\n+\t\t\t    nic_dev->proc_dev_name);\n+\t\tgoto allc_nicio_fail;\n+\t}\n+\n+\trc = hinic_init_nic_hwdev(nic_dev->nic_io->hwdev);\n+\tif (rc) {\n+\t\tPMD_DRV_LOG(ERR, \"Initialize hwdev failed, dev_name: %s\",\n+\t\t\t    nic_dev->proc_dev_name);\n+\t\tgoto init_nic_hwdev_fail;\n+\t}\n+\n+\treturn 0;\n+\n+init_nic_hwdev_fail:\n+\thinic_free_nicio(nic_dev);\n+\n+allc_nicio_fail:\n+\trte_free(nic_dev->nic_io);\n+\treturn rc;\n+}\n+\n+void hinic_deinit_nicio(hinic_nic_dev *nic_dev)\n+{\n+\thinic_free_nicio(nic_dev);\n+\n+\thinic_free_nic_hwdev(nic_dev->nic_io->hwdev);\n+\n+\trte_free(nic_dev->nic_io);\n+\tnic_dev->nic_io = NULL;\n+}\ndiff --git a/drivers/net/hinic/base/hinic_pmd_nicio.h b/drivers/net/hinic/base/hinic_pmd_nicio.h\nnew file mode 100644\nindex 0000000..ae9c008\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_nicio.h\n@@ -0,0 +1,53 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_PMD_NICIO_H_\n+#define _HINIC_PMD_NICIO_H_\n+\n+#define RX_BUF_LEN_16K\t16384\n+#define RX_BUF_LEN_4K\t4096\n+#define RX_BUF_LEN_1_5K\t1536\n+\n+#define SQ_CTRL_SET(val, member)\t(((val) & SQ_CTRL_##member##_MASK) \\\n+\t\t\t\t\t<< SQ_CTRL_##member##_SHIFT)\n+\n+struct hinic_sq_db {\n+\tu32\tdb_info;\n+};\n+\n+struct hinic_sge {\n+\tu32\t\thi_addr;\n+\tu32\t\tlo_addr;\n+\tu32\t\tlen;\n+};\n+\n+struct hinic_event {\n+\tvoid (*tx_ack)(void *handle, u16 q_id);\n+\t/* status: 0 - link down; 1 - link up */\n+\tvoid (*link_change)(void *handle, int status);\n+};\n+\n+/* init qps ctxt and set sq ci attr and arm all sq */\n+int hinic_init_qp_ctxts(struct 
hinic_hwdev *hwdev);\n+void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev);\n+int hinic_rx_tx_flush(struct hinic_hwdev *hwdev);\n+\n+int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id);\n+u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id);\n+void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id,\n+\t\t\t      int wqebb_cnt);\n+void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,\n+\t\t\t int num_wqebbs, u16 owner);\n+\n+int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id);\n+void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi);\n+void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs);\n+u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id);\n+void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt);\n+\n+void hinic_cpu_to_be32(void *data, int len);\n+void hinic_be32_to_cpu(void *data, int len);\n+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len);\n+\n+#endif /* _HINIC_PMD_NICIO_H_ */\ndiff --git a/drivers/net/hinic/base/hinic_pmd_qp.c b/drivers/net/hinic/base/hinic_pmd_qp.c\nnew file mode 100644\nindex 0000000..ac1b9f2\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_qp.c\n@@ -0,0 +1,26 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#include \"hinic_pmd_dpdev.h\"\n+\n+void hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr,\n+\t\t\t  dma_addr_t cqe_dma)\n+{\n+\tstruct hinic_rq_wqe *rq_wqe = (struct hinic_rq_wqe *)wqe;\n+\tstruct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;\n+\tstruct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;\n+\tstruct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;\n+\tu32 rq_ceq_len = sizeof(struct hinic_rq_cqe);\n+\n+\tctrl->ctrl_fmt =\n+\t\tRQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)),  LEN) |\n+\t\tRQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |\n+\t\tRQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |\n+\t\tRQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);\n+\n+\thinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);\n+\n+\tbuf_desc->addr_high = upper_32_bits(buf_addr);\n+\tbuf_desc->addr_low = lower_32_bits(buf_addr);\n+}\ndiff --git a/drivers/net/hinic/base/hinic_pmd_qp.h b/drivers/net/hinic/base/hinic_pmd_qp.h\nnew file mode 100644\nindex 0000000..a63ae04\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_qp.h\n@@ -0,0 +1,76 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_PMD_QP_H_\n+#define _HINIC_PMD_QP_H_\n+\n+#define HINIC_MAX_QUEUE_DEPTH\t\t4096\n+#define HINIC_MIN_QUEUE_DEPTH\t\t128\n+#define HINIC_TXD_ALIGN                 1\n+#define HINIC_RXD_ALIGN                 1\n+\n+struct hinic_sq_ctrl {\n+\tu32\tctrl_fmt;\n+\tu32\tqueue_info;\n+};\n+\n+struct hinic_sq_task {\n+\tu32\t\tpkt_info0;\n+\tu32\t\tpkt_info1;\n+\tu32\t\tpkt_info2;\n+\tu32\t\tufo_v6_identify;\n+\tu32\t\tpkt_info4;\n+\tu32\t\trsvd5;\n+};\n+\n+struct hinic_sq_bufdesc {\n+\tstruct hinic_sge sge;\n+\tu32\trsvd;\n+};\n+\n+struct hinic_sq_wqe {\n+\t/* sq wqe control section */\n+\tstruct hinic_sq_ctrl\t\tctrl;\n+\n+\t/* sq task control section */\n+\tstruct hinic_sq_task\t\ttask;\n+\n+\t/* sq sge section start address, 1~127 sges */\n+\tstruct hinic_sq_bufdesc     buf_descs[0];\n+};\n+\n+struct hinic_rq_ctrl {\n+\tu32\tctrl_fmt;\n+};\n+\n+struct hinic_rq_cqe {\n+\tu32 status;\n+\tu32 vlan_len;\n+\tu32 offload_type;\n+\tu32 rss_hash;\n+\n+\tu32 rsvd[4];\n+};\n+\n+struct 
hinic_rq_cqe_sect {\n+\tstruct hinic_sge\tsge;\n+\tu32\t\t\trsvd;\n+};\n+\n+struct hinic_rq_bufdesc {\n+\tu32\taddr_high;\n+\tu32\taddr_low;\n+};\n+\n+struct hinic_rq_wqe {\n+\tstruct hinic_rq_ctrl\t\tctrl;\n+\tu32\t\t\t\trsvd;\n+\tstruct hinic_rq_cqe_sect\tcqe_sect;\n+\tstruct hinic_rq_bufdesc\t\tbuf_desc;\n+};\n+\n+void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr,\n+\t\t\t  dma_addr_t cqe_dma);\n+\n+#endif /* _HINIC_PMD_NICIO_H_ */\ndiff --git a/drivers/net/hinic/base/hinic_pmd_wq.c b/drivers/net/hinic/base/hinic_pmd_wq.c\nnew file mode 100644\nindex 0000000..05813bf\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_wq.c\n@@ -0,0 +1,164 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#include \"hinic_pmd_dpdev.h\"\n+\n+static void free_wq_pages(void *handle, struct hinic_wq *wq)\n+{\n+\tdma_free_coherent(handle, wq->wq_buf_size, (void *)wq->queue_buf_vaddr,\n+\t\t\t(dma_addr_t)wq->queue_buf_paddr);\n+\n+\twq->queue_buf_paddr = 0;\n+\twq->queue_buf_vaddr = 0;\n+}\n+\n+static int alloc_wq_pages(void *dev_hdl, struct hinic_wq *wq)\n+{\n+\tdma_addr_t dma_addr = 0;\n+\n+\twq->queue_buf_vaddr = (u64)(u64 *)\n+\t\tdma_zalloc_coherent_aligned256k(dev_hdl, wq->wq_buf_size,\n+\t\t\t\t\t\t&dma_addr, GFP_KERNEL);\n+\tif (!wq->queue_buf_vaddr) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate wq page\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (!ADDR_256K_ALIGNED(dma_addr)) {\n+\t\tPMD_DRV_LOG(ERR, \"Wqe pages is not 256k aligned!\");\n+\t\tdma_free_coherent(dev_hdl, wq->wq_buf_size,\n+\t\t\t\t  (void *)wq->queue_buf_vaddr,\n+\t\t\t\t  dma_addr);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\twq->queue_buf_paddr = dma_addr;\n+\n+\treturn 0;\n+}\n+\n+int hinic_wq_allocate(void *dev_hdl, struct hinic_wq *wq,\n+\t\t      u32 wqebb_shift, u16 q_depth)\n+{\n+\tint err;\n+\n+\tif (q_depth & (q_depth - 1)) {\n+\t\tPMD_DRV_LOG(ERR, \"WQ q_depth isn't power of 2\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\twq->wqebb_size = 1 << wqebb_shift;\n+\twq->wqebb_shift = wqebb_shift;\n+\twq->wq_buf_size = ((u32)q_depth) << wqebb_shift;\n+\twq->q_depth = q_depth;\n+\n+\tif (wq->wq_buf_size > (PAGE_SIZE << HINIC_PAGE_SIZE_DPDK)) {\n+\t\tPMD_DRV_LOG(ERR, \"Invalid q_depth %u which one page_size can not hold\",\n+\t\t\tq_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\terr = alloc_wq_pages(dev_hdl, wq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate wq pages\");\n+\t\treturn err;\n+\t}\n+\n+\twq->cons_idx = 0;\n+\twq->prod_idx = 0;\n+\twq->delta = q_depth;\n+\twq->mask = q_depth - 1;\n+\n+\treturn 0;\n+}\n+\n+void hinic_wq_free(void *dev_hdl, struct hinic_wq *wq)\n+{\n+\tfree_wq_pages(dev_hdl, wq);\n+}\n+\n+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs)\n+{\n+\twq->cons_idx += num_wqebbs;\n+\twq->delta += num_wqebbs;\n+}\n+\n+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)\n+{\n+\tu16 curr_cons_idx;\n+\n+\tif ((wq->delta + num_wqebbs) > wq->q_depth)\n+\t\treturn NULL;\n+\n+\tcurr_cons_idx = (u16)(wq->cons_idx);\n+\n+\tcurr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);\n+\n+\t*cons_idx = curr_cons_idx;\n+\n+\treturn WQ_WQE_ADDR(wq, (u32)(*cons_idx));\n+}\n+\n+int hinic_cmdq_alloc(struct hinic_wq *wq, void *dev_hdl,\n+\t\t     int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,\n+\t\t     u16 q_depth)\n+{\n+\tint i, j, err = -ENOMEM;\n+\n+\t/* validate q_depth is power of 2 & wqebb_size is not 0 */\n+\tfor (i = 0; i < cmdq_blocks; i++) {\n+\t\twq[i].wqebb_size = 1 << wqebb_shift;\n+\t\twq[i].wqebb_shift = 
wqebb_shift;\n+\t\twq[i].wq_buf_size = wq_buf_size;\n+\t\twq[i].q_depth = q_depth;\n+\n+\t\terr = alloc_wq_pages(dev_hdl, &wq[i]);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to alloc CMDQ blocks\");\n+\t\t\tgoto cmdq_block_err;\n+\t\t}\n+\n+\t\twq[i].cons_idx = 0;\n+\t\twq[i].prod_idx = 0;\n+\t\twq[i].delta = q_depth;\n+\n+\t\twq[i].mask = q_depth - 1;\n+\t}\n+\n+\treturn 0;\n+\n+cmdq_block_err:\n+\tfor (j = 0; j < i; j++)\n+\t\tfree_wq_pages(dev_hdl, &wq[j]);\n+\n+\treturn err;\n+}\n+\n+void hinic_cmdq_free(void *dev_hdl, struct hinic_wq *wq, int cmdq_blocks)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < cmdq_blocks; i++)\n+\t\tfree_wq_pages(dev_hdl, &wq[i]);\n+}\n+\n+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)\n+{\n+\twq->cons_idx = 0;\n+\twq->prod_idx = 0;\n+\n+\tmemset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size);\n+}\n+\n+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)\n+{\n+\tu16 curr_prod_idx;\n+\n+\twq->delta -= num_wqebbs;\n+\tcurr_prod_idx = wq->prod_idx;\n+\twq->prod_idx += num_wqebbs;\n+\t*prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);\n+\n+\treturn WQ_WQE_ADDR(wq, (u32)(*prod_idx));\n+}\ndiff --git a/drivers/net/hinic/base/hinic_pmd_wq.h b/drivers/net/hinic/base/hinic_pmd_wq.h\nnew file mode 100644\nindex 0000000..8cc7525\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_wq.h\n@@ -0,0 +1,52 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_PMD_WQ_H_\n+#define _HINIC_PMD_WQ_H_\n+\n+#define\tWQ_WQE_ADDR(wq, idx) ((void *)((u64)((wq)->queue_buf_vaddr) + \\\n+\t\t\t      ((idx) << (wq)->wqebb_shift)))\n+\n+/* Working Queue */\n+struct hinic_wq {\n+\t/* The addresses are 64 bit in the HW */\n+\tu64     queue_buf_vaddr;\n+\n+\tu16\t\tq_depth;\n+\tu16\t\tmask;\n+\tu32\t\tdelta;\n+\n+\tu32\t\tcons_idx;\n+\tu32\t\tprod_idx;\n+\n+\tu64     queue_buf_paddr;\n+\n+\tu32\t\twqebb_size;\n+\tu32\t\twqebb_shift;\n+\n+\tu32\t\twq_buf_size;\n+\n+\tu32\t\trsvd[5];\n+};\n+\n+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq);\n+\n+int hinic_cmdq_alloc(struct hinic_wq *wq, void *dev_hdl,\n+\t\t     int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,\n+\t\t     u16 q_depth);\n+\n+void hinic_cmdq_free(void *dev_hdl, struct hinic_wq *wq, int cmdq_blocks);\n+\n+int hinic_wq_allocate(void *dev_hdl, struct hinic_wq *wq,\n+\t\t      u32 wqebb_shift, u16 q_depth);\n+\n+void hinic_wq_free(void *dev_hdl, struct hinic_wq *wq);\n+\n+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx);\n+\n+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs);\n+\n+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx);\n+\n+#endif /* _HINIC_PMD_WQ_H_ */\n",
    "prefixes": [
        "06/11"
    ]
}