get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (full replacement of the writable fields).

GET /api/patches/55040/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 55040,
    "url": "http://patches.dpdk.org/api/patches/55040/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/58113c06486f2fa347e7921beb45f97eef74db66.1560958308.git.xuanziyang2@huawei.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<58113c06486f2fa347e7921beb45f97eef74db66.1560958308.git.xuanziyang2@huawei.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/58113c06486f2fa347e7921beb45f97eef74db66.1560958308.git.xuanziyang2@huawei.com",
    "date": "2019-06-19T16:05:39",
    "name": "[v5,09/15] net/hinic/base: add context and work queue support",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "9e2ff1f74c79073e8bb636affe6317c7718bc515",
    "submitter": {
        "id": 1321,
        "url": "http://patches.dpdk.org/api/people/1321/?format=api",
        "name": "Ziyang Xuan",
        "email": "xuanziyang2@huawei.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/58113c06486f2fa347e7921beb45f97eef74db66.1560958308.git.xuanziyang2@huawei.com/mbox/",
    "series": [
        {
            "id": 5084,
            "url": "http://patches.dpdk.org/api/series/5084/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=5084",
            "date": "2019-06-19T15:45:20",
            "name": "A new net PMD - hinic",
            "version": 5,
            "mbox": "http://patches.dpdk.org/series/5084/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/55040/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/55040/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 456961D0A1;\n\tWed, 19 Jun 2019 17:55:51 +0200 (CEST)",
            "from huawei.com (szxga05-in.huawei.com [45.249.212.191])\n\tby dpdk.org (Postfix) with ESMTP id AB8561D113\n\tfor <dev@dpdk.org>; Wed, 19 Jun 2019 17:53:56 +0200 (CEST)",
            "from DGGEMS411-HUB.china.huawei.com (unknown [172.30.72.60])\n\tby Forcepoint Email with ESMTP id 57F8E7F6BED369A915DA\n\tfor <dev@dpdk.org>; Wed, 19 Jun 2019 23:53:49 +0800 (CST)",
            "from tester_149.localdomain (10.175.119.39) by\n\tDGGEMS411-HUB.china.huawei.com (10.3.19.211) with Microsoft SMTP\n\tServer id 14.3.439.0; Wed, 19 Jun 2019 23:53:40 +0800"
        ],
        "From": "Ziyang Xuan <xuanziyang2@huawei.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<ferruh.yigit@intel.com>, <cloud.wangxiaoyun@huawei.com>,\n\t<zhouguoyang@huawei.com>, <shahar.belkar@huawei.com>,\n\t<luoxianjun@huawei.com>, Ziyang Xuan <xuanziyang2@huawei.com>",
        "Date": "Thu, 20 Jun 2019 00:05:39 +0800",
        "Message-ID": "<58113c06486f2fa347e7921beb45f97eef74db66.1560958308.git.xuanziyang2@huawei.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<cover.1560958308.git.xuanziyang2@huawei.com>",
        "References": "<cover.1560958308.git.xuanziyang2@huawei.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.175.119.39]",
        "X-CFilter-Loop": "Reflected",
        "Subject": "[dpdk-dev] [PATCH v5 09/15] net/hinic/base: add context and work\n\tqueue support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Work queue is used for cmdq and tx/rx buff description.\nNic business needs to configure cmdq context and txq/rxq\ncontext. This patch adds data structures and function codes\nfor work queue and context.\n\nSigned-off-by: Ziyang Xuan <xuanziyang2@huawei.com>\n---\n drivers/net/hinic/base/hinic_pmd_nicio.c | 894 +++++++++++++++++++++++\n drivers/net/hinic/base/hinic_pmd_nicio.h | 265 +++++++\n drivers/net/hinic/base/hinic_pmd_wq.c    | 179 +++++\n drivers/net/hinic/base/hinic_pmd_wq.h    | 137 ++++\n 4 files changed, 1475 insertions(+)\n create mode 100644 drivers/net/hinic/base/hinic_pmd_nicio.c\n create mode 100644 drivers/net/hinic/base/hinic_pmd_nicio.h\n create mode 100644 drivers/net/hinic/base/hinic_pmd_wq.c\n create mode 100644 drivers/net/hinic/base/hinic_pmd_wq.h",
    "diff": "diff --git a/drivers/net/hinic/base/hinic_pmd_nicio.c b/drivers/net/hinic/base/hinic_pmd_nicio.c\nnew file mode 100644\nindex 000000000..248211f90\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_nicio.c\n@@ -0,0 +1,894 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+#include<rte_bus_pci.h>\n+\n+#include \"hinic_compat.h\"\n+#include \"hinic_pmd_hwdev.h\"\n+#include \"hinic_pmd_hwif.h\"\n+#include \"hinic_pmd_wq.h\"\n+#include \"hinic_pmd_mgmt.h\"\n+#include \"hinic_pmd_cmdq.h\"\n+#include \"hinic_pmd_cfg.h\"\n+#include \"hinic_pmd_niccfg.h\"\n+#include \"hinic_pmd_nicio.h\"\n+\n+#define WQ_PREFETCH_MAX\t\t\t6\n+#define WQ_PREFETCH_MIN\t\t\t1\n+#define WQ_PREFETCH_THRESHOLD\t\t256\n+\n+#define DEFAULT_RX_BUF_SIZE\t\t((u16)0xB)\n+\n+enum {\n+\tRECYCLE_MODE_NIC = 0x0,\n+\tRECYCLE_MODE_DPDK = 0x1,\n+};\n+\n+/* Queue buffer related define */\n+enum hinic_rx_buf_size {\n+\tHINIC_RX_BUF_SIZE_32B = 0x20,\n+\tHINIC_RX_BUF_SIZE_64B = 0x40,\n+\tHINIC_RX_BUF_SIZE_96B = 0x60,\n+\tHINIC_RX_BUF_SIZE_128B = 0x80,\n+\tHINIC_RX_BUF_SIZE_192B = 0xC0,\n+\tHINIC_RX_BUF_SIZE_256B = 0x100,\n+\tHINIC_RX_BUF_SIZE_384B = 0x180,\n+\tHINIC_RX_BUF_SIZE_512B = 0x200,\n+\tHINIC_RX_BUF_SIZE_768B = 0x300,\n+\tHINIC_RX_BUF_SIZE_1K = 0x400,\n+\tHINIC_RX_BUF_SIZE_1_5K = 0x600,\n+\tHINIC_RX_BUF_SIZE_2K = 0x800,\n+\tHINIC_RX_BUF_SIZE_3K = 0xC00,\n+\tHINIC_RX_BUF_SIZE_4K = 0x1000,\n+\tHINIC_RX_BUF_SIZE_8K = 0x2000,\n+\tHINIC_RX_BUF_SIZE_16K = 0x4000,\n+};\n+\n+const u32 hinic_hw_rx_buf_size[] = 
{\n+\tHINIC_RX_BUF_SIZE_32B,\n+\tHINIC_RX_BUF_SIZE_64B,\n+\tHINIC_RX_BUF_SIZE_96B,\n+\tHINIC_RX_BUF_SIZE_128B,\n+\tHINIC_RX_BUF_SIZE_192B,\n+\tHINIC_RX_BUF_SIZE_256B,\n+\tHINIC_RX_BUF_SIZE_384B,\n+\tHINIC_RX_BUF_SIZE_512B,\n+\tHINIC_RX_BUF_SIZE_768B,\n+\tHINIC_RX_BUF_SIZE_1K,\n+\tHINIC_RX_BUF_SIZE_1_5K,\n+\tHINIC_RX_BUF_SIZE_2K,\n+\tHINIC_RX_BUF_SIZE_3K,\n+\tHINIC_RX_BUF_SIZE_4K,\n+\tHINIC_RX_BUF_SIZE_8K,\n+\tHINIC_RX_BUF_SIZE_16K,\n+};\n+\n+struct hinic_qp_ctxt_header {\n+\tu16\tnum_queues;\n+\tu16\tqueue_type;\n+\tu32\taddr_offset;\n+};\n+\n+struct hinic_sq_ctxt {\n+\tu32\tceq_attr;\n+\n+\tu32\tci_owner;\n+\n+\tu32\twq_pfn_hi;\n+\tu32\twq_pfn_lo;\n+\n+\tu32\tpref_cache;\n+\tu32\tpref_owner;\n+\tu32\tpref_wq_pfn_hi_ci;\n+\tu32\tpref_wq_pfn_lo;\n+\n+\tu32\trsvd8;\n+\tu32\trsvd9;\n+\n+\tu32\twq_block_pfn_hi;\n+\tu32\twq_block_pfn_lo;\n+};\n+\n+struct hinic_rq_ctxt {\n+\tu32\tceq_attr;\n+\n+\tu32\tpi_intr_attr;\n+\n+\tu32\twq_pfn_hi_ci;\n+\tu32\twq_pfn_lo;\n+\n+\tu32\tpref_cache;\n+\tu32\tpref_owner;\n+\n+\tu32\tpref_wq_pfn_hi_ci;\n+\tu32\tpref_wq_pfn_lo;\n+\n+\tu32\tpi_paddr_hi;\n+\tu32\tpi_paddr_lo;\n+\n+\tu32\twq_block_pfn_hi;\n+\tu32\twq_block_pfn_lo;\n+};\n+\n+struct hinic_sq_ctxt_block {\n+\tstruct hinic_qp_ctxt_header\tcmdq_hdr;\n+\tstruct hinic_sq_ctxt\t\tsq_ctxt[HINIC_Q_CTXT_MAX];\n+};\n+\n+struct hinic_rq_ctxt_block {\n+\tstruct hinic_qp_ctxt_header\tcmdq_hdr;\n+\tstruct hinic_rq_ctxt\t\trq_ctxt[HINIC_Q_CTXT_MAX];\n+};\n+\n+struct hinic_clean_queue_ctxt {\n+\tstruct hinic_qp_ctxt_header\tcmdq_hdr;\n+\tu32\t\t\t\tctxt_size;\n+};\n+\n+\n+static void\n+hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,\n+\t\t\t     enum hinic_qp_ctxt_type ctxt_type,\n+\t\t\t     u16 num_queues, u16 max_queues, u16 q_id)\n+{\n+\tqp_ctxt_hdr->queue_type = ctxt_type;\n+\tqp_ctxt_hdr->num_queues = num_queues;\n+\n+\tif (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)\n+\t\tqp_ctxt_hdr->addr_offset =\n+\t\t\t\tSQ_CTXT_OFFSET(max_queues, max_queues, 
q_id);\n+\telse\n+\t\tqp_ctxt_hdr->addr_offset =\n+\t\t\t\tRQ_CTXT_OFFSET(max_queues, max_queues, q_id);\n+\n+\tqp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);\n+\n+\thinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));\n+}\n+\n+static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,\n+\t\t\t   struct hinic_sq_ctxt *sq_ctxt)\n+{\n+\tstruct hinic_wq *wq = sq->wq;\n+\tu64 wq_page_addr;\n+\tu64 wq_page_pfn, wq_block_pfn;\n+\tu32 wq_page_pfn_hi, wq_page_pfn_lo;\n+\tu32 wq_block_pfn_hi, wq_block_pfn_lo;\n+\tu16 pi_start, ci_start;\n+\n+\tci_start = (u16)(wq->cons_idx);\n+\tpi_start = (u16)(wq->prod_idx);\n+\n+\t/* read the first page from the HW table */\n+\twq_page_addr = wq->queue_buf_paddr;\n+\n+\twq_page_pfn = WQ_PAGE_PFN(wq_page_addr);\n+\twq_page_pfn_hi = upper_32_bits(wq_page_pfn);\n+\twq_page_pfn_lo = lower_32_bits(wq_page_pfn);\n+\n+\twq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);\n+\twq_block_pfn_hi = upper_32_bits(wq_block_pfn);\n+\twq_block_pfn_lo = lower_32_bits(wq_block_pfn);\n+\n+\t/* must config as ceq disabled */\n+\tsq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) |\n+\t\t\t\tSQ_CTXT_CEQ_ATTR_SET(0, ARM) |\n+\t\t\t\tSQ_CTXT_CEQ_ATTR_SET(0, CEQ_ID) |\n+\t\t\t\tSQ_CTXT_CEQ_ATTR_SET(0, EN);\n+\n+\tsq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) |\n+\t\t\t\tSQ_CTXT_CI_SET(1, OWNER);\n+\n+\tsq_ctxt->wq_pfn_hi =\n+\t\t\tSQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |\n+\t\t\tSQ_CTXT_WQ_PAGE_SET(pi_start, PI);\n+\n+\tsq_ctxt->wq_pfn_lo = wq_page_pfn_lo;\n+\n+\tsq_ctxt->pref_cache =\n+\t\tSQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |\n+\t\tSQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |\n+\t\tSQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);\n+\n+\tsq_ctxt->pref_owner = 1;\n+\n+\tsq_ctxt->pref_wq_pfn_hi_ci =\n+\t\tSQ_CTXT_PREF_SET(ci_start, CI) |\n+\t\tSQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI);\n+\n+\tsq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;\n+\n+\tsq_ctxt->wq_block_pfn_hi 
=\n+\t\tSQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);\n+\n+\tsq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;\n+\n+\thinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));\n+}\n+\n+static void hinic_rq_prepare_ctxt(struct hinic_rq *rq,\n+\t\t\tstruct hinic_rq_ctxt *rq_ctxt)\n+{\n+\tstruct hinic_wq *wq = rq->wq;\n+\tu64 wq_page_addr;\n+\tu64 wq_page_pfn, wq_block_pfn;\n+\tu32 wq_page_pfn_hi, wq_page_pfn_lo;\n+\tu32 wq_block_pfn_hi, wq_block_pfn_lo;\n+\tu16 pi_start, ci_start;\n+\n+\tci_start = (u16)(wq->cons_idx);\n+\tpi_start = (u16)(wq->prod_idx);\n+\n+\t/* read the first page from the HW table */\n+\twq_page_addr = wq->queue_buf_paddr;\n+\n+\twq_page_pfn = WQ_PAGE_PFN(wq_page_addr);\n+\twq_page_pfn_hi = upper_32_bits(wq_page_pfn);\n+\twq_page_pfn_lo = lower_32_bits(wq_page_pfn);\n+\n+\twq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);\n+\twq_block_pfn_hi = upper_32_bits(wq_block_pfn);\n+\twq_block_pfn_lo = lower_32_bits(wq_block_pfn);\n+\n+\t/* must config as ceq enable but do not generate ceq */\n+\trq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(1, EN) |\n+\t\t\t    RQ_CTXT_CEQ_ATTR_SET(1, OWNER);\n+\n+\trq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |\n+\t\t\t\tRQ_CTXT_PI_SET(rq->msix_entry_idx, INTR) |\n+\t\t\t\tRQ_CTXT_PI_SET(0, CEQ_ARM);\n+\n+\trq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |\n+\t\t\t\tRQ_CTXT_WQ_PAGE_SET(ci_start, CI);\n+\n+\trq_ctxt->wq_pfn_lo = wq_page_pfn_lo;\n+\n+\trq_ctxt->pref_cache =\n+\t\tRQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |\n+\t\tRQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |\n+\t\tRQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);\n+\n+\trq_ctxt->pref_owner = 1;\n+\n+\trq_ctxt->pref_wq_pfn_hi_ci =\n+\t\tRQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |\n+\t\tRQ_CTXT_PREF_SET(ci_start, CI);\n+\n+\trq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;\n+\n+\trq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);\n+\trq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);\n+\n+\trq_ctxt->wq_block_pfn_hi 
=\n+\t\tRQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);\n+\n+\trq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;\n+\n+\thinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));\n+}\n+\n+static int init_sq_ctxts(struct hinic_nic_io *nic_io)\n+{\n+\tstruct hinic_hwdev *hwdev = nic_io->hwdev;\n+\tstruct hinic_sq_ctxt_block *sq_ctxt_block;\n+\tstruct hinic_sq_ctxt *sq_ctxt;\n+\tstruct hinic_cmd_buf *cmd_buf;\n+\tstruct hinic_qp *qp;\n+\tu64 out_param = EIO;\n+\tu16 q_id, curr_id, global_qpn, max_ctxts, i;\n+\tint err = 0;\n+\n+\tcmd_buf = hinic_alloc_cmd_buf(hwdev);\n+\tif (!cmd_buf) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate cmd buf\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tq_id = 0;\n+\t/* sq and rq number may not equal */\n+\twhile (q_id < nic_io->num_sqs) {\n+\t\tsq_ctxt_block = cmd_buf->buf;\n+\t\tsq_ctxt = sq_ctxt_block->sq_ctxt;\n+\n+\t\tmax_ctxts = (nic_io->num_sqs - q_id) > HINIC_Q_CTXT_MAX ?\n+\t\t\t\tHINIC_Q_CTXT_MAX : (nic_io->num_sqs - q_id);\n+\n+\t\thinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,\n+\t\t\t\t\t     HINIC_QP_CTXT_TYPE_SQ, max_ctxts,\n+\t\t\t\t\t     nic_io->max_qps, q_id);\n+\n+\t\tfor (i = 0; i < max_ctxts; i++) {\n+\t\t\tcurr_id = q_id + i;\n+\t\t\tqp = &nic_io->qps[curr_id];\n+\t\t\tglobal_qpn = nic_io->global_qpn + curr_id;\n+\n+\t\t\thinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);\n+\t\t}\n+\n+\t\tcmd_buf->size = SQ_CTXT_SIZE(max_ctxts);\n+\n+\t\terr = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,\n+\t\t\t\t\t     HINIC_MOD_L2NIC,\n+\t\t\t\t\t     HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,\n+\t\t\t\t\t     cmd_buf, &out_param, 0);\n+\t\tif (err || out_param != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to set SQ ctxts, err:%d\", err);\n+\t\t\terr = -EFAULT;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tq_id += max_ctxts;\n+\t}\n+\n+\thinic_free_cmd_buf(hwdev, cmd_buf);\n+\n+\treturn err;\n+}\n+\n+static int init_rq_ctxts(struct hinic_nic_io *nic_io)\n+{\n+\tstruct hinic_hwdev *hwdev = nic_io->hwdev;\n+\tstruct hinic_rq_ctxt_block *rq_ctxt_block;\n+\tstruct 
hinic_rq_ctxt *rq_ctxt;\n+\tstruct hinic_cmd_buf *cmd_buf;\n+\tstruct hinic_qp *qp;\n+\tu64 out_param = 0;\n+\tu16 q_id, curr_id, max_ctxts, i;\n+\tint err = 0;\n+\n+\tcmd_buf = hinic_alloc_cmd_buf(hwdev);\n+\tif (!cmd_buf) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate cmd buf\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tq_id = 0;\n+\t/* sq and rq number may not equal */\n+\twhile (q_id < nic_io->num_rqs) {\n+\t\trq_ctxt_block = cmd_buf->buf;\n+\t\trq_ctxt = rq_ctxt_block->rq_ctxt;\n+\n+\t\tmax_ctxts = (nic_io->num_rqs - q_id) > HINIC_Q_CTXT_MAX ?\n+\t\t\t\tHINIC_Q_CTXT_MAX : (nic_io->num_rqs - q_id);\n+\n+\t\thinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,\n+\t\t\t\t\t     HINIC_QP_CTXT_TYPE_RQ, max_ctxts,\n+\t\t\t\t\t     nic_io->max_qps, q_id);\n+\n+\t\tfor (i = 0; i < max_ctxts; i++) {\n+\t\t\tcurr_id = q_id + i;\n+\t\t\tqp = &nic_io->qps[curr_id];\n+\n+\t\t\thinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]);\n+\t\t}\n+\n+\t\tcmd_buf->size = RQ_CTXT_SIZE(max_ctxts);\n+\n+\t\terr = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,\n+\t\t\t\t\t     HINIC_MOD_L2NIC,\n+\t\t\t\t\t     HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,\n+\t\t\t\t\t     cmd_buf, &out_param, 0);\n+\n+\t\tif ((err) || out_param != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to set RQ ctxts\");\n+\t\t\terr = -EFAULT;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tq_id += max_ctxts;\n+\t}\n+\n+\thinic_free_cmd_buf(hwdev, cmd_buf);\n+\n+\treturn err;\n+}\n+\n+static int init_qp_ctxts(struct hinic_nic_io *nic_io)\n+{\n+\treturn (init_sq_ctxts(nic_io) || init_rq_ctxts(nic_io));\n+}\n+\n+static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io,\n+\t\t\t\t    enum hinic_qp_ctxt_type ctxt_type)\n+{\n+\tstruct hinic_hwdev *hwdev = nic_io->hwdev;\n+\tstruct hinic_clean_queue_ctxt *ctxt_block;\n+\tstruct hinic_cmd_buf *cmd_buf;\n+\tu64 out_param = 0;\n+\tint err;\n+\n+\tcmd_buf = hinic_alloc_cmd_buf(hwdev);\n+\tif (!cmd_buf) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate cmd buf\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tctxt_block = 
cmd_buf->buf;\n+\tctxt_block->cmdq_hdr.num_queues = nic_io->max_qps;\n+\tctxt_block->cmdq_hdr.queue_type = ctxt_type;\n+\tctxt_block->cmdq_hdr.addr_offset = 0;\n+\n+\t/* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */\n+\tctxt_block->ctxt_size = 0x3;\n+\n+\thinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));\n+\n+\tcmd_buf->size = sizeof(*ctxt_block);\n+\n+\terr = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,\n+\t\t\t\t     HINIC_MOD_L2NIC,\n+\t\t\t\t     HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,\n+\t\t\t\t     cmd_buf, &out_param, 0);\n+\n+\tif ((err) || (out_param)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clean queue offload ctxts\");\n+\t\terr = -EFAULT;\n+\t}\n+\n+\thinic_free_cmd_buf(hwdev, cmd_buf);\n+\n+\treturn err;\n+}\n+\n+static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io)\n+{\n+\t/* clean LRO/TSO context space */\n+\treturn (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) ||\n+\t\tclean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ));\n+}\n+\n+/**\n+ * get_hw_rx_buf_size - translate rx_buf_size into hw_rx_buf_size\n+ * @rx_buf_sz: receive buffer size\n+ * @return\n+ *   hw rx buffer size\n+ **/\n+static u16 get_hw_rx_buf_size(u32 rx_buf_sz)\n+{\n+\tu16 num_hw_types = sizeof(hinic_hw_rx_buf_size)\n+\t\t\t   / sizeof(hinic_hw_rx_buf_size[0]);\n+\tu16 i;\n+\n+\tfor (i = 0; i < num_hw_types; i++) {\n+\t\tif (hinic_hw_rx_buf_size[i] == rx_buf_sz)\n+\t\t\treturn i;\n+\t}\n+\n+\tPMD_DRV_LOG(ERR, \"Hw can't support rx buf size of %u\", rx_buf_sz);\n+\n+\treturn DEFAULT_RX_BUF_SIZE;\t/* default 2K */\n+}\n+\n+/**\n+ * hinic_set_root_ctxt - init root context in NIC\n+ * @hwdev: the hardware interface of a nic device\n+ * @rq_depth: the depth of receive queue\n+ * @sq_depth: the depth of transmit queue\n+ * @rx_buf_sz: receive buffer size from app\n+ * Return: 0 on success, negative error value otherwise.\n+ **/\n+static int\n+hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz)\n+{\n+\tstruct hinic_root_ctxt 
root_ctxt;\n+\n+\tmemset(&root_ctxt, 0, sizeof(root_ctxt));\n+\troot_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\troot_ctxt.func_idx = hinic_global_func_id(hwdev);\n+\troot_ctxt.ppf_idx = hinic_ppf_idx(hwdev);\n+\troot_ctxt.set_cmdq_depth = 0;\n+\troot_ctxt.cmdq_depth = 0;\n+\troot_ctxt.lro_en = 1;\n+\troot_ctxt.rq_depth  = (u16)ilog2(rq_depth);\n+\troot_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz);\n+\troot_ctxt.sq_depth  = (u16)ilog2(sq_depth);\n+\n+\treturn hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,\n+\t\t\t\t      HINIC_MGMT_CMD_VAT_SET,\n+\t\t\t\t      &root_ctxt, sizeof(root_ctxt),\n+\t\t\t\t      NULL, NULL, 0);\n+}\n+\n+/**\n+ * hinic_clean_root_ctxt - clean root context table in NIC\n+ * @hwdev: the hardware interface of a nic device\n+ * @return\n+ *   0 on success,\n+ *   negative error value otherwise.\n+ **/\n+static int hinic_clean_root_ctxt(void *hwdev)\n+{\n+\tstruct hinic_root_ctxt root_ctxt;\n+\n+\tmemset(&root_ctxt, 0, sizeof(root_ctxt));\n+\troot_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\troot_ctxt.func_idx = hinic_global_func_id(hwdev);\n+\troot_ctxt.ppf_idx = hinic_ppf_idx(hwdev);\n+\troot_ctxt.set_cmdq_depth = 0;\n+\troot_ctxt.cmdq_depth = 0;\n+\troot_ctxt.lro_en = 0;\n+\troot_ctxt.rq_depth  = 0;\n+\troot_ctxt.rx_buf_sz = 0;\n+\troot_ctxt.sq_depth  = 0;\n+\n+\treturn hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,\n+\t\t\t\t      HINIC_MGMT_CMD_VAT_SET,\n+\t\t\t\t      &root_ctxt, sizeof(root_ctxt),\n+\t\t\t\t      NULL, NULL, 0);\n+}\n+\n+/* init qps ctxt and set sq ci attr and arm all sq and set vat page_size */\n+int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_sq_attr sq_attr;\n+\tu16 q_id;\n+\tint err, rx_buf_sz;\n+\n+\t/* set vat page size to max queue depth page_size */\n+\terr = hinic_set_pagesize(hwdev, HINIC_PAGE_SIZE_DPDK);\n+\tif (err != HINIC_OK) {\n+\t\tPMD_DRV_LOG(ERR, \"Set vat page size: %d failed, rc: %d\",\n+\t\t\tHINIC_PAGE_SIZE_DPDK, 
err);\n+\t\treturn err;\n+\t}\n+\n+\terr = init_qp_ctxts(nic_io);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Init QP ctxts failed, rc: %d\", err);\n+\t\treturn err;\n+\t}\n+\n+\t/* clean LRO/TSO context space */\n+\terr = clean_qp_offload_ctxt(nic_io);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Clean qp offload ctxts failed, rc: %d\",\n+\t\t\terr);\n+\t\treturn err;\n+\t}\n+\n+\trx_buf_sz = nic_io->rq_buf_size;\n+\n+\t/* update rx buf size to function table */\n+\terr = hinic_set_rx_vhd_mode(hwdev, 0, rx_buf_sz);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Set rx vhd mode failed, rc: %d\",\n+\t\t\terr);\n+\t\treturn err;\n+\t}\n+\n+\terr = hinic_set_root_ctxt(hwdev, nic_io->rq_depth,\n+\t\t\t\t  nic_io->sq_depth, rx_buf_sz);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Set root context failed, rc: %d\",\n+\t\t\terr);\n+\t\treturn err;\n+\t}\n+\n+\tfor (q_id = 0; q_id < nic_io->num_sqs; q_id++) {\n+\t\tsq_attr.ci_dma_base =\n+\t\t\tHINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2;\n+\t\t/* performance: sq ci update threshold as 8 */\n+\t\tsq_attr.pending_limit = 1;\n+\t\tsq_attr.coalescing_time = 1;\n+\t\tsq_attr.intr_en = 0;\n+\t\tsq_attr.l2nic_sqn = q_id;\n+\t\tsq_attr.dma_attr_off = 0;\n+\t\terr = hinic_set_ci_table(hwdev, q_id, &sq_attr);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Set ci table failed, rc: %d\",\n+\t\t\t\terr);\n+\t\t\tgoto set_cons_idx_table_err;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+set_cons_idx_table_err:\n+\t(void)hinic_clean_root_ctxt(hwdev);\n+\treturn err;\n+}\n+\n+void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev)\n+{\n+\tint err;\n+\n+\terr = hinic_clean_root_ctxt(hwdev);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clean root ctxt\");\n+}\n+\n+static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tu16 global_qpn, rx_buf_sz;\n+\tint err;\n+\n+\terr = hinic_get_base_qpn(hwdev, &global_qpn);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to get base qpn\");\n+\t\tgoto 
err_init_nic_hwdev;\n+\t}\n+\n+\tnic_io->global_qpn = global_qpn;\n+\trx_buf_sz = HINIC_IS_VF(hwdev) ? RX_BUF_LEN_1_5K : RX_BUF_LEN_16K;\n+\terr = hinic_init_function_table(hwdev, rx_buf_sz);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to init function table\");\n+\t\tgoto err_init_nic_hwdev;\n+\t}\n+\n+\terr = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to set fast recycle mode\");\n+\t\tgoto err_init_nic_hwdev;\n+\t}\n+\n+\treturn 0;\n+\n+err_init_nic_hwdev:\n+\treturn err;\n+}\n+\n+static void hinic_free_nic_hwdev(struct hinic_hwdev *hwdev)\n+{\n+\thwdev->nic_io = NULL;\n+}\n+\n+int hinic_rx_tx_flush(struct hinic_hwdev *hwdev)\n+{\n+\treturn hinic_func_rx_tx_flush(hwdev);\n+}\n+\n+int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_wq *wq = &nic_io->sq_wq[q_id];\n+\n+\treturn (wq->delta) - 1;\n+}\n+\n+int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_wq *wq = &nic_io->rq_wq[q_id];\n+\n+\treturn (wq->delta) - 1;\n+}\n+\n+u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_wq *wq = &nic_io->sq_wq[q_id];\n+\n+\treturn (wq->cons_idx) & wq->mask;\n+}\n+\n+void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,\n+\t\t\t int num_wqebbs, u16 owner)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_sq *sq = &nic_io->qps[q_id].sq;\n+\n+\tif (owner != sq->owner)\n+\t\tsq->owner = owner;\n+\n+\tsq->wq->delta += num_wqebbs;\n+\tsq->wq->prod_idx -= num_wqebbs;\n+}\n+\n+void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev,\n+\t\t\t      u16 q_id, int wqebb_cnt)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_sq *sq = &nic_io->qps[q_id].sq;\n+\n+\thinic_put_wqe(sq->wq, wqebb_cnt);\n+}\n+\n+void *hinic_get_rq_wqe(struct 
hinic_hwdev *hwdev, u16 q_id, u16 *pi)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_rq *rq = &nic_io->qps[q_id].rq;\n+\n+\treturn hinic_get_wqe(rq->wq, 1, pi);\n+}\n+\n+void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_rq *rq = &nic_io->qps[q_id].rq;\n+\n+\trq->wq->delta += num_wqebbs;\n+\trq->wq->prod_idx -= num_wqebbs;\n+}\n+\n+u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_wq *wq = &nic_io->rq_wq[q_id];\n+\n+\treturn (wq->cons_idx) & wq->mask;\n+}\n+\n+void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\tstruct hinic_rq *rq = &nic_io->qps[q_id].rq;\n+\n+\thinic_put_wqe(rq->wq, wqe_cnt);\n+}\n+\n+static int hinic_alloc_nicio(struct hinic_hwdev *hwdev)\n+{\n+\tint err;\n+\tu16 max_qps, num_qp;\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\n+\tmax_qps = hinic_func_max_qnum(hwdev);\n+\tif ((max_qps & (max_qps - 1))) {\n+\t\tPMD_DRV_LOG(ERR, \"wrong number of max_qps: %d\",\n+\t\t\tmax_qps);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tnic_io->max_qps = max_qps;\n+\tnic_io->num_qps = max_qps;\n+\tnum_qp = max_qps;\n+\n+\tnic_io->qps = kzalloc_aligned(num_qp * sizeof(*nic_io->qps),\n+\t\t\t\t      GFP_KERNEL);\n+\tif (!nic_io->qps) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate qps\");\n+\t\terr = -ENOMEM;\n+\t\tgoto alloc_qps_err;\n+\t}\n+\n+\tnic_io->ci_vaddr_base =\n+\t\tdma_zalloc_coherent(hwdev,\n+\t\t\t\t    CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),\n+\t\t\t\t    &nic_io->ci_dma_base, GFP_KERNEL);\n+\tif (!nic_io->ci_vaddr_base) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate ci area\");\n+\t\terr = -ENOMEM;\n+\t\tgoto ci_base_err;\n+\t}\n+\n+\tnic_io->sq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->sq_wq),\n+\t\t\t\t\tGFP_KERNEL);\n+\tif (!nic_io->sq_wq) {\n+\t\tPMD_DRV_LOG(ERR, 
\"Failed to allocate sq wq array\");\n+\t\terr = -ENOMEM;\n+\t\tgoto sq_wq_err;\n+\t}\n+\n+\tnic_io->rq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->rq_wq),\n+\t\t\t\t\tGFP_KERNEL);\n+\tif (!nic_io->rq_wq) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate rq wq array\");\n+\t\terr = -ENOMEM;\n+\t\tgoto rq_wq_err;\n+\t}\n+\n+\treturn HINIC_OK;\n+\n+rq_wq_err:\n+\tkfree(nic_io->sq_wq);\n+\n+sq_wq_err:\n+\tdma_free_coherent(hwdev, CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),\n+\t\t\t  nic_io->ci_vaddr_base, nic_io->ci_dma_base);\n+\n+ci_base_err:\n+\tkfree(nic_io->qps);\n+\n+alloc_qps_err:\n+\treturn err;\n+}\n+\n+static void hinic_free_nicio(struct hinic_hwdev *hwdev)\n+{\n+\tstruct hinic_nic_io *nic_io = hwdev->nic_io;\n+\n+\t/* nic_io->rq_wq */\n+\tkfree(nic_io->rq_wq);\n+\n+\t/* nic_io->sq_wq */\n+\tkfree(nic_io->sq_wq);\n+\n+\t/* nic_io->ci_vaddr_base */\n+\tdma_free_coherent(hwdev,\n+\t\t\t  CI_TABLE_SIZE(nic_io->max_qps, HINIC_PAGE_SIZE),\n+\t\t\t  nic_io->ci_vaddr_base, nic_io->ci_dma_base);\n+\n+\t/* nic_io->qps */\n+\tkfree(nic_io->qps);\n+}\n+\n+/* alloc nic hwdev and init function table */\n+int hinic_init_nicio(struct hinic_hwdev *hwdev)\n+{\n+\tint rc;\n+\n+\thwdev->nic_io = rte_zmalloc(\"hinic_nicio\", sizeof(*hwdev->nic_io),\n+\t\t\t\t      RTE_CACHE_LINE_SIZE);\n+\tif (!hwdev->nic_io) {\n+\t\tPMD_DRV_LOG(ERR, \"Allocate nic_io failed, dev_name: %s\",\n+\t\t\t    hwdev->pcidev_hdl->name);\n+\t\treturn -ENOMEM;\n+\t}\n+\thwdev->nic_io->hwdev = hwdev;\n+\n+\t/* alloc root working queue set */\n+\trc = hinic_alloc_nicio(hwdev);\n+\tif (rc) {\n+\t\tPMD_DRV_LOG(ERR, \"Allocate nic_io failed, dev_name: %s\",\n+\t\t\t    hwdev->pcidev_hdl->name);\n+\t\tgoto allc_nicio_fail;\n+\t}\n+\n+\trc = hinic_init_nic_hwdev(hwdev);\n+\tif (rc) {\n+\t\tPMD_DRV_LOG(ERR, \"Initialize hwdev failed, dev_name: %s\",\n+\t\t\t    hwdev->pcidev_hdl->name);\n+\t\tgoto init_nic_hwdev_fail;\n+\t}\n+\n+\treturn 
0;\n+\n+init_nic_hwdev_fail:\n+\thinic_free_nicio(hwdev);\n+\n+allc_nicio_fail:\n+\trte_free(hwdev->nic_io);\n+\treturn rc;\n+}\n+\n+void hinic_deinit_nicio(struct hinic_hwdev *hwdev)\n+{\n+\thinic_free_nicio(hwdev);\n+\n+\thinic_free_nic_hwdev(hwdev);\n+\n+\trte_free(hwdev->nic_io);\n+\thwdev->nic_io = NULL;\n+}\n+\n+/**\n+ * hinic_convert_rx_buf_size - convert rx buffer size to hw size\n+ * @rx_buf_sz: receive buffer size of mbuf\n+ * @match_sz: receive buffer size of hardware\n+ * @return\n+ *   0 on success,\n+ *   negative error value otherwise.\n+ **/\n+int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz)\n+{\n+\tu32 i, num_hw_types, best_match_sz;\n+\n+\tif (unlikely(!match_sz || rx_buf_sz < HINIC_RX_BUF_SIZE_32B))\n+\t\treturn -EINVAL;\n+\n+\tif (rx_buf_sz >= HINIC_RX_BUF_SIZE_16K) {\n+\t\tbest_match_sz =  HINIC_RX_BUF_SIZE_16K;\n+\t\tgoto size_matched;\n+\t}\n+\n+\tnum_hw_types = sizeof(hinic_hw_rx_buf_size) /\n+\t\tsizeof(hinic_hw_rx_buf_size[0]);\n+\tbest_match_sz = hinic_hw_rx_buf_size[0];\n+\tfor (i = 0; i < num_hw_types; i++) {\n+\t\tif (rx_buf_sz == hinic_hw_rx_buf_size[i]) {\n+\t\t\tbest_match_sz = hinic_hw_rx_buf_size[i];\n+\t\t\tbreak;\n+\t\t} else if (rx_buf_sz < hinic_hw_rx_buf_size[i]) {\n+\t\t\tbreak;\n+\t\t}\n+\t\tbest_match_sz = hinic_hw_rx_buf_size[i];\n+\t}\n+\n+size_matched:\n+\t*match_sz = best_match_sz;\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/net/hinic/base/hinic_pmd_nicio.h b/drivers/net/hinic/base/hinic_pmd_nicio.h\nnew file mode 100644\nindex 000000000..487e44064\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_nicio.h\n@@ -0,0 +1,265 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_PMD_NICIO_H_\n+#define _HINIC_PMD_NICIO_H_\n+\n+#define RX_BUF_LEN_16K\t\t\t16384\n+#define RX_BUF_LEN_1_5K\t\t\t1536\n+\n+#define HINIC_Q_CTXT_MAX\t\t42\n+\n+/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */\n+#define HINIC_CI_Q_ADDR_SIZE\t\t64\n+\n+#define 
CI_TABLE_SIZE(num_qps, pg_sz)\t\\\n+\t(ALIGN((num_qps) * HINIC_CI_Q_ADDR_SIZE, pg_sz))\n+\n+#define HINIC_CI_VADDR(base_addr, q_id)\t\t\\\n+\t((u8 *)(base_addr) + (q_id) * HINIC_CI_Q_ADDR_SIZE)\n+\n+#define HINIC_CI_PADDR(base_paddr, q_id)\t\\\n+\t((base_paddr) + (q_id) * HINIC_CI_Q_ADDR_SIZE)\n+\n+#define Q_CTXT_SIZE\t\t\t\t48\n+#define TSO_LRO_CTXT_SIZE\t\t\t240\n+\n+#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id)\t\\\n+\t(((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE +\t\\\n+\t\t(q_id) * Q_CTXT_SIZE)\n+\n+#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id)\t\\\n+\t(((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE +\t\\\n+\t\t(max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE)\n+\n+#define SQ_CTXT_SIZE(num_sqs)\t\t\\\n+\t((u16)(sizeof(struct hinic_qp_ctxt_header) +\t\\\n+\t\t(num_sqs) * sizeof(struct hinic_sq_ctxt)))\n+\n+#define RQ_CTXT_SIZE(num_rqs)\t\t\\\n+\t((u16)(sizeof(struct hinic_qp_ctxt_header) +\t\\\n+\t\t(num_rqs) * sizeof(struct hinic_rq_ctxt)))\n+\n+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT\t\t\t8\n+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT\t\t13\n+#define SQ_CTXT_CEQ_ATTR_EN_SHIFT\t\t\t23\n+#define SQ_CTXT_CEQ_ATTR_ARM_SHIFT\t\t\t31\n+\n+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK\t\t\t0x1FU\n+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK\t\t0x3FFU\n+#define SQ_CTXT_CEQ_ATTR_EN_MASK\t\t\t0x1U\n+#define SQ_CTXT_CEQ_ATTR_ARM_MASK\t\t\t0x1U\n+\n+#define SQ_CTXT_CEQ_ATTR_SET(val, member)\t\\\n+\t(((val) & SQ_CTXT_CEQ_ATTR_##member##_MASK) <<\t\\\n+\t\tSQ_CTXT_CEQ_ATTR_##member##_SHIFT)\n+\n+#define SQ_CTXT_CI_IDX_SHIFT\t\t\t\t11\n+#define SQ_CTXT_CI_OWNER_SHIFT\t\t\t\t23\n+\n+#define SQ_CTXT_CI_IDX_MASK\t\t\t\t0xFFFU\n+#define SQ_CTXT_CI_OWNER_MASK\t\t\t\t0x1U\n+\n+#define SQ_CTXT_CI_SET(val, member)\t\t\\\n+\t(((val) & SQ_CTXT_CI_##member##_MASK) << SQ_CTXT_CI_##member##_SHIFT)\n+\n+#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT\t\t\t0\n+#define SQ_CTXT_WQ_PAGE_PI_SHIFT\t\t\t20\n+\n+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK\t\t\t0xFFFFFU\n+#define 
SQ_CTXT_WQ_PAGE_PI_MASK\t\t\t\t0xFFFU\n+\n+#define SQ_CTXT_WQ_PAGE_SET(val, member)\t\\\n+\t(((val) & SQ_CTXT_WQ_PAGE_##member##_MASK) <<\t\\\n+\t\tSQ_CTXT_WQ_PAGE_##member##_SHIFT)\n+\n+#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT\t\t0\n+#define SQ_CTXT_PREF_CACHE_MAX_SHIFT\t\t\t14\n+#define SQ_CTXT_PREF_CACHE_MIN_SHIFT\t\t\t25\n+\n+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK\t\t0x3FFFU\n+#define SQ_CTXT_PREF_CACHE_MAX_MASK\t\t\t0x7FFU\n+#define SQ_CTXT_PREF_CACHE_MIN_MASK\t\t\t0x7FU\n+\n+#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT\t\t\t0\n+#define SQ_CTXT_PREF_CI_SHIFT\t\t\t\t20\n+\n+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK\t\t\t0xFFFFFU\n+#define SQ_CTXT_PREF_CI_MASK\t\t\t\t0xFFFU\n+\n+#define SQ_CTXT_PREF_SET(val, member)\t\t\\\n+\t(((val) & SQ_CTXT_PREF_##member##_MASK) <<\t\\\n+\t\tSQ_CTXT_PREF_##member##_SHIFT)\n+\n+#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT\t\t\t0\n+\n+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK\t\t\t0x7FFFFFU\n+\n+#define SQ_CTXT_WQ_BLOCK_SET(val, member)\t\\\n+\t(((val) & SQ_CTXT_WQ_BLOCK_##member##_MASK) <<\t\\\n+\t\tSQ_CTXT_WQ_BLOCK_##member##_SHIFT)\n+\n+#define RQ_CTXT_CEQ_ATTR_EN_SHIFT\t\t\t0\n+#define RQ_CTXT_CEQ_ATTR_OWNER_SHIFT\t\t\t1\n+\n+#define RQ_CTXT_CEQ_ATTR_EN_MASK\t\t\t0x1U\n+#define RQ_CTXT_CEQ_ATTR_OWNER_MASK\t\t\t0x1U\n+\n+#define RQ_CTXT_CEQ_ATTR_SET(val, member)\t\\\n+\t(((val) & RQ_CTXT_CEQ_ATTR_##member##_MASK) <<\t\\\n+\t\tRQ_CTXT_CEQ_ATTR_##member##_SHIFT)\n+\n+#define RQ_CTXT_PI_IDX_SHIFT\t\t\t\t0\n+#define RQ_CTXT_PI_INTR_SHIFT\t\t\t\t22\n+#define RQ_CTXT_PI_CEQ_ARM_SHIFT\t\t\t31\n+\n+#define RQ_CTXT_PI_IDX_MASK\t\t\t\t0xFFFU\n+#define RQ_CTXT_PI_INTR_MASK\t\t\t\t0x3FFU\n+#define RQ_CTXT_PI_CEQ_ARM_MASK\t\t\t\t0x1U\n+\n+#define RQ_CTXT_PI_SET(val, member)\t\t\\\n+\t(((val) & RQ_CTXT_PI_##member##_MASK) << RQ_CTXT_PI_##member##_SHIFT)\n+\n+#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT\t\t\t0\n+#define RQ_CTXT_WQ_PAGE_CI_SHIFT\t\t\t20\n+\n+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK\t\t\t0xFFFFFU\n+#define RQ_CTXT_WQ_PAGE_CI_MASK\t\t\t\t0xFFFU\n+\n+#define 
RQ_CTXT_WQ_PAGE_SET(val, member)\t\\\n+\t(((val) & RQ_CTXT_WQ_PAGE_##member##_MASK) << \\\n+\t\tRQ_CTXT_WQ_PAGE_##member##_SHIFT)\n+\n+#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT\t\t0\n+#define RQ_CTXT_PREF_CACHE_MAX_SHIFT\t\t\t14\n+#define RQ_CTXT_PREF_CACHE_MIN_SHIFT\t\t\t25\n+\n+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK\t\t0x3FFFU\n+#define RQ_CTXT_PREF_CACHE_MAX_MASK\t\t\t0x7FFU\n+#define RQ_CTXT_PREF_CACHE_MIN_MASK\t\t\t0x7FU\n+\n+#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT\t\t\t0\n+#define RQ_CTXT_PREF_CI_SHIFT\t\t\t\t20\n+\n+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK\t\t\t0xFFFFFU\n+#define RQ_CTXT_PREF_CI_MASK\t\t\t\t0xFFFU\n+\n+#define RQ_CTXT_PREF_SET(val, member)\t\t\\\n+\t(((val) & RQ_CTXT_PREF_##member##_MASK) <<\t\\\n+\t\tRQ_CTXT_PREF_##member##_SHIFT)\n+\n+#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT\t\t\t0\n+\n+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK\t\t\t0x7FFFFFU\n+\n+#define RQ_CTXT_WQ_BLOCK_SET(val, member)\t\\\n+\t(((val) & RQ_CTXT_WQ_BLOCK_##member##_MASK) <<\t\\\n+\t\tRQ_CTXT_WQ_BLOCK_##member##_SHIFT)\n+\n+#define SIZE_16BYTES(size)\t\t(ALIGN((size), 16) >> 4)\n+\n+enum hinic_qp_ctxt_type {\n+\tHINIC_QP_CTXT_TYPE_SQ,\n+\tHINIC_QP_CTXT_TYPE_RQ,\n+};\n+\n+struct hinic_sq {\n+\tstruct hinic_wq\t\t*wq;\n+\tvolatile u16\t\t*cons_idx_addr;\n+\tvoid __iomem\t\t*db_addr;\n+\n+\tu16\tq_id;\n+\tu16\towner;\n+\tu16\tsq_depth;\n+};\n+\n+struct hinic_rq {\n+\tstruct hinic_wq\t\t*wq;\n+\tvolatile u16\t\t*pi_virt_addr;\n+\tdma_addr_t\t\tpi_dma_addr;\n+\n+\tu16\t\t\tirq_id;\n+\tu16\t\t\tmsix_entry_idx;\n+\tu16\t\t\tq_id;\n+\tu16\t\t\trq_depth;\n+};\n+\n+struct hinic_qp {\n+\tstruct hinic_sq\t\tsq;\n+\tstruct hinic_rq\t\trq;\n+};\n+\n+struct hinic_event {\n+\tvoid (*tx_ack)(void *handle, u16 q_id);\n+\t/* status: 0 - link down; 1 - link up */\n+\tvoid (*link_change)(void *handle, int status);\n+};\n+\n+struct hinic_nic_io {\n+\tstruct hinic_hwdev\t*hwdev;\n+\n+\tu16\t\t\tglobal_qpn;\n+\n+\tstruct hinic_wq\t\t*sq_wq;\n+\tstruct 
hinic_wq\t\t*rq_wq;\n+\n+\tu16\t\t\tmax_qps;\n+\tu16\t\t\tnum_qps;\n+\n+\tu16\t\t\tnum_sqs;\n+\tu16\t\t\tnum_rqs;\n+\n+\tu16\t\t\tsq_depth;\n+\tu16\t\t\trq_depth;\n+\n+\tu16\t\t\trq_buf_size;\n+\tu16\t\t\tvhd_mode;\n+\n+\tstruct hinic_qp\t\t*qps;\n+\t/* sq ci mem base addr of the function*/\n+\tvoid\t\t\t*ci_vaddr_base;\n+\tdma_addr_t\t\tci_dma_base;\n+\n+\tstruct hinic_event\tevent;\n+\tvoid\t\t\t*event_handle;\n+};\n+\n+struct hinic_sq_db {\n+\tu32\tdb_info;\n+};\n+\n+\n+int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev);\n+\n+void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev);\n+\n+int hinic_rx_tx_flush(struct hinic_hwdev *hwdev);\n+\n+int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id);\n+\n+u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id);\n+\n+void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id,\n+\t\t\t      int wqebb_cnt);\n+\n+void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,\n+\t\t\t int num_wqebbs, u16 owner);\n+\n+int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id);\n+\n+void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi);\n+\n+void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs);\n+\n+u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id);\n+\n+void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt);\n+\n+int hinic_init_nicio(struct hinic_hwdev *hwdev);\n+\n+void hinic_deinit_nicio(struct hinic_hwdev *hwdev);\n+\n+int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz);\n+\n+#endif /* _HINIC_PMD_NICIO_H_ */\ndiff --git a/drivers/net/hinic/base/hinic_pmd_wq.c b/drivers/net/hinic/base/hinic_pmd_wq.c\nnew file mode 100644\nindex 000000000..04c81f9bc\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_wq.c\n@@ -0,0 +1,179 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#include \"hinic_compat.h\"\n+#include \"hinic_pmd_hwdev.h\"\n+#include 
\"hinic_pmd_wq.h\"\n+\n+static void free_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)\n+{\n+\tdma_free_coherent(hwdev, wq->wq_buf_size, (void *)wq->queue_buf_vaddr,\n+\t\t\t(dma_addr_t)wq->queue_buf_paddr);\n+\n+\twq->queue_buf_paddr = 0;\n+\twq->queue_buf_vaddr = 0;\n+}\n+\n+static int alloc_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)\n+{\n+\tdma_addr_t dma_addr = 0;\n+\n+\twq->queue_buf_vaddr = (u64)(u64 *)\n+\t\tdma_zalloc_coherent_aligned256k(hwdev, wq->wq_buf_size,\n+\t\t\t\t\t\t&dma_addr, GFP_KERNEL);\n+\tif (!wq->queue_buf_vaddr) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate wq page\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (!ADDR_256K_ALIGNED(dma_addr)) {\n+\t\tPMD_DRV_LOG(ERR, \"Wqe pages is not 256k aligned!\");\n+\t\tdma_free_coherent(hwdev, wq->wq_buf_size,\n+\t\t\t\t  (void *)wq->queue_buf_vaddr,\n+\t\t\t\t  dma_addr);\n+\t\treturn -ENOMEM;\n+\t}\n+\twq->queue_buf_paddr = dma_addr;\n+\n+\treturn 0;\n+}\n+\n+int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,\n+\t\t      u32 wqebb_shift, u16 q_depth)\n+{\n+\tint err;\n+\n+\tif (q_depth & (q_depth - 1)) {\n+\t\tPMD_DRV_LOG(ERR, \"WQ q_depth isn't power of 2\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\twq->wqebb_size = 1 << wqebb_shift;\n+\twq->wqebb_shift = wqebb_shift;\n+\twq->wq_buf_size = ((u32)q_depth) << wqebb_shift;\n+\twq->q_depth = q_depth;\n+\n+\tif (wq->wq_buf_size > (HINIC_PAGE_SIZE << HINIC_PAGE_SIZE_DPDK)) {\n+\t\tPMD_DRV_LOG(ERR, \"Invalid q_depth %u which one page_size can not hold\",\n+\t\t\tq_depth);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\terr = alloc_wq_pages(hwdev, wq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate wq pages\");\n+\t\treturn err;\n+\t}\n+\n+\twq->cons_idx = 0;\n+\twq->prod_idx = 0;\n+\twq->delta = q_depth;\n+\twq->mask = q_depth - 1;\n+\n+\treturn 0;\n+}\n+\n+void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq)\n+{\n+\tfree_wq_pages(hwdev, wq);\n+}\n+\n+void hinic_put_wqe(struct hinic_wq *wq, int 
num_wqebbs)\n+{\n+\twq->cons_idx += num_wqebbs;\n+\twq->delta += num_wqebbs;\n+}\n+\n+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)\n+{\n+\tu16 curr_cons_idx;\n+\n+\tif ((wq->delta + num_wqebbs) > wq->q_depth)\n+\t\treturn NULL;\n+\n+\tcurr_cons_idx = (u16)(wq->cons_idx);\n+\n+\tcurr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);\n+\n+\t*cons_idx = curr_cons_idx;\n+\n+\treturn WQ_WQE_ADDR(wq, (u32)(*cons_idx));\n+}\n+\n+int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev,\n+\t\t     int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,\n+\t\t     u16 q_depth)\n+{\n+\tint i, j, err = -ENOMEM;\n+\n+\t/* validate q_depth is power of 2 & wqebb_size is not 0 */\n+\tfor (i = 0; i < cmdq_blocks; i++) {\n+\t\twq[i].wqebb_size = 1 << wqebb_shift;\n+\t\twq[i].wqebb_shift = wqebb_shift;\n+\t\twq[i].wq_buf_size = wq_buf_size;\n+\t\twq[i].q_depth = q_depth;\n+\n+\t\terr = alloc_wq_pages(hwdev, &wq[i]);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to alloc CMDQ blocks\");\n+\t\t\tgoto cmdq_block_err;\n+\t\t}\n+\n+\t\twq[i].cons_idx = 0;\n+\t\twq[i].prod_idx = 0;\n+\t\twq[i].delta = q_depth;\n+\n+\t\twq[i].mask = q_depth - 1;\n+\t}\n+\n+\treturn 0;\n+\n+cmdq_block_err:\n+\tfor (j = 0; j < i; j++)\n+\t\tfree_wq_pages(hwdev, &wq[j]);\n+\n+\treturn err;\n+}\n+\n+void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq,\n+\t\t     int cmdq_blocks)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < cmdq_blocks; i++)\n+\t\tfree_wq_pages(hwdev, &wq[i]);\n+}\n+\n+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)\n+{\n+\twq->cons_idx = 0;\n+\twq->prod_idx = 0;\n+\n+\tmemset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size);\n+}\n+\n+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)\n+{\n+\tu16 curr_prod_idx;\n+\n+\twq->delta -= num_wqebbs;\n+\tcurr_prod_idx = wq->prod_idx;\n+\twq->prod_idx += num_wqebbs;\n+\t*prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);\n+\n+\treturn WQ_WQE_ADDR(wq, (u32)(*prod_idx));\n+}\n+\n+/**\n+ * 
hinic_set_sge - set dma area in scatter gather entry\n+ * @sge: scatter gather entry\n+ * @addr: dma address\n+ * @len: length of relevant data in the dma address\n+ **/\n+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len)\n+{\n+\tsge->hi_addr = upper_32_bits(addr);\n+\tsge->lo_addr = lower_32_bits(addr);\n+\tsge->len  = len;\n+}\ndiff --git a/drivers/net/hinic/base/hinic_pmd_wq.h b/drivers/net/hinic/base/hinic_pmd_wq.h\nnew file mode 100644\nindex 000000000..53ecc225c\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_wq.h\n@@ -0,0 +1,137 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_PMD_WQ_H_\n+#define _HINIC_PMD_WQ_H_\n+\n+#define WQS_BLOCKS_PER_PAGE\t\t4\n+\n+#define WQ_SIZE(wq)\t\t(u32)((u64)(wq)->q_depth * (wq)->wqebb_size)\n+\n+#define\tWQE_PAGE_NUM(wq, idx)\t(((idx) >> ((wq)->wqebbs_per_page_shift)) & \\\n+\t\t\t\t((wq)->num_q_pages - 1))\n+\n+#define\tWQE_PAGE_OFF(wq, idx)\t((u64)((wq)->wqebb_size) * \\\n+\t\t\t\t((idx) & ((wq)->num_wqebbs_per_page - 1)))\n+\n+#define WQ_PAGE_ADDR_SIZE\t\tsizeof(u64)\n+#define WQ_PAGE_ADDR_SIZE_SHIFT\t\t3\n+#define WQ_PAGE_ADDR(wq, idx)\t\t\\\n+\t\t(u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \\\n+\t\t(WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT)))\n+\n+#define WQ_BLOCK_SIZE\t\t4096UL\n+#define WQS_PAGE_SIZE\t\t(WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)\n+#define WQ_MAX_PAGES\t\t(WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT)\n+\n+#define CMDQ_BLOCKS_PER_PAGE\t\t8\n+#define CMDQ_BLOCK_SIZE\t\t\t512UL\n+#define CMDQ_PAGE_SIZE\t\t\tALIGN((CMDQ_BLOCKS_PER_PAGE * \\\n+\t\t\t\t\t\tCMDQ_BLOCK_SIZE), PAGE_SIZE)\n+\n+#define ADDR_4K_ALIGNED(addr)\t\t(0 == ((addr) & 0xfff))\n+#define ADDR_256K_ALIGNED(addr)\t\t(0 == ((addr) & 0x3ffff))\n+\n+#define WQ_BASE_VADDR(wqs, wq)\t\t\\\n+\t\t(u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \\\n+\t\t\t\t+ (wq)->block_idx * WQ_BLOCK_SIZE)\n+\n+#define WQ_BASE_PADDR(wqs, 
wq)\t(((wqs)->page_paddr[(wq)->page_idx]) \\\n+\t\t\t\t+ (u64)(wq)->block_idx * WQ_BLOCK_SIZE)\n+\n+#define WQ_BASE_ADDR(wqs, wq)\t\t\\\n+\t\t(u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \\\n+\t\t\t\t+ (wq)->block_idx * WQ_BLOCK_SIZE)\n+\n+#define CMDQ_BASE_VADDR(cmdq_pages, wq)\t\\\n+\t\t\t(u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \\\n+\t\t\t\t+ (wq)->block_idx * CMDQ_BLOCK_SIZE)\n+\n+#define CMDQ_BASE_PADDR(cmdq_pages, wq)\t\\\n+\t\t\t(((u64)((cmdq_pages)->cmdq_page_paddr)) \\\n+\t\t\t\t+ (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE)\n+\n+#define CMDQ_BASE_ADDR(cmdq_pages, wq)\t\\\n+\t\t\t(u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \\\n+\t\t\t\t+ (wq)->block_idx * CMDQ_BLOCK_SIZE)\n+\n+#define MASKED_WQE_IDX(wq, idx)\t((idx) & (wq)->mask)\n+\n+#define WQE_SHADOW_PAGE(wq, wqe)\t\\\n+\t\t(u16)(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \\\n+\t\t/ (wq)->max_wqe_size)\n+\n+#define WQE_IN_RANGE(wqe, start, end)\t\\\n+\t\t(((unsigned long)(wqe) >= (unsigned long)(start)) && \\\n+\t\t((unsigned long)(wqe) < (unsigned long)(end)))\n+\n+#define WQ_NUM_PAGES(num_wqs)\t\\\n+\t(ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE)\n+\n+#define\tWQ_WQE_ADDR(wq, idx) ((void *)((u64)((wq)->queue_buf_vaddr) + \\\n+\t\t\t      ((idx) << (wq)->wqebb_shift)))\n+\n+#define\tWQ_PAGE_PFN_SHIFT\t\t\t12\n+#define\tWQ_BLOCK_PFN_SHIFT\t\t\t9\n+\n+#define WQ_PAGE_PFN(page_addr)\t\t((page_addr) >> WQ_PAGE_PFN_SHIFT)\n+#define WQ_BLOCK_PFN(page_addr)\t\t((page_addr) >> WQ_BLOCK_PFN_SHIFT)\n+\n+\n+#define HINIC_SQ_WQEBB_SIZE\t64\n+#define HINIC_RQ_WQE_SIZE\t32\n+#define HINIC_SQ_WQEBB_SHIFT\t6\n+#define HINIC_RQ_WQEBB_SHIFT\t5\n+\n+struct hinic_sge {\n+\tu32\t\thi_addr;\n+\tu32\t\tlo_addr;\n+\tu32\t\tlen;\n+};\n+\n+/* Working Queue */\n+struct hinic_wq {\n+\t/* The addresses are 64 bit in the HW */\n+\tu64     queue_buf_vaddr;\n+\n+\tu16\t\tq_depth;\n+\tu16\t\tmask;\n+\tu32\t\tdelta;\n+\n+\tu32\t\tcons_idx;\n+\tu32\t\tprod_idx;\n+\n+\tu64     
queue_buf_paddr;\n+\n+\tu32\t\twqebb_size;\n+\tu32\t\twqebb_shift;\n+\n+\tu32\t\twq_buf_size;\n+\n+\tu32\t\trsvd[5];\n+};\n+\n+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq);\n+\n+int hinic_cmdq_alloc(struct hinic_wq *wq, struct hinic_hwdev *hwdev,\n+\t\t     int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,\n+\t\t     u16 q_depth);\n+\n+void hinic_cmdq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq,\n+\t\t     int cmdq_blocks);\n+\n+int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,\n+\t\t      u32 wqebb_shift, u16 q_depth);\n+\n+void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq);\n+\n+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx);\n+\n+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs);\n+\n+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx);\n+\n+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len);\n+\n+#endif /* _HINIC_PMD_WQ_H_ */\n",
    "prefixes": [
        "v5",
        "09/15"
    ]
}