get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all writable fields.
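
Example: fetching this patch from a script. A minimal sketch, assuming only the
third-party "requests" library (not part of Patchwork itself); "format=json"
selects the plain JSON rendering rather than this browsable "format=api" page.

    import requests

    BASE = "http://patches.dpdk.org/api"

    # Read access needs no authentication.
    resp = requests.get(BASE + "/patches/54167/", params={"format": "json"})
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])   # "[v3,05/11] net/hinic/base: add eqs and context code"
    print(patch["state"])  # "superseded"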

GET /api/patches/54167/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 54167,
    "url": "http://patches.dpdk.org/api/patches/54167/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/835c5e1132663e18be29984212d55accb19de7c4.1559553895.git.xuanziyang2@huawei.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<835c5e1132663e18be29984212d55accb19de7c4.1559553895.git.xuanziyang2@huawei.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/835c5e1132663e18be29984212d55accb19de7c4.1559553895.git.xuanziyang2@huawei.com",
    "date": "2019-06-03T11:39:05",
    "name": "[v3,05/11] net/hinic/base: add eqs and context code",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "4af5c8c5b1d92de99219814c3550242ef9ef9b2b",
    "submitter": {
        "id": 1321,
        "url": "http://patches.dpdk.org/api/people/1321/?format=api",
        "name": "Ziyang Xuan",
        "email": "xuanziyang2@huawei.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/835c5e1132663e18be29984212d55accb19de7c4.1559553895.git.xuanziyang2@huawei.com/mbox/",
    "series": [
        {
            "id": 4862,
            "url": "http://patches.dpdk.org/api/series/4862/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4862",
            "date": "2019-06-03T11:36:53",
            "name": "A new net PMD - hinic",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/4862/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/54167/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/54167/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id AAC481B99E;\n\tMon,  3 Jun 2019 13:27:53 +0200 (CEST)",
            "from huawei.com (szxga05-in.huawei.com [45.249.212.191])\n\tby dpdk.org (Postfix) with ESMTP id 3ABE71B99D\n\tfor <dev@dpdk.org>; Mon,  3 Jun 2019 13:27:52 +0200 (CEST)",
            "from DGGEMS409-HUB.china.huawei.com (unknown [172.30.72.60])\n\tby Forcepoint Email with ESMTP id 0E630D065341E2BFA830\n\tfor <dev@dpdk.org>; Mon,  3 Jun 2019 19:27:50 +0800 (CST)",
            "from tester_149.localdomain (10.175.119.39) by\n\tDGGEMS409-HUB.china.huawei.com (10.3.19.209) with Microsoft SMTP\n\tServer id 14.3.439.0; Mon, 3 Jun 2019 19:27:41 +0800"
        ],
        "From": "Ziyang Xuan <xuanziyang2@huawei.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<ferruh.yigit@intel.com>, <cloud.wangxiaoyun@huawei.com>,\n\t<zhouguoyang@huawei.com>, <shahar.belkar@huawei.com>,\n\t<luoxianjun@huawei.com>, Ziyang Xuan <xuanziyang2@huawei.com>",
        "Date": "Mon, 3 Jun 2019 19:39:05 +0800",
        "Message-ID": "<835c5e1132663e18be29984212d55accb19de7c4.1559553895.git.xuanziyang2@huawei.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<cover.1559553894.git.xuanziyang2@huawei.com>",
        "References": "<cover.1559553894.git.xuanziyang2@huawei.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.175.119.39]",
        "X-CFilter-Loop": "Reflected",
        "Subject": "[dpdk-dev] [PATCH v3 05/11] net/hinic/base: add eqs and context code",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add code for eq. Aeq is a kind queue for mgmt asynchronous message\nand mgmt command response message.\n\nSigned-off-by: Ziyang Xuan <xuanziyang2@huawei.com>\n---\n drivers/net/hinic/base/hinic_ctx_def.h   | 184 ++++++\n drivers/net/hinic/base/hinic_pmd_dpdev.h | 146 +++++\n drivers/net/hinic/base/hinic_pmd_eqs.c   | 725 +++++++++++++++++++++++\n drivers/net/hinic/base/hinic_pmd_eqs.h   |  94 +++\n 4 files changed, 1149 insertions(+)\n create mode 100644 drivers/net/hinic/base/hinic_ctx_def.h\n create mode 100644 drivers/net/hinic/base/hinic_pmd_dpdev.h\n create mode 100644 drivers/net/hinic/base/hinic_pmd_eqs.c\n create mode 100644 drivers/net/hinic/base/hinic_pmd_eqs.h",
    "diff": "diff --git a/drivers/net/hinic/base/hinic_ctx_def.h b/drivers/net/hinic/base/hinic_ctx_def.h\nnew file mode 100644\nindex 000000000..ff5151bae\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_ctx_def.h\n@@ -0,0 +1,184 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_CTX_DEF_H_\n+#define _HINIC_CTX_DEF_H_\n+\n+#define MASKED_SQ_IDX(sq, idx)\t\t\t((idx) & (sq)->wq->mask)\n+\n+#define HINIC_Q_CTXT_MAX\t\t\t42\n+\n+/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */\n+#define HINIC_CI_Q_ADDR_SIZE\t\t\t(64)\n+\n+#define CI_TABLE_SIZE(num_qps, pg_sz)\t\\\n+\t\t\t(ALIGN((num_qps) * HINIC_CI_Q_ADDR_SIZE, pg_sz))\n+\n+#define HINIC_CI_VADDR(base_addr, q_id)\t\t((u8 *)(base_addr) + \\\n+\t\t\t\t\t\t(q_id) * HINIC_CI_Q_ADDR_SIZE)\n+\n+#define HINIC_CI_PADDR(base_paddr, q_id)\t((base_paddr) + \\\n+\t\t\t\t\t\t(q_id) * HINIC_CI_Q_ADDR_SIZE)\n+\n+#define Q_CTXT_SIZE\t\t\t\t\t48\n+#define TSO_LRO_CTXT_SIZE\t\t\t\t240\n+\n+#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id)\t\\\n+\t\t\t(((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \\\n+\t\t\t+ (q_id) * Q_CTXT_SIZE)\n+\n+#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id)\t\\\n+\t\t\t(((max_rqs) + (max_sqs)) * TSO_LRO_CTXT_SIZE \\\n+\t\t\t+ (max_sqs) * Q_CTXT_SIZE + (q_id) * Q_CTXT_SIZE)\n+\n+#define SQ_CTXT_SIZE(num_sqs)\t((u16)(sizeof(struct hinic_qp_ctxt_header) \\\n+\t\t\t\t+ (num_sqs) * sizeof(struct hinic_sq_ctxt)))\n+\n+#define RQ_CTXT_SIZE(num_rqs)\t((u16)(sizeof(struct hinic_qp_ctxt_header) \\\n+\t\t\t\t+ (num_rqs) * sizeof(struct hinic_rq_ctxt)))\n+\n+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_SHIFT\t\t\t8\n+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT\t\t13\n+#define SQ_CTXT_CEQ_ATTR_EN_SHIFT\t\t\t23\n+#define SQ_CTXT_CEQ_ATTR_ARM_SHIFT\t\t\t31\n+\n+#define SQ_CTXT_CEQ_ATTR_CEQ_ID_MASK\t\t\t0x1FU\n+#define SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK\t\t0x3FFU\n+#define SQ_CTXT_CEQ_ATTR_EN_MASK\t\t\t0x1U\n+#define SQ_CTXT_CEQ_ATTR_ARM_MASK\t\t\t0x1U\n+\n+#define SQ_CTXT_CEQ_ATTR_SET(val, member)\t\t(((val) & \\\n+\t\t\t\t\tSQ_CTXT_CEQ_ATTR_##member##_MASK) \\\n+\t\t\t\t\t<< SQ_CTXT_CEQ_ATTR_##member##_SHIFT)\n+\n+#define SQ_CTXT_CI_IDX_SHIFT\t\t\t\t11\n+#define SQ_CTXT_CI_OWNER_SHIFT\t\t\t\t23\n+\n+#define SQ_CTXT_CI_IDX_MASK\t\t\t\t0xFFFU\n+#define SQ_CTXT_CI_OWNER_MASK\t\t\t\t0x1U\n+\n+#define SQ_CTXT_CI_SET(val, member)\t\t\t(((val) & \\\n+\t\t\t\t\tSQ_CTXT_CI_##member##_MASK) \\\n+\t\t\t\t\t<< SQ_CTXT_CI_##member##_SHIFT)\n+\n+#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT\t\t\t0\n+#define SQ_CTXT_WQ_PAGE_PI_SHIFT\t\t\t20\n+\n+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK\t\t\t0xFFFFFU\n+#define SQ_CTXT_WQ_PAGE_PI_MASK\t\t\t\t0xFFFU\n+\n+#define SQ_CTXT_WQ_PAGE_SET(val, member)\t\t(((val) & \\\n+\t\t\t\t\tSQ_CTXT_WQ_PAGE_##member##_MASK) \\\n+\t\t\t\t\t<< SQ_CTXT_WQ_PAGE_##member##_SHIFT)\n+\n+#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT\t\t0\n+#define SQ_CTXT_PREF_CACHE_MAX_SHIFT\t\t\t14\n+#define SQ_CTXT_PREF_CACHE_MIN_SHIFT\t\t\t25\n+\n+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK\t\t0x3FFFU\n+#define SQ_CTXT_PREF_CACHE_MAX_MASK\t\t\t0x7FFU\n+#define SQ_CTXT_PREF_CACHE_MIN_MASK\t\t\t0x7FU\n+\n+#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT\t\t\t0\n+#define SQ_CTXT_PREF_CI_SHIFT\t\t\t\t20\n+\n+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK\t\t\t0xFFFFFU\n+#define SQ_CTXT_PREF_CI_MASK\t\t\t\t0xFFFU\n+\n+#define SQ_CTXT_PREF_SET(val, member)\t\t\t(((val) & \\\n+\t\t\t\t\tSQ_CTXT_PREF_##member##_MASK) \\\n+\t\t\t\t\t<< SQ_CTXT_PREF_##member##_SHIFT)\n+\n+#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT\t\t\t0\n+\n+#define 
SQ_CTXT_WQ_BLOCK_PFN_HI_MASK\t\t\t0x7FFFFFU\n+\n+#define SQ_CTXT_WQ_BLOCK_SET(val, member)\t(((val) & \\\n+\t\t\t\t\tSQ_CTXT_WQ_BLOCK_##member##_MASK) \\\n+\t\t\t\t\t<< SQ_CTXT_WQ_BLOCK_##member##_SHIFT)\n+\n+#define RQ_CTXT_CEQ_ATTR_EN_SHIFT\t\t\t0\n+#define RQ_CTXT_CEQ_ATTR_OWNER_SHIFT\t\t\t1\n+\n+#define RQ_CTXT_CEQ_ATTR_EN_MASK\t\t\t0x1U\n+#define RQ_CTXT_CEQ_ATTR_OWNER_MASK\t\t\t0x1U\n+\n+#define RQ_CTXT_CEQ_ATTR_SET(val, member)\t\t(((val) & \\\n+\t\t\t\t\tRQ_CTXT_CEQ_ATTR_##member##_MASK) \\\n+\t\t\t\t\t<< RQ_CTXT_CEQ_ATTR_##member##_SHIFT)\n+\n+#define RQ_CTXT_PI_IDX_SHIFT\t\t\t\t0\n+#define RQ_CTXT_PI_INTR_SHIFT\t\t\t\t22\n+#define RQ_CTXT_PI_CEQ_ARM_SHIFT\t\t\t31\n+\n+#define RQ_CTXT_PI_IDX_MASK\t\t\t\t0xFFFU\n+#define RQ_CTXT_PI_INTR_MASK\t\t\t\t0x3FFU\n+#define RQ_CTXT_PI_CEQ_ARM_MASK\t\t\t\t0x1U\n+\n+#define RQ_CTXT_PI_SET(val, member)\t\t\t(((val) & \\\n+\t\t\t\t\tRQ_CTXT_PI_##member##_MASK) << \\\n+\t\t\t\t\tRQ_CTXT_PI_##member##_SHIFT)\n+\n+#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT\t\t\t0\n+#define RQ_CTXT_WQ_PAGE_CI_SHIFT\t\t\t20\n+\n+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK\t\t\t0xFFFFFU\n+#define RQ_CTXT_WQ_PAGE_CI_MASK\t\t\t\t0xFFFU\n+\n+#define RQ_CTXT_WQ_PAGE_SET(val, member)\t\t(((val) & \\\n+\t\t\t\t\tRQ_CTXT_WQ_PAGE_##member##_MASK) << \\\n+\t\t\t\t\tRQ_CTXT_WQ_PAGE_##member##_SHIFT)\n+\n+#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT\t\t0\n+#define RQ_CTXT_PREF_CACHE_MAX_SHIFT\t\t\t14\n+#define RQ_CTXT_PREF_CACHE_MIN_SHIFT\t\t\t25\n+\n+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK\t\t0x3FFFU\n+#define RQ_CTXT_PREF_CACHE_MAX_MASK\t\t\t0x7FFU\n+#define RQ_CTXT_PREF_CACHE_MIN_MASK\t\t\t0x7FU\n+\n+#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT\t\t\t0\n+#define RQ_CTXT_PREF_CI_SHIFT\t\t\t\t20\n+\n+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK\t\t\t0xFFFFFU\n+#define RQ_CTXT_PREF_CI_MASK\t\t\t\t0xFFFU\n+\n+#define RQ_CTXT_PREF_SET(val, member)\t\t\t(((val) & \\\n+\t\t\t\t\tRQ_CTXT_PREF_##member##_MASK) << \\\n+\t\t\t\t\tRQ_CTXT_PREF_##member##_SHIFT)\n+\n+#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT\t\t\t0\n+\n+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK\t\t\t0x7FFFFFU\n+\n+#define RQ_CTXT_WQ_BLOCK_SET(val, member)\t\t(((val) & \\\n+\t\t\t\t\tRQ_CTXT_WQ_BLOCK_##member##_MASK) << \\\n+\t\t\t\t\tRQ_CTXT_WQ_BLOCK_##member##_SHIFT)\n+\n+#define SIZE_16BYTES(size)\t\t(ALIGN((size), 16) >> 4)\n+\n+#define\tWQ_PAGE_PFN_SHIFT\t\t\t\t12\n+#define\tWQ_BLOCK_PFN_SHIFT\t\t\t\t9\n+\n+#define WQ_PAGE_PFN(page_addr)\t\t((page_addr) >> WQ_PAGE_PFN_SHIFT)\n+#define WQ_BLOCK_PFN(page_addr)\t\t((page_addr) >> WQ_BLOCK_PFN_SHIFT)\n+\n+enum sq_cflag {\n+\tCFLAG_DATA_PATH = 0,\n+};\n+\n+enum hinic_qp_ctxt_type {\n+\tHINIC_QP_CTXT_TYPE_SQ,\n+\tHINIC_QP_CTXT_TYPE_RQ,\n+};\n+\n+/* service type related define */\n+enum cfg_svc_type_en {\n+\tCFG_SVC_NIC_BIT0    = (1 << 0),\n+};\n+\n+#define IS_NIC_TYPE(dev) \\\n+\t((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_NIC_BIT0)\n+\n+#endif /* _HINIC_CTX_DEF_H_ */\ndiff --git a/drivers/net/hinic/base/hinic_pmd_dpdev.h b/drivers/net/hinic/base/hinic_pmd_dpdev.h\nnew file mode 100644\nindex 000000000..dfaec0209\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_dpdev.h\n@@ -0,0 +1,146 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_PMD_DPDEV_H_\n+#define _HINIC_PMD_DPDEV_H_\n+\n+#include <rte_ethdev.h>\n+#include <rte_eth_ctrl.h>\n+\n+#include \"hinic_compat.h\"\n+#include \"hinic_csr.h\"\n+#include \"hinic_ctx_def.h\"\n+#include \"hinic_qe_def.h\"\n+#include \"hinic_port_cmd.h\"\n+#include \"hinic_pmd_wq.h\"\n+#include 
\"hinic_pmd_hw.h\"\n+#include \"hinic_pmd_hw_mgmt.h\"\n+#include \"hinic_pmd_hwif.h\"\n+#include \"hinic_pmd_nicio.h\"\n+#include \"hinic_pmd_qp.h\"\n+#include \"hinic_pmd_hwdev.h\"\n+#include \"hinic_pmd_nic.h\"\n+#include \"hinic_pmd_niccfg.h\"\n+#include \"hinic_pmd_mgmt_interface.h\"\n+#include \"hinic_pmd_cfg.h\"\n+#include \"hinic_pmd_eqs.h\"\n+#include \"hinic_pmd_api_cmd.h\"\n+#include \"hinic_pmd_mgmt.h\"\n+#include \"hinic_pmd_cmdq.h\"\n+\n+#define HINIC_AEQN_START\t(0)\n+#define HINIC_AEQN_NUM\t\t(4)\n+#define HINIC_MGMT_RSP_AEQN\t(1)\n+\n+#define HINIC_DEV_NAME_LEN\t(32)\n+\n+#define HINIC_MAX_DMA_ENTRIES\t(8192)\n+\n+#define HINIC_MAX_RX_QUEUES\t(64)\n+\n+#define HINIC_MGMT_CMD_UNSUPPORTED\t0xFF\n+\n+/* mbuf pool for copy invalid mbuf segs */\n+#define HINIC_COPY_MEMPOOL_DEPTH (128)\n+#define HINIC_COPY_MBUF_SIZE     (4096)\n+\n+#define HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev) \\\n+\t((struct hinic_nic_dev *)(dev)->data->dev_private)\n+\n+enum hinic_dev_status {\n+\tHINIC_DEV_INIT,\n+\tHINIC_DEV_CLOSE,\n+\tHINIC_DEV_START,\n+\tHINIC_DEV_INTR_EN,\n+};\n+\n+struct hinic_txq;\n+struct hinic_rxq;\n+\n+/* dma os dependency implementation */\n+struct hinic_os_dep {\n+\t/* kernel dma alloc api */\n+\trte_atomic32_t dma_alloc_cnt;\n+\trte_spinlock_t  dma_hash_lock;\n+\tstruct rte_hash *dma_addr_hash;\n+};\n+\n+/* hinic nic_device */\n+struct hinic_nic_dev {\n+\tu32 link_status;\t\t/* port link status */\n+\tstruct hinic_txq **txqs;\n+\tstruct hinic_rxq **rxqs;\n+\tstruct rte_mempool *cpy_mpool;\n+\tu16 num_qps;\n+\tu16 num_sq;\n+\tu16 num_rq;\n+\tu16 mtu_size;\n+\tu8 rss_tmpl_idx;\n+\tu8 rss_indir_flag;\n+\tu8 num_rss;\n+\tu8 rx_queue_list[HINIC_MAX_RX_QUEUES];\n+\n+\t/* hardware hw_dev */\n+\tstruct hinic_hwdev *hwdev;\n+\tstruct hinic_nic_io *nic_io;\n+\n+\t/* dma memory allocator */\n+\tstruct hinic_os_dep dumb_os_dep;\n+\tstruct hinic_os_dep *os_dep;\n+\n+\t/* info */\n+\tunsigned int flags;\n+\tstruct nic_service_cap nic_cap;\n+\tu32 rx_mode_status;\t/* promisc allmulticast */\n+\tunsigned long dev_status;\n+\n+\t/* dpdk only */\n+\tchar proc_dev_name[HINIC_DEV_NAME_LEN];\n+\t/* PF0->COS4, PF1->COS5, PF2->COS6, PF3->COS7,\n+\t * vf: the same with associate pf\n+\t */\n+\tu32 default_cos;\n+\n+\tu32 ffm_num;\n+};\n+\n+int32_t hinic_nic_dev_create(struct rte_eth_dev *rte_dev);\n+void hinic_nic_dev_destroy(struct rte_eth_dev *rte_dev);\n+\n+int hinic_hwif_res_init(struct hinic_nic_dev *nic_dev);\n+void hinic_hwif_res_free(struct hinic_nic_dev *nic_dev);\n+\n+int hinic_init_nicio(struct hinic_nic_dev *nic_dev);\n+void hinic_deinit_nicio(struct hinic_nic_dev *nic_dev);\n+\n+int hinic_comm_aeqs_init(struct hinic_nic_dev *nic_dev);\n+void hinic_comm_aeqs_free(struct hinic_nic_dev *nic_dev);\n+\n+int hinic_comm_pf_to_mgmt_init(struct hinic_nic_dev *nic_dev);\n+void hinic_comm_pf_to_mgmt_free(struct hinic_nic_dev *nic_dev);\n+\n+int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev);\n+void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev);\n+\n+int hinic_init_capability(struct hinic_nic_dev *nic_dev);\n+\n+int hinic_create_rq(struct hinic_nic_dev *nic_dev, u16 q_id, u16 rq_depth);\n+void hinic_destroy_rq(struct hinic_nic_dev *nic_dev, u16 q_id);\n+\n+int hinic_create_sq(struct hinic_nic_dev *nic_dev, u16 q_id, u16 sq_depth);\n+void hinic_destroy_sq(struct hinic_nic_dev *nic_dev, u16 q_id);\n+\n+void hinic_lsc_process(struct rte_eth_dev *rte_dev, u8 status);\n+\n+void *hinic_dma_mem_zalloc(void *dev, size_t size, dma_addr_t *dma_handle,\n+\t\tunsigned int flag, unsigned int align);\n+void 
hinic_dma_mem_free(void *dev, size_t size, void *virt, dma_addr_t phys);\n+\n+int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev);\n+void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev);\n+\n+void dma_free_coherent_volatile(void *dev, size_t size,\n+\tvolatile void *virt, dma_addr_t phys);\n+\n+#endif /* _HINIC_PMD_DPDEV_H_ */\ndiff --git a/drivers/net/hinic/base/hinic_pmd_eqs.c b/drivers/net/hinic/base/hinic_pmd_eqs.c\nnew file mode 100644\nindex 000000000..db175a776\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_eqs.c\n@@ -0,0 +1,725 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#include \"hinic_pmd_dpdev.h\"\n+\n+#define AEQ_CTRL_0_INTR_IDX_SHIFT\t\t0\n+#define AEQ_CTRL_0_DMA_ATTR_SHIFT\t\t12\n+#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT\t\t20\n+#define AEQ_CTRL_0_INTR_MODE_SHIFT\t\t31\n+\n+#define AEQ_CTRL_0_INTR_IDX_MASK\t\t0x3FFU\n+#define AEQ_CTRL_0_DMA_ATTR_MASK\t\t0x3FU\n+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK\t\t0x3U\n+#define AEQ_CTRL_0_INTR_MODE_MASK\t\t0x1U\n+\n+#define AEQ_CTRL_0_SET(val, member)\t\t\\\n+\t\t\t\t(((val) & AEQ_CTRL_0_##member##_MASK) << \\\n+\t\t\t\tAEQ_CTRL_0_##member##_SHIFT)\n+\n+#define AEQ_CTRL_0_CLEAR(val, member)\t\t\\\n+\t\t\t\t((val) & (~(AEQ_CTRL_0_##member##_MASK \\\n+\t\t\t\t\t<< AEQ_CTRL_0_##member##_SHIFT)))\n+\n+#define AEQ_CTRL_1_LEN_SHIFT\t\t\t0\n+#define AEQ_CTRL_1_ELEM_SIZE_SHIFT\t\t24\n+#define AEQ_CTRL_1_PAGE_SIZE_SHIFT\t\t28\n+\n+#define AEQ_CTRL_1_LEN_MASK\t\t\t0x1FFFFFU\n+#define AEQ_CTRL_1_ELEM_SIZE_MASK\t\t0x3U\n+#define AEQ_CTRL_1_PAGE_SIZE_MASK\t\t0xFU\n+\n+#define AEQ_CTRL_1_SET(val, member)\t\t\\\n+\t\t\t\t(((val) & AEQ_CTRL_1_##member##_MASK) << \\\n+\t\t\t\tAEQ_CTRL_1_##member##_SHIFT)\n+\n+#define AEQ_CTRL_1_CLEAR(val, member)\t\t\\\n+\t\t\t\t((val) & (~(AEQ_CTRL_1_##member##_MASK \\\n+\t\t\t\t\t<< AEQ_CTRL_1_##member##_SHIFT)))\n+\n+#define CEQ_CTRL_0_INTR_IDX_SHIFT\t\t0\n+#define CEQ_CTRL_0_DMA_ATTR_SHIFT\t\t12\n+#define CEQ_CTRL_0_LIMIT_KICK_SHIFT\t\t20\n+#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT\t\t24\n+#define CEQ_CTRL_0_INTR_MODE_SHIFT\t\t31\n+\n+#define CEQ_CTRL_0_INTR_IDX_MASK\t\t0x3FFU\n+#define CEQ_CTRL_0_DMA_ATTR_MASK\t\t0x3FU\n+#define CEQ_CTRL_0_LIMIT_KICK_MASK\t\t0xFU\n+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK\t\t0x3U\n+#define CEQ_CTRL_0_INTR_MODE_MASK\t\t0x1U\n+\n+#define CEQ_CTRL_0_SET(val, member)\t\t\\\n+\t\t\t\t(((val) & CEQ_CTRL_0_##member##_MASK) << \\\n+\t\t\t\t\tCEQ_CTRL_0_##member##_SHIFT)\n+\n+#define CEQ_CTRL_1_LEN_SHIFT\t\t\t0\n+#define CEQ_CTRL_1_PAGE_SIZE_SHIFT\t\t28\n+\n+#define CEQ_CTRL_1_LEN_MASK\t\t\t0x1FFFFFU\n+#define CEQ_CTRL_1_PAGE_SIZE_MASK\t\t0xFU\n+\n+#define CEQ_CTRL_1_SET(val, member)\t\t\\\n+\t\t\t\t(((val) & CEQ_CTRL_1_##member##_MASK) << \\\n+\t\t\t\t\tCEQ_CTRL_1_##member##_SHIFT)\n+\n+#define EQ_ELEM_DESC_TYPE_SHIFT\t\t\t0\n+#define EQ_ELEM_DESC_SRC_SHIFT\t\t\t7\n+#define EQ_ELEM_DESC_SIZE_SHIFT\t\t\t8\n+#define EQ_ELEM_DESC_WRAPPED_SHIFT\t\t31\n+\n+#define EQ_ELEM_DESC_TYPE_MASK\t\t\t0x7FU\n+#define EQ_ELEM_DESC_SRC_MASK\t\t\t0x1U\n+#define EQ_ELEM_DESC_SIZE_MASK\t\t\t0xFFU\n+#define EQ_ELEM_DESC_WRAPPED_MASK\t\t0x1U\n+\n+#define EQ_ELEM_DESC_GET(val, member)\t\t\\\n+\t\t\t\t(((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \\\n+\t\t\t\tEQ_ELEM_DESC_##member##_MASK)\n+\n+#define EQ_CONS_IDX_CONS_IDX_SHIFT\t\t0\n+#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT\t\t24\n+#define EQ_CONS_IDX_INT_ARMED_SHIFT\t\t31\n+\n+#define EQ_CONS_IDX_CONS_IDX_MASK\t\t0x1FFFFFU\n+#define EQ_CONS_IDX_XOR_CHKSUM_MASK\t\t0xFU\n+#define 
EQ_CONS_IDX_INT_ARMED_MASK\t\t0x1U\n+\n+#define EQ_CONS_IDX_SET(val, member)\t\t\\\n+\t\t\t\t(((val) & EQ_CONS_IDX_##member##_MASK) << \\\n+\t\t\t\tEQ_CONS_IDX_##member##_SHIFT)\n+\n+#define EQ_CONS_IDX_CLEAR(val, member)\t\t\\\n+\t\t\t\t((val) & (~(EQ_CONS_IDX_##member##_MASK \\\n+\t\t\t\t\t<< EQ_CONS_IDX_##member##_SHIFT)))\n+\n+#define EQ_WRAPPED(eq)\t\t\t((u32)(eq)->wrapped << EQ_VALID_SHIFT)\n+\n+#define EQ_CONS_IDX(eq)\t\t((eq)->cons_idx | \\\n+\t\t\t\t((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))\n+\n+#define EQ_CONS_IDX_REG_ADDR(eq)\t(((eq)->type == HINIC_AEQ) ? \\\n+\t\t\t\tHINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) :\\\n+\t\t\t\tHINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))\n+\n+#define EQ_PROD_IDX_REG_ADDR(eq)\t(((eq)->type == HINIC_AEQ) ? \\\n+\t\t\t\tHINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) :\\\n+\t\t\t\tHINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))\n+\n+#define GET_EQ_NUM_PAGES(eq, size)\t\t\\\n+\t\t((u16)(ALIGN((eq)->eq_len * (u32)(eq)->elem_size, (size)) \\\n+\t\t/ (size)))\n+\n+#define GET_EQ_NUM_ELEMS(eq, pg_size)\t((pg_size) / (u32)(eq)->elem_size)\n+\n+#define GET_EQ_ELEMENT(eq, idx)\t\t\\\n+\t\t(((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \\\n+\t\t(((u32)(idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))\n+\n+#define GET_AEQ_ELEM(eq, idx)\t\t((struct hinic_aeq_elem *) \\\n+\t\t\t\t\tGET_EQ_ELEMENT((eq), (idx)))\n+\n+#define GET_CEQ_ELEM(eq, idx)\t\t((u32 *)GET_EQ_ELEMENT((eq), (idx)))\n+\n+#define GET_CURR_AEQ_ELEM(eq)\t\tGET_AEQ_ELEM((eq), (eq)->cons_idx)\n+\n+#define PAGE_IN_4K(page_size)\t\t((page_size) >> 12)\n+#define EQ_SET_HW_PAGE_SIZE_VAL(eq) ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))\n+\n+#define ELEMENT_SIZE_IN_32B(eq)\t\t(((eq)->elem_size) >> 5)\n+#define EQ_SET_HW_ELEM_SIZE_VAL(eq)\t((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))\n+\n+#define AEQ_DMA_ATTR_DEFAULT\t\t\t0\n+#define CEQ_DMA_ATTR_DEFAULT\t\t\t0\n+\n+#define CEQ_LMT_KICK_DEFAULT\t\t\t0\n+\n+#define EQ_WRAPPED_SHIFT\t\t\t20\n+\n+#define\tEQ_VALID_SHIFT\t\t\t\t31\n+\n+#define aeq_to_aeqs(eq) \\\n+\t\tcontainer_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])\n+\n+static u8 eq_cons_idx_checksum_set(u32 val)\n+{\n+\tu8 checksum = 0;\n+\tu8 idx;\n+\n+\tfor (idx = 0; idx < 32; idx += 4)\n+\t\tchecksum ^= ((val >> idx) & 0xF);\n+\n+\treturn (checksum & 0xF);\n+}\n+\n+/**\n+ * set_eq_cons_idx - write the cons idx to the hw\n+ * @eq: The event queue to update the cons idx for\n+ * @arm_state: indicate whether report interrupts when generate eq element\n+ **/\n+static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)\n+{\n+\tu32 eq_cons_idx, eq_wrap_ci, val;\n+\tu32 addr = EQ_CONS_IDX_REG_ADDR(eq);\n+\n+\teq_wrap_ci = EQ_CONS_IDX(eq);\n+\n+\t/* Read Modify Write */\n+\tval = hinic_hwif_read_reg(eq->hwdev->hwif, addr);\n+\n+\tval = EQ_CONS_IDX_CLEAR(val, CONS_IDX) &\n+\t\tEQ_CONS_IDX_CLEAR(val, INT_ARMED) &\n+\t\tEQ_CONS_IDX_CLEAR(val, XOR_CHKSUM);\n+\n+\t/* Just aeq0 use int_arm mode for pmd drv to recv\n+\t * asyn event&mbox recv data\n+\t */\n+\tif (eq->q_id == 0)\n+\t\teq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |\n+\t\t\tEQ_CONS_IDX_SET(arm_state, INT_ARMED);\n+\telse\n+\t\teq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |\n+\t\t\tEQ_CONS_IDX_SET(HINIC_EQ_NOT_ARMED, INT_ARMED);\n+\n+\tval |= eq_cons_idx;\n+\n+\tval |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);\n+\n+\thinic_hwif_write_reg(eq->hwdev->hwif, addr, val);\n+}\n+\n+/**\n+ * eq_update_ci - update the cons idx of event queue\n+ * @eq: the event queue to update the cons idx for\n+ **/\n+static void eq_update_ci(struct 
hinic_eq *eq)\n+{\n+\tset_eq_cons_idx(eq, HINIC_EQ_ARMED);\n+}\n+\n+struct hinic_ceq_ctrl_reg {\n+\tstruct hinic_mgmt_msg_head mgmt_msg_head;\n+\n+\tu16 func_id;\n+\tu16 q_id;\n+\tu32 ctrl0;\n+\tu32 ctrl1;\n+};\n+\n+static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id,\n+\t\t\t    u32 ctrl0, u32 ctrl1)\n+{\n+\tstruct hinic_ceq_ctrl_reg ceq_ctrl;\n+\tu16 in_size = sizeof(ceq_ctrl);\n+\n+\tmemset(&ceq_ctrl, 0, in_size);\n+\tceq_ctrl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;\n+\tceq_ctrl.func_id = hinic_global_func_id(hwdev);\n+\tceq_ctrl.q_id = q_id;\n+\tceq_ctrl.ctrl0 = ctrl0;\n+\tceq_ctrl.ctrl1 = ctrl1;\n+\n+\treturn hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,\n+\t\t\t\t     HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP,\n+\t\t\t\t     &ceq_ctrl, in_size, NULL, NULL, 0);\n+}\n+\n+/**\n+ * set_eq_ctrls - setting eq's ctrls registers\n+ * @eq: the event queue for setting\n+ **/\n+static int set_eq_ctrls(struct hinic_eq *eq)\n+{\n+\tenum hinic_eq_type type = eq->type;\n+\tstruct hinic_hwif *hwif = eq->hwdev->hwif;\n+\tstruct irq_info *eq_irq = &eq->eq_irq;\n+\tu32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;\n+\tu32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif);\n+\tint ret = 0;\n+\n+\tif (type == HINIC_AEQ) {\n+\t\t/* set ctrl0 */\n+\t\taddr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);\n+\n+\t\tval = hinic_hwif_read_reg(hwif, addr);\n+\n+\t\tval = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &\n+\t\t\tAEQ_CTRL_0_CLEAR(val, DMA_ATTR) &\n+\t\t\tAEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &\n+\t\t\tAEQ_CTRL_0_CLEAR(val, INTR_MODE);\n+\n+\t\tctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |\n+\t\t\tAEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR)\t|\n+\t\t\tAEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)\t|\n+\t\t\tAEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);\n+\n+\t\tval |= ctrl0;\n+\n+\t\thinic_hwif_write_reg(hwif, addr, val);\n+\n+\t\t/* set ctrl1 */\n+\t\taddr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);\n+\n+\t\tpage_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);\n+\t\telem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);\n+\n+\t\tctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN)\t\t|\n+\t\t\tAEQ_CTRL_1_SET(elem_size, ELEM_SIZE)\t|\n+\t\t\tAEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);\n+\n+\t\thinic_hwif_write_reg(hwif, addr, ctrl1);\n+\t} else {\n+\t\tctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |\n+\t\t\tCEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR)\t|\n+\t\t\tCEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) |\n+\t\t\tCEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)\t|\n+\t\t\tCEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);\n+\n+\t\tpage_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);\n+\n+\t\tctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) |\n+\t\t\tCEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);\n+\n+\t\t/* set ceq ctrl reg through mgmt cpu */\n+\t\tret = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * ceq_elements_init - Initialize all the elements in the ceq\n+ * @eq: the event queue\n+ * @init_val: value to init with it the elements\n+ **/\n+static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)\n+{\n+\tu16 i;\n+\tu32 *ceqe;\n+\n+\tfor (i = 0; i < eq->eq_len; i++) {\n+\t\tceqe = GET_CEQ_ELEM(eq, i);\n+\t\t*(ceqe) = cpu_to_be32(init_val);\n+\t}\n+\n+\trte_wmb();\t/* Write the init values */\n+}\n+\n+/**\n+ * aeq_elements_init - initialize all the elements in the aeq\n+ * @eq: the event queue\n+ * @init_val: value to init with it the elements\n+ **/\n+static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)\n+{\n+\tstruct hinic_aeq_elem *aeqe;\n+\tu16 i;\n+\n+\tfor (i = 0; i < eq->eq_len; 
i++) {\n+\t\taeqe = GET_AEQ_ELEM(eq, i);\n+\t\taeqe->desc = cpu_to_be32(init_val);\n+\t}\n+\n+\trte_wmb();\t/* Write the init values */\n+}\n+\n+/**\n+ * alloc_eq_pages - allocate the pages for the queue\n+ * @eq: the event queue\n+ **/\n+static int alloc_eq_pages(struct hinic_eq *eq)\n+{\n+\tstruct hinic_hwif *hwif = eq->hwdev->hwif;\n+\tu32 init_val;\n+\tu64 dma_addr_size, virt_addr_size;\n+\tu16 pg_num, i;\n+\tint err;\n+\n+\tdma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);\n+\tvirt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);\n+\n+\teq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);\n+\tif (!eq->dma_addr) {\n+\t\tPMD_DRV_LOG(ERR, \"Allocate dma addr array failed\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\teq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);\n+\tif (!eq->virt_addr) {\n+\t\tPMD_DRV_LOG(ERR, \"Allocate virt addr array failed\");\n+\t\terr = -ENOMEM;\n+\t\tgoto virt_addr_alloc_err;\n+\t}\n+\n+\tfor (pg_num = 0; pg_num < eq->num_pages; pg_num++) {\n+\t\teq->virt_addr[pg_num] =\n+\t\t\t(u8 *)dma_zalloc_coherent_aligned(eq->hwdev->dev_hdl,\n+\t\t\t\t\teq->page_size, &eq->dma_addr[pg_num],\n+\t\t\t\t\tGFP_KERNEL);\n+\t\tif (!eq->virt_addr[pg_num]) {\n+\t\t\terr = -ENOMEM;\n+\t\t\tgoto dma_alloc_err;\n+\t\t}\n+\n+\t\thinic_hwif_write_reg(hwif,\n+\t\t\t\t     HINIC_EQ_HI_PHYS_ADDR_REG(eq->type,\n+\t\t\t\t     eq->q_id, pg_num),\n+\t\t\t\t     upper_32_bits(eq->dma_addr[pg_num]));\n+\n+\t\thinic_hwif_write_reg(hwif,\n+\t\t\t\t     HINIC_EQ_LO_PHYS_ADDR_REG(eq->type,\n+\t\t\t\t     eq->q_id, pg_num),\n+\t\t\t\t     lower_32_bits(eq->dma_addr[pg_num]));\n+\t}\n+\n+\tinit_val = EQ_WRAPPED(eq);\n+\n+\tif (eq->type == HINIC_AEQ)\n+\t\taeq_elements_init(eq, init_val);\n+\telse\n+\t\tceq_elements_init(eq, init_val);\n+\n+\treturn 0;\n+\n+dma_alloc_err:\n+\tfor (i = 0; i < pg_num; i++)\n+\t\tdma_free_coherent(eq->hwdev->dev_hdl, eq->page_size,\n+\t\t\t\t  eq->virt_addr[i], eq->dma_addr[i]);\n+\n+virt_addr_alloc_err:\n+\tkfree(eq->dma_addr);\n+\treturn err;\n+}\n+\n+/**\n+ * free_eq_pages - free the pages of the queue\n+ * @eq: the event queue\n+ **/\n+static void free_eq_pages(struct hinic_eq *eq)\n+{\n+\tstruct hinic_hwdev *hwdev = eq->hwdev;\n+\tu16 pg_num;\n+\n+\tfor (pg_num = 0; pg_num < eq->num_pages; pg_num++)\n+\t\tdma_free_coherent(hwdev->dev_hdl, eq->page_size,\n+\t\t\t\t  eq->virt_addr[pg_num],\n+\t\t\t\t  eq->dma_addr[pg_num]);\n+\n+\tkfree(eq->virt_addr);\n+\tkfree(eq->dma_addr);\n+}\n+\n+#define MSIX_ENTRY_IDX_0 (0)\n+\n+/**\n+ * init_eq - initialize eq\n+ * @eq:\tthe event queue\n+ * @hwdev: the pointer to the private hardware device object\n+ * @q_id: Queue id number\n+ * @q_len: the number of EQ elements\n+ * @type: the type of the event queue, ceq or aeq\n+ * @page_size: the page size of the event queue\n+ * @entry: msix entry associated with the event queue\n+ * Return: 0 - Success, Negative - failure\n+ **/\n+static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,\n+\t\t   u16 q_len, enum hinic_eq_type type, u32 page_size,\n+\t\t   __rte_unused struct irq_info *entry)\n+{\n+\tint err = 0;\n+\n+\teq->hwdev = hwdev;\n+\teq->q_id = q_id;\n+\teq->type = type;\n+\teq->page_size = page_size;\n+\teq->eq_len = q_len;\n+\n+\t/* clear eq_len to force eqe drop in hardware */\n+\tif (eq->type == HINIC_AEQ) {\n+\t\thinic_hwif_write_reg(eq->hwdev->hwif,\n+\t\t\t\t     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);\n+\t} else {\n+\t\terr = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Set ceq control registers ctrl0[0] 
ctrl1[0] failed\");\n+\t\t\treturn err;\n+\t\t}\n+\t}\n+\n+\teq->cons_idx = 0;\n+\teq->wrapped = 0;\n+\n+\teq->elem_size = (type == HINIC_AEQ) ?\n+\t\t\tHINIC_AEQE_SIZE : HINIC_CEQE_SIZE;\n+\teq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);\n+\teq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, page_size);\n+\n+\tif (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {\n+\t\tPMD_DRV_LOG(ERR, \"Number element in eq page is not power of 2\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (eq->num_pages > HINIC_EQ_MAX_PAGES) {\n+\t\tPMD_DRV_LOG(ERR, \"Too many pages for eq, num_pages: %d\",\n+\t\t\teq->num_pages);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\terr = alloc_eq_pages(eq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Allocate pages for eq failed\");\n+\t\treturn err;\n+\t}\n+\n+\t/* pmd use MSIX_ENTRY_IDX_0*/\n+\teq->eq_irq.msix_entry_idx = MSIX_ENTRY_IDX_0;\n+\n+\terr = set_eq_ctrls(eq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Init eq control registers failed\");\n+\t\tgoto init_eq_ctrls_err;\n+\t}\n+\n+\thinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);\n+\tset_eq_cons_idx(eq, HINIC_EQ_ARMED);\n+\n+\tif (eq->q_id == 0)\n+\t\thinic_set_msix_state(hwdev, 0, HINIC_MSIX_ENABLE);\n+\n+\teq->poll_retry_nr = HINIC_RETRY_NUM;\n+\n+\treturn 0;\n+\n+init_eq_ctrls_err:\n+\tfree_eq_pages(eq);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * remove_eq - remove eq\n+ * @eq:\tthe event queue\n+ **/\n+static void remove_eq(struct hinic_eq *eq)\n+{\n+\tstruct irq_info *entry = &eq->eq_irq;\n+\n+\tif (eq->type == HINIC_AEQ) {\n+\t\tif (eq->q_id == 0)\n+\t\t\thinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,\n+\t\t\t\t\t     HINIC_MSIX_DISABLE);\n+\n+\t\t/* clear eq_len to avoid hw access host memory */\n+\t\thinic_hwif_write_reg(eq->hwdev->hwif,\n+\t\t\t\t     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);\n+\t} else {\n+\t\t(void)set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);\n+\t}\n+\n+\t/* update cons_idx to avoid invalid interrupt */\n+\teq->cons_idx = (u16)hinic_hwif_read_reg(eq->hwdev->hwif,\n+\t\t\t\t\t\tEQ_PROD_IDX_REG_ADDR(eq));\n+\tset_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);\n+\n+\tfree_eq_pages(eq);\n+}\n+\n+/**\n+ * hinic_aeqs_init - init all the aeqs\n+ * @hwdev: the pointer to the private hardware device object\n+ * @num_aeqs: number of aeq\n+ * @msix_entries: msix entries associated with the event queues\n+ * Return: 0 - Success, Negative - failure\n+ **/\n+static int\n+hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,\n+\t\tstruct irq_info *msix_entries)\n+{\n+\tstruct hinic_aeqs *aeqs;\n+\tint err;\n+\tu16 i, q_id;\n+\n+\taeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);\n+\tif (!aeqs)\n+\t\treturn -ENOMEM;\n+\n+\thwdev->aeqs = aeqs;\n+\taeqs->hwdev = hwdev;\n+\taeqs->num_aeqs = num_aeqs;\n+\n+\tfor (q_id = HINIC_AEQN_START; q_id < num_aeqs; q_id++) {\n+\t\terr = init_eq(&aeqs->aeq[q_id], hwdev, q_id,\n+\t\t\t      HINIC_DEFAULT_AEQ_LEN, HINIC_AEQ,\n+\t\t\t      HINIC_EQ_PAGE_SIZE, &msix_entries[q_id]);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Init aeq %d failed\", q_id);\n+\t\t\tgoto init_aeq_err;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+init_aeq_err:\n+\tfor (i = 0; i < q_id; i++)\n+\t\tremove_eq(&aeqs->aeq[i]);\n+\n+\tkfree(aeqs);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * hinic_aeqs_free - free all the aeqs\n+ * @hwdev: the pointer to the private hardware device object\n+ **/\n+static void hinic_aeqs_free(struct hinic_hwdev *hwdev)\n+{\n+\tstruct hinic_aeqs *aeqs = hwdev->aeqs;\n+\tu16 q_id;\n+\n+\t/* hinic pmd use aeq[1~3], aeq[0] used in kernel only */\n+\tfor (q_id = HINIC_AEQN_START; q_id < aeqs->num_aeqs ; 
q_id++)\n+\t\tremove_eq(&aeqs->aeq[q_id]);\n+\n+\tkfree(aeqs);\n+}\n+\n+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)\n+{\n+\tstruct hinic_eq *eq;\n+\tu32 addr, ci, pi;\n+\tint q_id;\n+\n+\tfor (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {\n+\t\teq = &hwdev->aeqs->aeq[q_id];\n+\t\taddr = EQ_CONS_IDX_REG_ADDR(eq);\n+\t\tci = hinic_hwif_read_reg(hwdev->hwif, addr);\n+\t\taddr = EQ_PROD_IDX_REG_ADDR(eq);\n+\t\tpi = hinic_hwif_read_reg(hwdev->hwif, addr);\n+\t\tPMD_DRV_LOG(ERR, \"aeq id: %d, ci: 0x%x, pi: 0x%x\",\n+\t\t\tq_id, ci, pi);\n+\t}\n+}\n+\n+static int hinic_handle_aeqe(void *handle, enum hinic_aeq_type event,\n+\t\t      u8 *data, u8 size, void *param)\n+{\n+\tint rc = 0;\n+\n+\tswitch (event) {\n+\tcase HINIC_MSG_FROM_MGMT_CPU:\n+\t\trc = hinic_mgmt_msg_aeqe_handler(handle, data, size, param);\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_DRV_LOG(ERR, \"Unknown event type: 0x%x, size: %d\",\n+\t\t\t    event, size);\n+\t\trc = HINIC_RECV_NEXT_AEQE;\n+\t\tbreak;\n+\t}\n+\n+\treturn rc;\n+}\n+\n+/**\n+ * hinic_aeq_poll_msg - poll one or continue aeqe, and call dedicated process\n+ * @eq: aeq of the chip\n+ * @timeout: 0   - poll all aeqe in eq, used in interrupt mode,\n+ *           > 0 - poll aeq until get aeqe with 'last' field set to 1,\n+ *           used in polling mode.\n+ * @param: customized parameter\n+ * Return: 0 - Success, EIO - poll timeout, ENODEV - swe not support\n+ **/\n+int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param)\n+{\n+\tstruct hinic_aeq_elem *aeqe_pos;\n+\tenum hinic_aeq_type event;\n+\tu32 aeqe_desc = 0;\n+\tu16 i;\n+\tu8 size;\n+\tint done = HINIC_ERROR;\n+\tint err = -EFAULT;\n+\tunsigned long end;\n+\n+\tfor (i = 0; ((timeout == 0) && (i < eq->eq_len)) ||\n+\t     ((timeout > 0) && (done != HINIC_OK) && (i < eq->eq_len)); i++) {\n+\t\terr = -EIO;\n+\t\tend = jiffies + msecs_to_jiffies(timeout);\n+\t\tdo {\n+\t\t\taeqe_pos = GET_CURR_AEQ_ELEM(eq);\n+\t\t\trte_rmb();\n+\n+\t\t\t/* Data in HW is in Big endian Format */\n+\t\t\taeqe_desc = be32_to_cpu(aeqe_pos->desc);\n+\n+\t\t\t/* HW updates wrapped bit,\n+\t\t\t * when it adds eq element event\n+\t\t\t */\n+\t\t\tif (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED)\n+\t\t\t    != eq->wrapped) {\n+\t\t\t\terr = 0;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tif (timeout != 0)\n+\t\t\t\trte_delay_ms(1);\n+\t\t} while (time_before(jiffies, end));\n+\n+\t\tif (err != HINIC_OK) /*poll time out*/\n+\t\t\tbreak;\n+\n+\t\tevent = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);\n+\t\tif (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {\n+\t\t\tPMD_DRV_LOG(ERR, \"AEQ sw event not support %d\",\n+\t\t\t\tevent);\n+\t\t\treturn -ENODEV;\n+\n+\t\t} else {\n+\t\t\tsize = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);\n+\t\t\tdone = hinic_handle_aeqe(eq->hwdev, event,\n+\t\t\t\t\t\t aeqe_pos->aeqe_data,\n+\t\t\t\t\t\t size, param);\n+\t\t}\n+\n+\t\teq->cons_idx++;\n+\t\tif (eq->cons_idx == eq->eq_len) {\n+\t\t\teq->cons_idx = 0;\n+\t\t\teq->wrapped = !eq->wrapped;\n+\t\t}\n+\t}\n+\n+\teq_update_ci(eq);\n+\n+\treturn err;\n+}\n+\n+/**\n+ * hinic_aeq_poll_msg - init aeqs\n+ * @nic_dev: pmd nic device\n+ * Return: 0 - Success, Negative - failure\n+ **/\n+int hinic_comm_aeqs_init(struct hinic_nic_dev *nic_dev)\n+{\n+\tint rc;\n+\tu16 num_aeqs;\n+\tstruct irq_info aeq_irqs[HINIC_MAX_AEQS];\n+\n+\tnum_aeqs = HINIC_HWIF_NUM_AEQS(nic_dev->hwdev->hwif);\n+\tif (num_aeqs < HINIC_MAX_AEQS) {\n+\t\tPMD_DRV_LOG(ERR, \"Warning: PMD need %d AEQs, Chip have %d\",\n+\t\t\tHINIC_MAX_AEQS, num_aeqs);\n+\t\treturn HINIC_ERROR;\n+\t}\n+\n+\tmemset(aeq_irqs, 0, 
sizeof(aeq_irqs));\n+\trc = hinic_aeqs_init(nic_dev->hwdev, num_aeqs, aeq_irqs);\n+\tif (rc != HINIC_OK)\n+\t\tPMD_DRV_LOG(ERR, \"Initialize aeqs failed, rc: %d\", rc);\n+\n+\treturn rc;\n+}\n+\n+void hinic_comm_aeqs_free(struct hinic_nic_dev *nic_dev)\n+{\n+\thinic_aeqs_free(nic_dev->hwdev);\n+}\ndiff --git a/drivers/net/hinic/base/hinic_pmd_eqs.h b/drivers/net/hinic/base/hinic_pmd_eqs.h\nnew file mode 100644\nindex 000000000..73efb3ce6\n--- /dev/null\n+++ b/drivers/net/hinic/base/hinic_pmd_eqs.h\n@@ -0,0 +1,94 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#ifndef _HINIC_PMD_EQS_H_\n+#define _HINIC_PMD_EQS_H_\n+\n+#define HINIC_EQ_PAGE_SIZE\t\t0x00001000\n+\n+#define HINIC_MAX_AEQS\t\t\t4\n+\n+#define HINIC_EQ_MAX_PAGES\t\t8\n+\n+#define HINIC_AEQE_SIZE\t\t\t64\n+#define HINIC_CEQE_SIZE\t\t\t4\n+\n+#define HINIC_AEQE_DESC_SIZE\t\t4\n+#define HINIC_AEQE_DATA_SIZE\t\t\\\n+\t\t\t(HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)\n+\n+#define HINIC_DEFAULT_AEQ_LEN\t\t64\n+\n+#define\tHINIC_CEQ_ID_CMDQ\t\t0\n+\n+enum hinic_eq_type {\n+\tHINIC_AEQ,\n+\tHINIC_CEQ\n+};\n+\n+enum hinic_eq_intr_mode {\n+\tHINIC_INTR_MODE_ARMED,\n+\tHINIC_INTR_MODE_ALWAYS,\n+};\n+\n+enum hinic_eq_ci_arm_state {\n+\tHINIC_EQ_NOT_ARMED,\n+\tHINIC_EQ_ARMED,\n+};\n+\n+enum hinic_aeq_type {\n+\tHINIC_HW_INTER_INT = 0,\n+\tHINIC_MBX_FROM_FUNC = 1,\n+\tHINIC_MSG_FROM_MGMT_CPU = 2,\n+\tHINIC_API_RSP = 3,\n+\tHINIC_API_CHAIN_STS = 4,\n+\tHINIC_MBX_SEND_RSLT = 5,\n+\tHINIC_MAX_AEQ_EVENTS\n+};\n+\n+/* When continue aeqe, the event process must return done\n+ * for indicating data receive finish or not\n+ */\n+typedef int (*hinic_aeq_event_cb)(void *hw_dev, u8 *data, u8 size);\n+#define HINIC_RETRY_NUM\t(10)\n+\n+struct hinic_eq {\n+\tstruct hinic_hwdev\t\t*hwdev;\n+\tu16\t\t\t\tq_id;\n+\tenum hinic_eq_type\t\ttype;\n+\tu32\t\t\t\tpage_size;\n+\tu16\t\t\t\teq_len;\n+\n+\tu16\t\t\t\tcons_idx;\n+\tu16\t\t\t\twrapped;\n+\n+\tu16\t\t\t\telem_size;\n+\tu16\t\t\t\tnum_pages;\n+\tu32\t\t\t\tnum_elem_in_pg;\n+\n+\tstruct irq_info\t\t\teq_irq;\n+\n+\tdma_addr_t\t\t\t*dma_addr;\n+\tu8\t\t\t\t**virt_addr;\n+\n+\tu16\t\t\t\tpoll_retry_nr;\n+};\n+\n+struct hinic_aeq_elem {\n+\tu8\taeqe_data[HINIC_AEQE_DATA_SIZE];\n+\tu32\tdesc;\n+};\n+\n+struct hinic_aeqs {\n+\tstruct hinic_hwdev\t*hwdev;\n+\tu16\t\t\tpoll_retry_nr;\n+\n+\tstruct hinic_eq\t\taeq[HINIC_MAX_AEQS];\n+\tu16\t\t\tnum_aeqs;\n+};\n+\n+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);\n+int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param);\n+\n+#endif /* _HINIC_PMD_EQS_H_ */\n",
    "prefixes": [
        "v3",
        "05/11"
    ]
}
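
Example: updating this patch. A minimal sketch, again assuming the "requests"
library. PUT and PATCH (per the Allow header above) require an authenticated
account with sufficient rights on the project, and the set of writable fields
depends on server policy; the token below is a placeholder, typically issued
from the Patchwork user profile page. PATCH sends only the fields being
changed, whereas PUT replaces all writable fields.

    import requests

    BASE = "http://patches.dpdk.org/api"
    TOKEN = "replace-me"  # placeholder API token

    # Partial update: mark the patch superseded and archive it.
    resp = requests.patch(
        BASE + "/patches/54167/",
        headers={"Authorization": "Token " + TOKEN},
        json={"state": "superseded", "archived": True},
    )
    resp.raise_for_status()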