get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing its writable fields.
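
For example, the exchange shown below can be reproduced with any HTTP client. A minimal sketch in Python (assuming the third-party "requests" package; the URL is taken from the request below, and "format=json" selects the plain JSON renderer instead of this browsable "format=api" view):

import requests

# Fetch the patch as plain JSON rather than the browsable API page.
resp = requests.get("http://patches.dpdk.org/api/patches/54479/",
                    params={"format": "json"})
resp.raise_for_status()
patch = resp.json()

# A few of the fields shown in the sample response below.
print(patch["name"])   # "[v4,09/11] net/hinic: add RX module"
print(patch["state"])  # "changes-requested"
print(patch["mbox"])   # URL of the raw patch in mbox format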

GET /api/patches/54479/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 54479,
    "url": "http://patches.dpdk.org/api/patches/54479/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/edbb1ec50507d75ea920f8ccaf80d3d617e40a42.1559818024.git.xuanziyang2@huawei.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<edbb1ec50507d75ea920f8ccaf80d3d617e40a42.1559818024.git.xuanziyang2@huawei.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/edbb1ec50507d75ea920f8ccaf80d3d617e40a42.1559818024.git.xuanziyang2@huawei.com",
    "date": "2019-06-06T11:18:22",
    "name": "[v4,09/11] net/hinic: add RX module",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "eff91909310f05e1a230b2a5ba2280a79a838ba7",
    "submitter": {
        "id": 1321,
        "url": "http://patches.dpdk.org/api/people/1321/?format=api",
        "name": "Ziyang Xuan",
        "email": "xuanziyang2@huawei.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/edbb1ec50507d75ea920f8ccaf80d3d617e40a42.1559818024.git.xuanziyang2@huawei.com/mbox/",
    "series": [
        {
            "id": 4924,
            "url": "http://patches.dpdk.org/api/series/4924/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4924",
            "date": "2019-06-06T11:04:33",
            "name": "A new net PMD - hinic",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/4924/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/54479/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/54479/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 76F421B958;\n\tThu,  6 Jun 2019 13:07:03 +0200 (CEST)",
            "from huawei.com (szxga07-in.huawei.com [45.249.212.35])\n\tby dpdk.org (Postfix) with ESMTP id 251741B946\n\tfor <dev@dpdk.org>; Thu,  6 Jun 2019 13:07:01 +0200 (CEST)",
            "from DGGEMS411-HUB.china.huawei.com (unknown [172.30.72.58])\n\tby Forcepoint Email with ESMTP id E8A28F6AFE056F0625A0;\n\tThu,  6 Jun 2019 19:06:59 +0800 (CST)",
            "from tester_149.localdomain (10.175.119.39) by\n\tDGGEMS411-HUB.china.huawei.com (10.3.19.211) with Microsoft SMTP\n\tServer id 14.3.439.0; Thu, 6 Jun 2019 19:06:52 +0800"
        ],
        "From": "Ziyang Xuan <xuanziyang2@huawei.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<ferruh.yigit@intel.com>, <cloud.wangxiaoyun@huawei.com>,\n\t<zhouguoyang@huawei.com>, <shahar.belkar@huawei.com>,\n\t<stephen@networkplumber.org>, <luoxianjun@huawei.com>, Ziyang Xuan\n\t<xuanziyang2@huawei.com>",
        "Date": "Thu, 6 Jun 2019 19:18:22 +0800",
        "Message-ID": "<edbb1ec50507d75ea920f8ccaf80d3d617e40a42.1559818024.git.xuanziyang2@huawei.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<cover.1559818024.git.xuanziyang2@huawei.com>",
        "References": "<cover.1559818024.git.xuanziyang2@huawei.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.175.119.39]",
        "X-CFilter-Loop": "Reflected",
        "Subject": "[dpdk-dev] [PATCH v4 09/11] net/hinic: add RX module",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add code for RX module.\n\nSigned-off-by: Ziyang Xuan <xuanziyang2@huawei.com>\n---\n drivers/net/hinic/Makefile       |   2 +-\n drivers/net/hinic/hinic_pmd_rx.c | 902 +++++++++++++++++++++++++++++++\n drivers/net/hinic/meson.build    |   2 +-\n 3 files changed, 904 insertions(+), 2 deletions(-)\n create mode 100644 drivers/net/hinic/hinic_pmd_rx.c",
    "diff": "diff --git a/drivers/net/hinic/Makefile b/drivers/net/hinic/Makefile\nindex d0f955ce1..27aba4085 100644\n--- a/drivers/net/hinic/Makefile\n+++ b/drivers/net/hinic/Makefile\n@@ -55,7 +55,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_wq.c\n \n SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_dpdev.c\n SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_ethdev.c\n-#SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_rx.c\n+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_rx.c\n #SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_tx.c\n \n # this lib depends upon:\ndiff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c\nnew file mode 100644\nindex 000000000..9d6237fc0\n--- /dev/null\n+++ b/drivers/net/hinic/hinic_pmd_rx.c\n@@ -0,0 +1,902 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#include <rte_ether.h>\n+#include <rte_ethdev.h>\n+#include <rte_mbuf.h>\n+#ifdef __ARM64_NEON__\n+#include <arm_neon.h>\n+#endif\n+\n+#include \"hinic_pmd_ethdev.h\"\n+#include \"hinic_pmd_rx.h\"\n+\n+#ifdef HINIC_XSTAT_RXBUF_INFO\n+static void hinic_rxq_buffer_done_count(struct hinic_rxq *rxq)\n+{\n+\tu16 sw_ci, avail_pkts = 0, hit_done = 0, cqe_hole = 0;\n+\tu32 status;\n+\tvolatile struct hinic_rq_cqe *rx_cqe;\n+\n+\tfor (sw_ci = 0; sw_ci < rxq->q_depth; sw_ci++) {\n+\t\trx_cqe = &rxq->rx_cqe[sw_ci];\n+\n+\t\t/* test current ci is done */\n+\t\tstatus = rx_cqe->status;\n+\t\tif (!HINIC_GET_RX_DONE_BE(status)) {\n+\t\t\tif (hit_done) {\n+\t\t\t\tcqe_hole++;\n+\t\t\t\thit_done = 0;\n+\t\t\t}\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tavail_pkts++;\n+\t\thit_done = 1;\n+\t}\n+\n+\trxq->rxq_stats.rx_avail = avail_pkts;\n+\trxq->rxq_stats.rx_hole = cqe_hole;\n+}\n+#endif\n+\n+void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)\n+{\n+\tif (!rxq || !stats)\n+\t\treturn;\n+\n+#ifdef HINIC_XSTAT_RXBUF_INFO\n+\trxq->rxq_stats.rx_mbuf = (rxq->q_depth)\n+\t\t\t\t- HINIC_GET_RQ_FREE_WQEBBS(rxq);\n+\n+\thinic_rxq_buffer_done_count(rxq);\n+#endif\n+#ifdef HINIC_XSTAT_MBUF_USE\n+\trxq->rxq_stats.left_mbuf = rxq->rxq_stats.alloc_mbuf\n+\t\t\t\t- rxq->rxq_stats.free_mbuf;\n+#endif\n+\tmemcpy(stats, &rxq->rxq_stats, sizeof(rxq->rxq_stats));\n+}\n+\n+void hinic_rxq_stats_reset(struct hinic_rxq *rxq)\n+{\n+\tstruct hinic_rxq_stats *rxq_stats;\n+\n+\tif (rxq == NULL)\n+\t\treturn;\n+\n+\trxq_stats = &rxq->rxq_stats;\n+\tmemset(rxq_stats, 0, sizeof(*rxq_stats));\n+}\n+\n+/* mbuf alloc and free */\n+static inline struct rte_mbuf *hinic_rte_rxmbuf_alloc(struct rte_mempool *mp)\n+{\n+\tstruct rte_mbuf *m;\n+\n+\tm  = rte_mbuf_raw_alloc(mp);\n+\treturn m;\n+}\n+\n+static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq)\n+{\n+\tsize_t cqe_mem_size;\n+\n+\t/* allocate continuous cqe memory for saving number of memory zone */\n+\tcqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;\n+\trxq->cqe_start_vaddr = dma_zalloc_coherent(rxq->nic_dev, cqe_mem_size,\n+\t\t\t\t\t\t   &rxq->cqe_start_paddr,\n+\t\t\t\t\t\t   GFP_KERNEL);\n+\tif (!rxq->cqe_start_vaddr) {\n+\t\tPMD_DRV_LOG(ERR, \"Allocate cqe dma memory failed\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->rx_cqe = (struct hinic_rq_cqe *)rxq->cqe_start_vaddr;\n+\n+\treturn HINIC_OK;\n+}\n+\n+static void hinic_rx_free_cqe(struct hinic_rxq *rxq)\n+{\n+\tsize_t cqe_mem_size;\n+\n+\tcqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;\n+\tdma_free_coherent(rxq->nic_dev, cqe_mem_size,\n+\t\t\t  rxq->cqe_start_vaddr,\n+\t\t\t  
rxq->cqe_start_paddr);\n+\trxq->cqe_start_vaddr = NULL;\n+}\n+\n+static int hinic_rx_fill_wqe(struct hinic_rxq *rxq)\n+{\n+\tstruct hinic_nic_dev *nic_dev = rxq->nic_dev;\n+\tstruct hinic_rq_wqe *rq_wqe;\n+\tdma_addr_t buf_dma_addr, cqe_dma_addr;\n+\tu16 pi = 0;\n+\tint rq_wqe_len;\n+\tint i;\n+\n+\tbuf_dma_addr = 0;\n+\tcqe_dma_addr = rxq->cqe_start_paddr;\n+\tfor (i = 0; i < rxq->q_depth; i++) {\n+\t\trq_wqe = (struct hinic_rq_wqe *)\n+\t\t\thinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);\n+\t\tif (!rq_wqe) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Get rq wqe failed\");\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\thinic_prepare_rq_wqe(rq_wqe, pi, buf_dma_addr, cqe_dma_addr);\n+\t\tcqe_dma_addr +=  sizeof(struct hinic_rq_cqe);\n+\n+\t\trq_wqe_len = sizeof(struct hinic_rq_wqe);\n+\t\thinic_cpu_to_be32(rq_wqe, rq_wqe_len);\n+\t}\n+\n+\thinic_return_rq_wqe(nic_dev->hwdev, rxq->q_id, i);\n+\n+\treturn i;\n+}\n+\n+/* alloc cqe and prepare rqe */\n+int hinic_setup_rx_resources(struct hinic_rxq *rxq)\n+{\n+\tu64 rx_info_sz;\n+\tint err, pkts;\n+\n+\trx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);\n+\trxq->rx_info = kzalloc_aligned(rx_info_sz, GFP_KERNEL);\n+\tif (!rxq->rx_info)\n+\t\treturn -ENOMEM;\n+\n+\terr = hinic_rx_alloc_cqe(rxq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Allocate rx cqe failed\");\n+\t\tgoto rx_cqe_err;\n+\t}\n+\n+\tpkts = hinic_rx_fill_wqe(rxq);\n+\tif (pkts != rxq->q_depth) {\n+\t\tPMD_DRV_LOG(ERR, \"Fill rx wqe failed\");\n+\t\terr = -ENOMEM;\n+\t\tgoto rx_fill_err;\n+\t}\n+\n+\treturn 0;\n+\n+rx_fill_err:\n+\thinic_rx_free_cqe(rxq);\n+\n+rx_cqe_err:\n+\tkfree(rxq->rx_info);\n+\trxq->rx_info = NULL;\n+\n+\treturn err;\n+}\n+\n+void hinic_free_rx_resources(struct hinic_rxq *rxq)\n+{\n+\tif (rxq->rx_info == NULL)\n+\t\treturn;\n+\n+\thinic_rx_free_cqe(rxq);\n+\tkfree(rxq->rx_info);\n+\trxq->rx_info = NULL;\n+}\n+\n+void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)\n+{\n+\tu16 q_id;\n+\tstruct hinic_nic_dev *nic_dev =\n+\t\t\t\tHINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);\n+\n+\tfor (q_id = 0; q_id < nic_dev->num_rq; q_id++) {\n+\t\teth_dev->data->rx_queues[q_id] = NULL;\n+\n+\t\tif (nic_dev->rxqs[q_id] == NULL)\n+\t\t\tcontinue;\n+\n+\t\thinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);\n+\t\thinic_free_rx_resources(nic_dev->rxqs[q_id]);\n+\t\tkfree(nic_dev->rxqs[q_id]);\n+\t\tnic_dev->rxqs[q_id] = NULL;\n+\t}\n+}\n+\n+void hinic_free_all_rx_mbuf(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct hinic_nic_dev *nic_dev =\n+\t\t\t\tHINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);\n+\tu16 q_id;\n+\n+\tfor (q_id = 0; q_id < nic_dev->num_rq; q_id++)\n+\t\thinic_free_all_rx_skbs(nic_dev->rxqs[q_id]);\n+}\n+\n+static void hinic_recv_jumbo_pkt(struct hinic_rxq *rxq,\n+\t\t\t\t struct rte_mbuf *head_skb,\n+\t\t\t\t u32 remain_pkt_len)\n+{\n+\tstruct hinic_nic_dev *nic_dev = rxq->nic_dev;\n+\tstruct rte_mbuf *cur_mbuf, *rxm = NULL;\n+\tstruct hinic_rx_info *rx_info;\n+\tu16 sw_ci, rx_buf_len = rxq->buf_len;\n+\tu32 pkt_len;\n+\n+\twhile (remain_pkt_len > 0) {\n+\t\tsw_ci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);\n+\t\trx_info = &rxq->rx_info[sw_ci];\n+\n+\t\thinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);\n+\n+\t\tpkt_len = remain_pkt_len > rx_buf_len ?\n+\t\t\trx_buf_len : remain_pkt_len;\n+\t\tremain_pkt_len -= pkt_len;\n+\n+\t\tcur_mbuf = rx_info->mbuf;\n+\t\tcur_mbuf->data_len = (u16)pkt_len;\n+\t\tcur_mbuf->next = NULL;\n+\n+\t\thead_skb->pkt_len += cur_mbuf->data_len;\n+\t\thead_skb->nb_segs++;\n+#ifdef HINIC_XSTAT_MBUF_USE\n+\t\trxq->rxq_stats.free_mbuf++;\n+#endif\n+\n+\t\tif 
(!rxm)\n+\t\t\thead_skb->next = cur_mbuf;\n+\t\telse\n+\t\t\trxm->next = cur_mbuf;\n+\n+\t\trxm = cur_mbuf;\n+\t}\n+}\n+\n+static void hinic_rss_deinit(struct hinic_nic_dev *nic_dev)\n+{\n+\tu8 prio_tc[HINIC_DCB_UP_MAX] = {0};\n+\t(void)hinic_rss_cfg(nic_dev->hwdev, 0,\n+\t\t\t    nic_dev->rss_tmpl_idx, 0, prio_tc);\n+}\n+\n+static int hinic_rss_key_init(struct hinic_nic_dev *nic_dev,\n+\t\t\t      struct rte_eth_rss_conf *rss_conf)\n+{\n+\tu8 default_rss_key[HINIC_RSS_KEY_SIZE] = {\n+\t\t\t 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,\n+\t\t\t 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,\n+\t\t\t 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,\n+\t\t\t 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,\n+\t\t\t 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa};\n+\tu8 hashkey[HINIC_RSS_KEY_SIZE] = {0};\n+\tu8 tmpl_idx = nic_dev->rss_tmpl_idx;\n+\n+\tif (rss_conf->rss_key == NULL)\n+\t\tmemcpy(hashkey, default_rss_key, HINIC_RSS_KEY_SIZE);\n+\telse\n+\t\tmemcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);\n+\n+\treturn hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);\n+}\n+\n+static void hinic_fill_rss_type(struct nic_rss_type *rss_type,\n+\t\t\t\tstruct rte_eth_rss_conf *rss_conf)\n+{\n+\tu64 rss_hf = rss_conf->rss_hf;\n+\n+\trss_type->ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;\n+\trss_type->tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;\n+\trss_type->ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;\n+\trss_type->ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;\n+\trss_type->tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;\n+\trss_type->tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;\n+\trss_type->udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;\n+\trss_type->udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 
1 : 0;\n+}\n+\n+static void hinic_fillout_indir_tbl(struct hinic_nic_dev *nic_dev, u32 *indir)\n+{\n+\tu8 rss_queue_count = nic_dev->num_rss;\n+\tint i = 0, j;\n+\n+\tif (rss_queue_count == 0) {\n+\t\t/* delete q_id from indir tbl */\n+\t\tfor (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)\n+\t\t\tindir[i] = 0xFF;\t/* Invalid value in indir tbl */\n+\t} else {\n+\t\twhile (i < HINIC_RSS_INDIR_SIZE)\n+\t\t\tfor (j = 0; (j < rss_queue_count) &&\n+\t\t\t     (i < HINIC_RSS_INDIR_SIZE); j++)\n+\t\t\t\tindir[i++] = nic_dev->rx_queue_list[j];\n+\t}\n+}\n+\n+static int hinic_rss_init(struct hinic_nic_dev *nic_dev,\n+\t\t\t  __attribute__((unused)) u8 *rq2iq_map,\n+\t\t\t  struct rte_eth_rss_conf *rss_conf)\n+{\n+\tu32 indir_tbl[HINIC_RSS_INDIR_SIZE] = {0};\n+\tstruct nic_rss_type rss_type = {0};\n+\tu8 prio_tc[HINIC_DCB_UP_MAX] = {0};\n+\tu8 tmpl_idx = 0xFF, num_tc = 0;\n+\tint err;\n+\n+\ttmpl_idx = nic_dev->rss_tmpl_idx;\n+\n+\terr = hinic_rss_key_init(nic_dev, rss_conf);\n+\tif (err)\n+\t\treturn err;\n+\n+\tif (!nic_dev->rss_indir_flag) {\n+\t\thinic_fillout_indir_tbl(nic_dev, indir_tbl);\n+\t\terr = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx,\n+\t\t\t\t\t      indir_tbl);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\thinic_fill_rss_type(&rss_type, rss_conf);\n+\terr = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);\n+\tif (err)\n+\t\treturn err;\n+\n+\terr = hinic_rss_set_hash_engine(nic_dev->hwdev, tmpl_idx,\n+\t\t\t\t\tHINIC_RSS_HASH_ENGINE_TYPE_TOEP);\n+\tif (err)\n+\t\treturn err;\n+\n+\treturn hinic_rss_cfg(nic_dev->hwdev, 1, tmpl_idx, num_tc, prio_tc);\n+}\n+\n+static void\n+hinic_add_rq_to_rx_queue_list(struct hinic_nic_dev *nic_dev, u16 queue_id)\n+{\n+\tu8 rss_queue_count = nic_dev->num_rss;\n+\n+\tRTE_ASSERT(rss_queue_count <= (RTE_DIM(nic_dev->rx_queue_list) - 1));\n+\n+\tnic_dev->rx_queue_list[rss_queue_count] = queue_id;\n+\tnic_dev->num_rss++;\n+}\n+\n+/**\n+ * hinic_setup_num_qps - determine num_qps from rss_tmpl_id\n+ * @nic_dev: pointer to the private ethernet device\n+ * Return: 0 on Success, error code otherwise.\n+ **/\n+static int hinic_setup_num_qps(struct hinic_nic_dev *nic_dev)\n+{\n+\tint err, i;\n+\n+\tif (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {\n+\t\tnic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;\n+\t\tnic_dev->num_rss = 0;\n+\t\tif (nic_dev->num_rq > 1) {\n+\t\t\t/* get rss template id */\n+\t\t\terr = hinic_rss_template_alloc(nic_dev->hwdev,\n+\t\t\t\t\t\t       &nic_dev->rss_tmpl_idx);\n+\t\t\tif (err) {\n+\t\t\t\tPMD_DRV_LOG(WARNING, \"Alloc rss template failed\");\n+\t\t\t\treturn err;\n+\t\t\t}\n+\t\t\tnic_dev->flags |= ETH_MQ_RX_RSS_FLAG;\n+\t\t\tfor (i = 0; i < nic_dev->num_rq; i++)\n+\t\t\t\thinic_add_rq_to_rx_queue_list(nic_dev, i);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void hinic_destroy_num_qps(struct hinic_nic_dev *nic_dev)\n+{\n+\tif (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {\n+\t\tif (hinic_rss_template_free(nic_dev->hwdev,\n+\t\t\t\t\t    nic_dev->rss_tmpl_idx))\n+\t\t\tPMD_DRV_LOG(WARNING, \"Free rss template failed\");\n+\n+\t\tnic_dev->flags &= ~ETH_MQ_RX_RSS_FLAG;\n+\t}\n+}\n+\n+static int hinic_config_mq_rx_rss(struct hinic_nic_dev *nic_dev, bool on)\n+{\n+\tint ret = 0;\n+\n+\tif (on) {\n+\t\tret = hinic_setup_num_qps(nic_dev);\n+\t\tif (ret)\n+\t\t\tPMD_DRV_LOG(ERR, \"Setup num_qps failed\");\n+\t} else {\n+\t\thinic_destroy_num_qps(nic_dev);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+int hinic_config_mq_mode(struct rte_eth_dev *dev, bool on)\n+{\n+\tstruct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);\n+\tstruct rte_eth_conf 
*dev_conf = &dev->data->dev_conf;\n+\tint ret = 0;\n+\n+\tswitch (dev_conf->rxmode.mq_mode) {\n+\tcase ETH_MQ_RX_RSS:\n+\t\tret = hinic_config_mq_rx_rss(nic_dev, on);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+int hinic_rx_configure(struct rte_eth_dev *dev)\n+{\n+\tstruct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);\n+\tstruct rte_eth_rss_conf rss_conf =\n+\t\tdev->data->dev_conf.rx_adv_conf.rss_conf;\n+\tu32 csum_en = 0;\n+\tint err;\n+\n+\tif (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {\n+\t\tif (rss_conf.rss_hf == 0) {\n+\t\t\trss_conf.rss_hf = HINIC_RSS_OFFLOAD_ALL;\n+\t\t} else if ((rss_conf.rss_hf & HINIC_RSS_OFFLOAD_ALL) == 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Do not support rss offload all\");\n+\t\t\tgoto rss_config_err;\n+\t\t}\n+\n+\t\terr = hinic_rss_init(nic_dev, NULL, &rss_conf);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Init rss failed\");\n+\t\t\tgoto rss_config_err;\n+\t\t}\n+\t}\n+\n+\t/* Enable both L3/L4 rx checksum offload */\n+\tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM)\n+\t\tcsum_en = HINIC_RX_CSUM_OFFLOAD_EN;\n+\n+\terr = hinic_set_rx_csum_offload(nic_dev->hwdev, csum_en);\n+\tif (err)\n+\t\tgoto rx_csum_ofl_err;\n+\n+\treturn 0;\n+\n+rx_csum_ofl_err:\n+rss_config_err:\n+\thinic_destroy_num_qps(nic_dev);\n+\n+\treturn HINIC_ERROR;\n+}\n+\n+void hinic_rx_remove_configure(struct rte_eth_dev *dev)\n+{\n+\tstruct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);\n+\n+\tif (nic_dev->flags & ETH_MQ_RX_RSS_FLAG) {\n+\t\thinic_rss_deinit(nic_dev);\n+\t\thinic_destroy_num_qps(nic_dev);\n+\t}\n+}\n+\n+void hinic_free_all_rx_skbs(struct hinic_rxq *rxq)\n+{\n+\tstruct hinic_nic_dev *nic_dev = rxq->nic_dev;\n+\tstruct hinic_rx_info *rx_info;\n+\tint free_wqebbs =\n+\t\thinic_get_rq_free_wqebbs(nic_dev->hwdev, rxq->q_id) + 1;\n+\tvolatile struct hinic_rq_cqe *rx_cqe;\n+\tu16 ci;\n+\n+\twhile (free_wqebbs++ < rxq->q_depth) {\n+\t\tci = hinic_get_rq_local_ci(nic_dev->hwdev, rxq->q_id);\n+\n+\t\trx_cqe = &rxq->rx_cqe[ci];\n+\n+\t\t/* clear done bit */\n+\t\trx_cqe->status = 0;\n+\n+\t\trx_info = &rxq->rx_info[ci];\n+#ifdef HINIC_XSTAT_MBUF_USE\n+\t\thinic_rx_free_mbuf(rxq, rx_info->mbuf);\n+#else\n+\t\thinic_rx_free_mbuf(rx_info->mbuf);\n+#endif\n+\t\trx_info->mbuf = NULL;\n+\n+\t\thinic_update_rq_local_ci(nic_dev->hwdev, rxq->q_id, 1);\n+\t}\n+}\n+\n+/* performance: byteorder swap m128i */\n+static inline void hinic_rq_cqe_be_to_cpu32(void *dst_le32,\n+\t\t\t\t\t    volatile void *src_be32)\n+{\n+#ifndef __ARM64_NEON__\n+\tvolatile __m128i *wqe_be = (volatile __m128i *)src_be32;\n+\t__m128i *wqe_le = (__m128i *)dst_le32;\n+\t__m128i shuf_mask =  _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,\n+\t\t\t\t\t  11, 4, 5, 6, 7, 0, 1, 2, 3);\n+\n+\t/* l2nic just use first 128 bits */\n+\twqe_le[0] = _mm_shuffle_epi8(wqe_be[0], shuf_mask);\n+#else\n+\tvolatile uint8x16_t *wqe_be = (volatile uint8x16_t *)src_be32;\n+\tuint8x16_t *wqe_le = (uint8x16_t *)dst_le32;\n+\tconst uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,\n+\t\t\t\t\t9, 8, 15, 14, 13, 12};\n+\n+\t/* l2nic just use first 128 bits */\n+\twqe_le[0] = vqtbl1q_u8(wqe_be[0], shuf_mask);\n+#endif\n+}\n+\n+static inline uint64_t hinic_rx_rss_hash(uint32_t offload_type,\n+\t\t\t\t\t uint32_t cqe_hass_val,\n+\t\t\t\t\t uint32_t *rss_hash)\n+{\n+\tuint32_t rss_type;\n+\n+\trss_type = HINIC_GET_RSS_TYPES(offload_type);\n+\tif (likely(rss_type != 0)) {\n+\t\t*rss_hash = cqe_hass_val;\n+\t\treturn PKT_RX_RSS_HASH;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline 
uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq)\n+{\n+\tuint32_t checksum_err;\n+\tuint64_t flags;\n+\n+\t/* most case checksum is ok */\n+\tchecksum_err = HINIC_GET_RX_CSUM_ERR(status);\n+\tif (likely(checksum_err == 0))\n+\t\treturn (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);\n+\n+\t/* If BYPASS bit set, all other status indications should be ignored */\n+\tif (unlikely(HINIC_CSUM_ERR_BYPASSED(checksum_err)))\n+\t\treturn PKT_RX_IP_CKSUM_UNKNOWN;\n+\n+\tflags = 0;\n+\n+\t/* IP checksum error */\n+\tif (HINIC_CSUM_ERR_IP(checksum_err))\n+\t\tflags |= PKT_RX_IP_CKSUM_BAD;\n+\telse\n+\t\tflags |= PKT_RX_IP_CKSUM_GOOD;\n+\n+\t/* L4 checksum error */\n+\tif (HINIC_CSUM_ERR_L4(checksum_err))\n+\t\tflags |= PKT_RX_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= PKT_RX_L4_CKSUM_GOOD;\n+\n+\tif (unlikely(HINIC_CSUM_ERR_OTHER(checksum_err)))\n+\t\tflags = PKT_RX_L4_CKSUM_NONE;\n+\n+\trxq->rxq_stats.errors++;\n+\n+\treturn flags;\n+}\n+\n+static inline uint64_t hinic_rx_vlan(uint32_t offload_type, uint32_t vlan_len,\n+\t\t\t\t     uint16_t *vlan_tci)\n+{\n+\tuint16_t vlan_tag;\n+\n+\tvlan_tag = HINIC_GET_RX_VLAN_TAG(vlan_len);\n+\tif (!HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) || 0 == vlan_tag) {\n+\t\t*vlan_tci = 0;\n+\t\treturn 0;\n+\t}\n+\n+\t*vlan_tci = vlan_tag;\n+\n+\treturn PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;\n+}\n+\n+static inline uint64_t hinic_rx_pkt_type(uint32_t offload_type)\n+{\n+\tuint32_t pkt_type, pkt_idx;\n+\tstatic const uint32_t pkt_type_table[RQ_CQE_PKT_TYPES_L2_MASK + 1]\n+\t__rte_cache_aligned = {\n+\t\t[3] =  RTE_PTYPE_L3_IPV4,\n+\t\t[4] =  RTE_PTYPE_L3_IPV4_EXT,\n+\t\t[5] =  RTE_PTYPE_L4_FRAG,\n+\t\t[7] =  RTE_PTYPE_L3_IPV6,\n+\t\t[9] =  RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,\n+\t\t[10] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,\n+\t\t[11] = RTE_PTYPE_TUNNEL_VXLAN,\n+\t\t[13] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,\n+\t\t[14] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,\n+\t\t[15] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,\n+\t\t[16] = RTE_PTYPE_TUNNEL_NVGRE,\n+\t\t[65] = RTE_PTYPE_L4_ICMP,\n+\t\t[66] = RTE_PTYPE_L4_ICMP,\n+\t\t[76] = RTE_PTYPE_L2_ETHER_LLDP,\n+\t\t[81] = RTE_PTYPE_L2_ETHER_ARP,\n+\t\t/* All others reserved */\n+\t};\n+\tpkt_idx = HINIC_GET_PKT_TYPES(offload_type);\n+\n+\t/* Unknown type */\n+\tif (unlikely(pkt_idx == 0))\n+\t\treturn RTE_PTYPE_UNKNOWN;\n+\n+\t/* if hardware report index not correct set l2 ether as default */\n+\tpkt_type = RTE_PTYPE_L2_ETHER;\n+\tpkt_type |= pkt_type_table[HINIC_PKT_TYPES_L2(pkt_idx)];\n+\n+\treturn pkt_type;\n+}\n+\n+static inline u32 hinic_rx_alloc_mbuf_bulk(struct hinic_rxq *rxq,\n+\t\t\t\t\t   struct rte_mbuf **mbufs,\n+\t\t\t\t\t   u32 exp_mbuf_cnt)\n+{\n+\tint rc;\n+\tu32 avail_cnt;\n+\n+\trc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, exp_mbuf_cnt);\n+\tif (likely(rc == HINIC_OK)) {\n+\t\tavail_cnt = exp_mbuf_cnt;\n+\t} else {\n+\t\tavail_cnt = 0;\n+\t\trxq->rxq_stats.rx_nombuf += exp_mbuf_cnt;\n+\t}\n+#ifdef HINIC_XSTAT_MBUF_USE\n+\trxq->rxq_stats.alloc_mbuf += avail_cnt;\n+#endif\n+\treturn avail_cnt;\n+}\n+\n+#ifdef HINIC_XSTAT_MBUF_USE\n+void hinic_rx_free_mbuf(struct hinic_rxq *rxq, struct rte_mbuf *m)\n+{\n+\trte_pktmbuf_free(m);\n+\trxq->rxq_stats.free_mbuf++;\n+}\n+#else\n+void hinic_rx_free_mbuf(struct rte_mbuf *m)\n+{\n+\trte_pktmbuf_free(m);\n+}\n+#endif\n+\n+static struct rte_mbuf *hinic_rx_alloc_mbuf(struct hinic_rxq *rxq,\n+\t\t\t\t\tdma_addr_t *dma_addr)\n+{\n+\tstruct rte_mbuf *mbuf;\n+\n+\tmbuf = hinic_rte_rxmbuf_alloc(rxq->mb_pool);\n+\tif (unlikely(!mbuf))\n+\t\treturn NULL;\n+\n+\t*dma_addr = 
rte_mbuf_data_iova_default(mbuf);\n+\n+#ifdef HINIC_XSTAT_MBUF_USE\n+\trxq->rxq_stats.alloc_mbuf++;\n+#endif\n+\n+\treturn mbuf;\n+}\n+\n+static inline void hinic_rearm_rxq_mbuf(struct hinic_rxq *rxq)\n+{\n+\tu16 pi;\n+\tu32 i, free_wqebbs, rearm_wqebbs, exp_wqebbs;\n+\tdma_addr_t dma_addr;\n+\tstruct hinic_rq_wqe *rq_wqe;\n+\tstruct rte_mbuf **rearm_mbufs;\n+\n+\t/* check free wqebb fo rearm */\n+\tfree_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);\n+\tif (unlikely(free_wqebbs < rxq->rx_free_thresh))\n+\t\treturn;\n+\n+\t/* get rearm mbuf array */\n+\tpi = HINIC_GET_RQ_LOCAL_PI(rxq);\n+\trearm_mbufs = (struct rte_mbuf **)(&rxq->rx_info[pi]);\n+\n+\t/* check rxq free wqebbs turn around */\n+\texp_wqebbs = rxq->q_depth - pi;\n+\tif (free_wqebbs < exp_wqebbs)\n+\t\texp_wqebbs = free_wqebbs;\n+\n+\t/* alloc mbuf in bulk */\n+\trearm_wqebbs = hinic_rx_alloc_mbuf_bulk(rxq, rearm_mbufs, exp_wqebbs);\n+\tif (unlikely(rearm_wqebbs == 0))\n+\t\treturn;\n+\n+\t/* rearm rx mbuf */\n+\trq_wqe = (struct hinic_rq_wqe *)WQ_WQE_ADDR(rxq->wq, (u32)pi);\n+\tfor (i = 0; i < rearm_wqebbs; i++) {\n+\t\tdma_addr = rte_mbuf_data_iova_default(rearm_mbufs[i]);\n+\t\trq_wqe->buf_desc.addr_high =\n+\t\t\t\t\tcpu_to_be32(upper_32_bits(dma_addr));\n+\t\trq_wqe->buf_desc.addr_low =\n+\t\t\t\t\tcpu_to_be32(lower_32_bits(dma_addr));\n+\t\trq_wqe++;\n+\t}\n+\trxq->wq->prod_idx += rearm_wqebbs;\n+\trxq->wq->delta -= rearm_wqebbs;\n+\n+\t/* update rq hw_pi */\n+\trte_wmb();\n+\tHINIC_UPDATE_RQ_HW_PI(rxq, pi + rearm_wqebbs);\n+}\n+\n+void hinic_rx_alloc_pkts(struct hinic_rxq *rxq)\n+{\n+\tstruct hinic_nic_dev *nic_dev = rxq->nic_dev;\n+\tstruct hinic_rq_wqe *rq_wqe;\n+\tstruct hinic_rx_info *rx_info;\n+\tstruct rte_mbuf *mb;\n+\tdma_addr_t dma_addr;\n+\tu16 pi = 0;\n+\tint i, free_wqebbs;\n+\n+\tfree_wqebbs = HINIC_GET_RQ_FREE_WQEBBS(rxq);\n+\tfor (i = 0; i < free_wqebbs; i++) {\n+\t\tmb = hinic_rx_alloc_mbuf(rxq, &dma_addr);\n+\t\tif (unlikely(!mb)) {\n+\t\t\trxq->rxq_stats.rx_nombuf++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\trq_wqe = (struct hinic_rq_wqe *)\n+\t\t\thinic_get_rq_wqe(nic_dev->hwdev, rxq->q_id, &pi);\n+\t\tif (unlikely(!rq_wqe)) {\n+#ifdef HINIC_XSTAT_MBUF_USE\n+\t\t\thinic_rx_free_mbuf(rxq, mb);\n+#else\n+\t\t\thinic_rx_free_mbuf(mb);\n+#endif\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\t/* fill buffer address only */\n+\t\trq_wqe->buf_desc.addr_high =\n+\t\t\t\tcpu_to_be32(upper_32_bits(dma_addr));\n+\t\trq_wqe->buf_desc.addr_low =\n+\t\t\t\tcpu_to_be32(lower_32_bits(dma_addr));\n+\n+\t\trx_info = &rxq->rx_info[pi];\n+\t\trx_info->mbuf = mb;\n+\t}\n+\n+\tif (likely(i > 0)) {\n+\t\trte_wmb();\n+\t\tHINIC_UPDATE_RQ_HW_PI(rxq, pi + 1);\n+\t}\n+}\n+\n+u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)\n+{\n+\tstruct rte_mbuf *rxm;\n+\tstruct hinic_rxq *rxq = (struct hinic_rxq *)rx_queue;\n+\tstruct hinic_rx_info *rx_info;\n+\tvolatile struct hinic_rq_cqe *rx_cqe;\n+\tu16 rx_buf_len, pkts = 0;\n+\tu16 sw_ci, ci_mask, wqebb_cnt = 0;\n+\tu32 pkt_len, status, vlan_len;\n+\tu64 rx_bytes = 0;\n+#ifdef HINIC_XSTAT_PROF_RX\n+\tuint64_t t1 = rte_get_tsc_cycles();\n+\tuint64_t t2;\n+#endif\n+\tstruct hinic_rq_cqe cqe;\n+\tu32 offload_type, rss_hash;\n+\n+\trx_buf_len = rxq->buf_len;\n+\n+\t/* 1. get polling start ci */\n+\tci_mask = HINIC_GET_RQ_WQE_MASK(rxq);\n+\tsw_ci = HINIC_GET_RQ_LOCAL_CI(rxq);\n+\n+\twhile (pkts < nb_pkts) {\n+\t\t /* 2. 
current ci is done */\n+\t\trx_cqe = &rxq->rx_cqe[sw_ci];\n+\t\tstatus = rx_cqe->status;\n+\t\tif (!HINIC_GET_RX_DONE_BE(status))\n+\t\t\tbreak;\n+\n+\t\t/* read other cqe member after status */\n+\t\trte_rmb();\n+\n+\t\t/* convert cqe and get packet length */\n+\t\thinic_rq_cqe_be_to_cpu32(&cqe, (volatile void *)rx_cqe);\n+\t\tvlan_len = cqe.vlan_len;\n+\n+\t\trx_info = &rxq->rx_info[sw_ci];\n+\t\trxm = rx_info->mbuf;\n+\n+\t\t/* 3. next ci point and prefetch */\n+\t\tsw_ci++;\n+\t\tsw_ci &= ci_mask;\n+\n+\t\t/* prefetch next mbuf first 64B */\n+\t\trte_prefetch0(rxq->rx_info[sw_ci].mbuf);\n+\n+\t\t/* 4. jumbo frame process */\n+\t\tpkt_len = HINIC_GET_RX_PKT_LEN(vlan_len);\n+\t\tif (likely(pkt_len <= rx_buf_len)) {\n+\t\t\trxm->data_len = pkt_len;\n+\t\t\trxm->pkt_len = pkt_len;\n+\t\t\twqebb_cnt++;\n+\t\t} else {\n+\t\t\trxm->data_len = rx_buf_len;\n+\t\t\trxm->pkt_len = rx_buf_len;\n+\n+\t\t\t/* if jumbo use multi-wqebb update ci,\n+\t\t\t * recv_jumbo_pkt will also update ci\n+\t\t\t */\n+\t\t\tHINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt + 1);\n+\t\t\twqebb_cnt = 0;\n+\t\t\thinic_recv_jumbo_pkt(rxq, rxm, pkt_len - rx_buf_len);\n+\t\t\tsw_ci = HINIC_GET_RQ_LOCAL_CI(rxq);\n+\t\t}\n+\n+\t\t/* 5. vlan/checksum/rss/pkt_type/gro offload */\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trxm->port = rxq->port_id;\n+\t\toffload_type = cqe.offload_type;\n+\n+\t\t/* vlan offload */\n+\t\trxm->ol_flags |= hinic_rx_vlan(offload_type, vlan_len,\n+\t\t\t\t\t       &rxm->vlan_tci);\n+\n+\t\t/* checksum offload */\n+\t\trxm->ol_flags |= hinic_rx_csum(cqe.status, rxq);\n+\n+\t\t/* rss hash offload */\n+\t\trss_hash = cqe.rss_hash;\n+\t\trxm->ol_flags |= hinic_rx_rss_hash(offload_type, rss_hash,\n+\t\t\t\t\t\t   &rxm->hash.rss);\n+\n+\t\t/* packet type parser offload */\n+\t\trxm->packet_type = hinic_rx_pkt_type(offload_type);\n+\n+\t\t/* 6. clear done bit */\n+\t\trx_cqe->status = 0;\n+\n+\t\trx_bytes += pkt_len;\n+\t\trx_pkts[pkts++] = rxm;\n+\t}\n+\n+\tif (pkts) {\n+\t\t/* 7. update ci */\n+\t\tHINIC_UPDATE_RQ_LOCAL_CI(rxq, wqebb_cnt);\n+\n+\t\t/* do packet stats */\n+\t\trxq->rxq_stats.packets += pkts;\n+\t\trxq->rxq_stats.bytes += rx_bytes;\n+#ifdef HINIC_XSTAT_MBUF_USE\n+\t\trxq->rxq_stats.free_mbuf += pkts;\n+#endif\n+\t}\n+\n+#ifdef HINIC_XSTAT_RXBUF_INFO\n+\trxq->rxq_stats.burst_pkts = pkts;\n+#endif\n+\n+\t/* 8. rearm mbuf to rxq */\n+\thinic_rearm_rxq_mbuf(rxq);\n+\n+#ifdef HINIC_XSTAT_PROF_RX\n+\t/* do profiling stats */\n+\tt2 = rte_get_tsc_cycles();\n+\trxq->rxq_stats.app_tsc = t1 - rxq->prof_rx_end_tsc;\n+\trxq->prof_rx_end_tsc = t2;\n+\trxq->rxq_stats.pmd_tsc = t2 - t1;\n+#endif\n+\n+\treturn pkts;\n+}\ndiff --git a/drivers/net/hinic/meson.build b/drivers/net/hinic/meson.build\nindex f55f5210e..8c0511c8e 100644\n--- a/drivers/net/hinic/meson.build\n+++ b/drivers/net/hinic/meson.build\n@@ -7,7 +7,7 @@ objs = [base_objs]\n sources = files(\n \t'hinic_pmd_dpdev.c',\n \t'hinic_pmd_ethdev.c',\n-#\t'hinic_pmd_rx.c',\n+\t'hinic_pmd_rx.c',\n #\t'hinic_pmd_tx.c'\n \t)\n \n",
    "prefixes": [
        "v4",
        "09/11"
    ]
}
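
The PUT and PATCH methods advertised in the Allow header above require authentication and sufficient permissions on the project. A hedged sketch of a partial update, assuming a Patchwork API token (the token value is a placeholder, and the field values used here, such as the "accepted" state, are illustrative):

import requests

url = "http://patches.dpdk.org/api/patches/54479/"
headers = {"Authorization": "Token REPLACE-WITH-YOUR-TOKEN"}  # placeholder token

# PATCH performs a partial update: only the supplied fields change.
resp = requests.patch(url, headers=headers,
                      json={"state": "accepted", "archived": False})
resp.raise_for_status()
print(resp.json()["state"])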