get:
Show a patch.

patch:
Partially update a patch; only the fields present in the request body are changed.

put:
Update a patch, replacing all writable fields.
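
For illustration, the endpoint can be queried with any HTTP client. Below is a
minimal sketch in Python using the third-party requests library; the client
code and variable names are assumptions for this example, not part of the
Patchwork API itself:

    import requests

    # Fetch the JSON representation of patch 76655, as shown below.
    resp = requests.get(
        "http://patches.dpdk.org/api/patches/76655/",
        headers={"Accept": "application/json"},  # ask for JSON explicitly
    )
    resp.raise_for_status()
    patch = resp.json()
    # e.g. "[3/8] net/hns3: add simple Rx process function" / "superseded"
    print(patch["name"], patch["state"])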

GET /api/patches/76655/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 76655,
    "url": "http://patches.dpdk.org/api/patches/76655/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200907090825.1761-4-huwei013@chinasoftinc.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200907090825.1761-4-huwei013@chinasoftinc.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200907090825.1761-4-huwei013@chinasoftinc.com",
    "date": "2020-09-07T09:08:20",
    "name": "[3/8] net/hns3: add simple Rx process function",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "98ec5e2a6f223a774b5ddc45f8f3870f0c958a0a",
    "submitter": {
        "id": 1537,
        "url": "http://patches.dpdk.org/api/people/1537/?format=api",
        "name": "Wei Hu (Xavier)",
        "email": "huwei013@chinasoftinc.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200907090825.1761-4-huwei013@chinasoftinc.com/mbox/",
    "series": [
        {
            "id": 11986,
            "url": "http://patches.dpdk.org/api/series/11986/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11986",
            "date": "2020-09-07T09:08:17",
            "name": "net/hns3: updates for Rx Tx",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/11986/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/76655/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/76655/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 4A395A04B9;\n\tMon,  7 Sep 2020 11:09:30 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 037D01C0BE;\n\tMon,  7 Sep 2020 11:09:14 +0200 (CEST)",
            "from mail.chinasoftinc.com (unknown [114.113.233.8])\n by dpdk.org (Postfix) with ESMTP id 7E7B41C10F\n for <dev@dpdk.org>; Mon,  7 Sep 2020 11:09:10 +0200 (CEST)",
            "from localhost.localdomain (65.49.108.226) by INCCAS002.ito.icss\n (10.168.0.60) with Microsoft SMTP Server id 14.3.487.0; Mon, 7 Sep 2020\n 17:09:05 +0800"
        ],
        "From": "\"Wei Hu (Xavier)\" <huwei013@chinasoftinc.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xavier.huwei@huawei.com>",
        "Date": "Mon, 7 Sep 2020 17:08:20 +0800",
        "Message-ID": "<20200907090825.1761-4-huwei013@chinasoftinc.com>",
        "X-Mailer": "git-send-email 2.9.5",
        "In-Reply-To": "<20200907090825.1761-1-huwei013@chinasoftinc.com>",
        "References": "<20200907090825.1761-1-huwei013@chinasoftinc.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[65.49.108.226]",
        "Subject": "[dpdk-dev] [PATCH 3/8] net/hns3: add simple Rx process function",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: \"Wei Hu (Xavier)\" <xavier.huwei@huawei.com>\n\nThis patch adds simple Rx process function and support chose Rx function\nby real Rx offloads capability.\n\nSigned-off-by: Chengwen Feng <fengchengwen@huawei.com>\nSigned-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>\nSigned-off-by: Huisong Li <lihuisong@huawei.com>\n---\n drivers/net/hns3/hns3_ethdev.c    |   7 +-\n drivers/net/hns3/hns3_ethdev.h    |  21 ++\n drivers/net/hns3/hns3_ethdev_vf.c |  11 +-\n drivers/net/hns3/hns3_rxtx.c      | 538 +++++++++++++++++++++++---------------\n drivers/net/hns3/hns3_rxtx.h      | 130 ++++++++-\n 5 files changed, 489 insertions(+), 218 deletions(-)",
    "diff": "diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c\nindex 6fa34e8..5d612f1 100644\n--- a/drivers/net/hns3/hns3_ethdev.c\n+++ b/drivers/net/hns3/hns3_ethdev.c\n@@ -2351,6 +2351,8 @@ hns3_dev_configure(struct rte_eth_dev *dev)\n \tif (ret)\n \t\tgoto cfg_err;\n \n+\thns->rx_simple_allowed = true;\n+\thns3_init_rx_ptype_tble(dev);\n \thw->adapter_state = HNS3_NIC_CONFIGURED;\n \n \treturn 0;\n@@ -4746,6 +4748,7 @@ hns3_dev_start(struct rte_eth_dev *dev)\n \thw->adapter_state = HNS3_NIC_STARTED;\n \trte_spinlock_unlock(&hw->lock);\n \n+\thns3_rx_scattered_calc(dev);\n \thns3_set_rxtx_function(dev);\n \thns3_mp_req_start_rxtx(dev);\n \trte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev);\n@@ -4844,6 +4847,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)\n \t\thns3_dev_release_mbufs(hns);\n \t\thw->adapter_state = HNS3_NIC_CONFIGURED;\n \t}\n+\thns3_rx_scattered_reset(dev);\n \trte_eal_alarm_cancel(hns3_service_handler, dev);\n \trte_spinlock_unlock(&hw->lock);\n }\n@@ -5514,6 +5518,7 @@ hns3_reset_service(void *param)\n }\n \n static const struct eth_dev_ops hns3_eth_dev_ops = {\n+\t.dev_configure      = hns3_dev_configure,\n \t.dev_start          = hns3_dev_start,\n \t.dev_stop           = hns3_dev_stop,\n \t.dev_close          = hns3_dev_close,\n@@ -5539,7 +5544,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {\n \t.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,\n \t.rxq_info_get           = hns3_rxq_info_get,\n \t.txq_info_get           = hns3_txq_info_get,\n-\t.dev_configure          = hns3_dev_configure,\n+\t.rx_burst_mode_get      = hns3_rx_burst_mode_get,\n \t.flow_ctrl_get          = hns3_flow_ctrl_get,\n \t.flow_ctrl_set          = hns3_flow_ctrl_set,\n \t.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,\ndiff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h\nindex 3cb0535..d93c5b2 100644\n--- a/drivers/net/hns3/hns3_ethdev.h\n+++ b/drivers/net/hns3/hns3_ethdev.h\n@@ -433,6 +433,7 @@ struct hns3_hw {\n \tuint16_t tqps_num;          /* num task queue pairs of this function */\n \tuint16_t intr_tqps_num;     /* num queue pairs mapping interrupt */\n \tuint16_t rss_size_max;      /* HW defined max RSS task queue */\n+\tuint16_t rx_buf_len;        /* hold min hardware rx buf len */\n \tuint16_t num_tx_desc;       /* desc num of per tx queue */\n \tuint16_t num_rx_desc;       /* desc num of per rx queue */\n \tuint32_t mng_entry_num;     /* number of manager table entry */\n@@ -575,6 +576,23 @@ struct hns3_mp_param {\n /* Key string for IPC. 
*/\n #define HNS3_MP_NAME \"net_hns3_mp\"\n \n+#define HNS3_L2TBL_NUM\t4\n+#define HNS3_L3TBL_NUM\t16\n+#define HNS3_L4TBL_NUM\t16\n+#define HNS3_OL3TBL_NUM\t16\n+#define HNS3_OL4TBL_NUM\t16\n+\n+struct hns3_ptype_table {\n+\tuint32_t l2table[HNS3_L2TBL_NUM];\n+\tuint32_t l3table[HNS3_L3TBL_NUM];\n+\tuint32_t l4table[HNS3_L4TBL_NUM];\n+\tuint32_t inner_l2table[HNS3_L2TBL_NUM];\n+\tuint32_t inner_l3table[HNS3_L3TBL_NUM];\n+\tuint32_t inner_l4table[HNS3_L4TBL_NUM];\n+\tuint32_t ol3table[HNS3_OL3TBL_NUM];\n+\tuint32_t ol4table[HNS3_OL4TBL_NUM];\n+};\n+\n struct hns3_pf {\n \tstruct hns3_adapter *adapter;\n \tbool is_main_pf;\n@@ -623,6 +641,9 @@ struct hns3_adapter {\n \t\tstruct hns3_pf pf;\n \t\tstruct hns3_vf vf;\n \t};\n+\n+\tbool rx_simple_allowed;\n+\tstruct hns3_ptype_table ptype_tbl __rte_cache_min_aligned;\n };\n \n #define HNS3_DEV_SUPPORT_DCB_B\t\t\t0x0\ndiff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c\nindex 7226cc5..0f155d8 100644\n--- a/drivers/net/hns3/hns3_ethdev_vf.c\n+++ b/drivers/net/hns3/hns3_ethdev_vf.c\n@@ -745,7 +745,8 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw)\n static int\n hns3vf_dev_configure(struct rte_eth_dev *dev)\n {\n-\tstruct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tstruct hns3_hw *hw = &hns->hw;\n \tstruct hns3_rss_conf *rss_cfg = &hw->rss_info;\n \tstruct rte_eth_conf *conf = &dev->data->dev_conf;\n \tenum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;\n@@ -820,6 +821,9 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)\n \tif (ret)\n \t\tgoto cfg_err;\n \n+\thns->rx_simple_allowed = true;\n+\thns3_init_rx_ptype_tble(dev);\n+\n \thw->adapter_state = HNS3_NIC_CONFIGURED;\n \treturn 0;\n \n@@ -1875,6 +1879,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)\n \t\thns3_dev_release_mbufs(hns);\n \t\thw->adapter_state = HNS3_NIC_CONFIGURED;\n \t}\n+\thns3_rx_scattered_reset(dev);\n \trte_eal_alarm_cancel(hns3vf_service_handler, dev);\n \trte_spinlock_unlock(&hw->lock);\n }\n@@ -2111,6 +2116,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)\n \thw->adapter_state = HNS3_NIC_STARTED;\n \trte_spinlock_unlock(&hw->lock);\n \n+\thns3_rx_scattered_calc(dev);\n \thns3_set_rxtx_function(dev);\n \thns3_mp_req_start_rxtx(dev);\n \trte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);\n@@ -2508,6 +2514,7 @@ hns3vf_reinit_dev(struct hns3_adapter *hns)\n }\n \n static const struct eth_dev_ops hns3vf_eth_dev_ops = {\n+\t.dev_configure      = hns3vf_dev_configure,\n \t.dev_start          = hns3vf_dev_start,\n \t.dev_stop           = hns3vf_dev_stop,\n \t.dev_close          = hns3vf_dev_close,\n@@ -2533,7 +2540,7 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {\n \t.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,\n \t.rxq_info_get       = hns3_rxq_info_get,\n \t.txq_info_get       = hns3_txq_info_get,\n-\t.dev_configure      = hns3vf_dev_configure,\n+\t.rx_burst_mode_get  = hns3_rx_burst_mode_get,\n \t.mac_addr_add       = hns3vf_add_mac_addr,\n \t.mac_addr_remove    = hns3vf_remove_mac_addr,\n \t.mac_addr_set       = hns3vf_set_default_mac_addr,\ndiff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c\nindex 703b12a..38ad454 100644\n--- a/drivers/net/hns3/hns3_rxtx.c\n+++ b/drivers/net/hns3/hns3_rxtx.c\n@@ -30,7 +30,7 @@\n #include \"hns3_logs.h\"\n \n #define HNS3_CFG_DESC_NUM(num)\t((num) / 8 - 1)\n-#define DEFAULT_RX_FREE_THRESH\t32\n+#define HNS3_RX_RING_PREFECTH_MASK\t3\n \n static void\n 
hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)\n@@ -38,13 +38,20 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq)\n \tuint16_t i;\n \n \t/* Note: Fake rx queue will not enter here */\n-\tif (rxq->sw_ring) {\n-\t\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n-\t\t\tif (rxq->sw_ring[i].mbuf) {\n-\t\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n-\t\t\t\trxq->sw_ring[i].mbuf = NULL;\n-\t\t\t}\n-\t\t}\n+\tif (rxq->sw_ring == NULL)\n+\t\treturn;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++)\n+\t\tif (rxq->sw_ring[i].mbuf)\n+\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n+\n+\tfor (i = 0; i < rxq->bulk_mbuf_num; i++)\n+\t\trte_pktmbuf_free_seg(rxq->bulk_mbuf[i]);\n+\trxq->bulk_mbuf_num = 0;\n+\n+\tif (rxq->pkt_first_seg) {\n+\t\trte_pktmbuf_free(rxq->pkt_first_seg);\n+\t\trxq->pkt_first_seg = NULL;\n \t}\n }\n \n@@ -653,6 +660,8 @@ hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)\n \n \trxq->next_to_use = 0;\n \trxq->rx_free_hold = 0;\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n \thns3_init_rx_queue_hw(rxq);\n \n \treturn 0;\n@@ -1243,6 +1252,33 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len)\n \treturn 0;\n }\n \n+static int\n+hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf,\n+\t\t\t struct rte_mempool *mp, uint16_t nb_desc,\n+\t\t\t uint16_t *buf_size)\n+{\n+\tif (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||\n+\t    nb_desc % HNS3_ALIGN_RING_DESC) {\n+\t\thns3_err(hw, \"Number (%u) of rx descriptors is invalid\",\n+\t\t\t nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (conf->rx_drop_en == 0)\n+\t\thns3_warn(hw, \"if no descriptors available, packets are always \"\n+\t\t\t  \"dropped and rx_drop_en (1) is fixed on\");\n+\n+\tif (hns3_rx_buf_len_calc(mp, buf_size)) {\n+\t\thns3_err(hw, \"rxq mbufs' data room size (%u) is not enough! \"\n+\t\t\t\t\"minimal data room size (%u).\",\n+\t\t\t\trte_pktmbuf_data_room_size(mp),\n+\t\t\t\tHNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n int\n hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t\t    unsigned int socket_id, const struct rte_eth_rxconf *conf,\n@@ -1254,24 +1290,16 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \tstruct hns3_rx_queue *rxq;\n \tuint16_t rx_buf_size;\n \tint rx_entry_len;\n+\tint ret;\n \n \tif (dev->data->dev_started) {\n \t\thns3_err(hw, \"rx_queue_setup after dev_start no supported\");\n \t\treturn -EINVAL;\n \t}\n \n-\tif (nb_desc > HNS3_MAX_RING_DESC || nb_desc < HNS3_MIN_RING_DESC ||\n-\t    nb_desc % HNS3_ALIGN_RING_DESC) {\n-\t\thns3_err(hw, \"Number (%u) of rx descriptors is invalid\",\n-\t\t\t nb_desc);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tif (conf->rx_drop_en == 0)\n-\t\thns3_warn(hw, \"if there are no available Rx descriptors,\"\n-\t\t\t  \"incoming packets are always dropped. input parameter\"\n-\t\t\t  \" conf->rx_drop_en(%u) is uneffective.\",\n-\t\t\t  conf->rx_drop_en);\n+\tret = hns3_rx_queue_conf_check(hw, conf, mp, nb_desc, &rx_buf_size);\n+\tif (ret)\n+\t\treturn ret;\n \n \tif (dev->data->rx_queues[idx]) {\n \t\thns3_rx_queue_release(dev->data->rx_queues[idx]);\n@@ -1284,14 +1312,6 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \tq_info.type = \"hns3 RX queue\";\n \tq_info.ring_name = \"rx_ring\";\n \n-\tif (hns3_rx_buf_len_calc(mp, &rx_buf_size)) {\n-\t\thns3_err(hw, \"rxq mbufs' data room size:%u is not enough! 
\"\n-\t\t\t\t\"minimal data room size:%u.\",\n-\t\t\t\trte_pktmbuf_data_room_size(mp),\n-\t\t\t\tHNS3_MIN_BD_BUF_SIZE + RTE_PKTMBUF_HEADROOM);\n-\t\treturn -EINVAL;\n-\t}\n-\n \trxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);\n \tif (rxq == NULL) {\n \t\thns3_err(hw,\n@@ -1300,6 +1320,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t}\n \n \trxq->hns = hns;\n+\trxq->ptype_tbl = &hns->ptype_tbl;\n \trxq->mb_pool = mp;\n \trxq->rx_free_thresh = (conf->rx_free_thresh > 0) ?\n \t\tconf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;\n@@ -1339,6 +1360,8 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \telse\n \t\trxq->crc_len = 0;\n \n+\trxq->bulk_mbuf_num = 0;\n+\n \trte_spinlock_lock(&hw->lock);\n \tdev->data->rx_queues[idx] = rxq;\n \trte_spinlock_unlock(&hw->lock);\n@@ -1346,104 +1369,40 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \treturn 0;\n }\n \n-static inline uint32_t\n-rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint32_t ol_info)\n+void\n+hns3_rx_scattered_reset(struct rte_eth_dev *dev)\n {\n-#define HNS3_L2TBL_NUM\t4\n-#define HNS3_L3TBL_NUM\t16\n-#define HNS3_L4TBL_NUM\t16\n-#define HNS3_OL3TBL_NUM\t16\n-#define HNS3_OL4TBL_NUM\t16\n-\tuint32_t pkt_type = 0;\n-\tuint32_t l2id, l3id, l4id;\n-\tuint32_t ol3id, ol4id;\n-\n-\tstatic const uint32_t l2table[HNS3_L2TBL_NUM] = {\n-\t\tRTE_PTYPE_L2_ETHER,\n-\t\tRTE_PTYPE_L2_ETHER_QINQ,\n-\t\tRTE_PTYPE_L2_ETHER_VLAN,\n-\t\tRTE_PTYPE_L2_ETHER_VLAN\n-\t};\n-\n-\tstatic const uint32_t l3table[HNS3_L3TBL_NUM] = {\n-\t\tRTE_PTYPE_L3_IPV4,\n-\t\tRTE_PTYPE_L3_IPV6,\n-\t\tRTE_PTYPE_L2_ETHER_ARP,\n-\t\tRTE_PTYPE_L2_ETHER,\n-\t\tRTE_PTYPE_L3_IPV4_EXT,\n-\t\tRTE_PTYPE_L3_IPV6_EXT,\n-\t\tRTE_PTYPE_L2_ETHER_LLDP,\n-\t\t0, 0, 0, 0, 0, 0, 0, 0, 0\n-\t};\n-\n-\tstatic const uint32_t l4table[HNS3_L4TBL_NUM] = {\n-\t\tRTE_PTYPE_L4_UDP,\n-\t\tRTE_PTYPE_L4_TCP,\n-\t\tRTE_PTYPE_TUNNEL_GRE,\n-\t\tRTE_PTYPE_L4_SCTP,\n-\t\tRTE_PTYPE_L4_IGMP,\n-\t\tRTE_PTYPE_L4_ICMP,\n-\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n-\t};\n-\n-\tstatic const uint32_t inner_l2table[HNS3_L2TBL_NUM] = {\n-\t\tRTE_PTYPE_INNER_L2_ETHER,\n-\t\tRTE_PTYPE_INNER_L2_ETHER_VLAN,\n-\t\tRTE_PTYPE_INNER_L2_ETHER_QINQ,\n-\t\t0\n-\t};\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tstruct hns3_hw *hw = &hns->hw;\n \n-\tstatic const uint32_t inner_l3table[HNS3_L3TBL_NUM] = {\n-\t\tRTE_PTYPE_INNER_L3_IPV4,\n-\t\tRTE_PTYPE_INNER_L3_IPV6,\n-\t\t0,\n-\t\tRTE_PTYPE_INNER_L2_ETHER,\n-\t\tRTE_PTYPE_INNER_L3_IPV4_EXT,\n-\t\tRTE_PTYPE_INNER_L3_IPV6_EXT,\n-\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n-\t};\n+\thw->rx_buf_len = 0;\n+\tdev->data->scattered_rx = false;\n+}\n \n-\tstatic const uint32_t inner_l4table[HNS3_L4TBL_NUM] = {\n-\t\tRTE_PTYPE_INNER_L4_UDP,\n-\t\tRTE_PTYPE_INNER_L4_TCP,\n-\t\tRTE_PTYPE_TUNNEL_GRE,\n-\t\tRTE_PTYPE_INNER_L4_SCTP,\n-\t\tRTE_PTYPE_L4_IGMP,\n-\t\tRTE_PTYPE_INNER_L4_ICMP,\n-\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n-\t};\n+void\n+hns3_rx_scattered_calc(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_eth_conf *dev_conf = &dev->data->dev_conf;\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tstruct hns3_hw *hw = &hns->hw;\n+\tstruct hns3_rx_queue *rxq;\n+\tuint32_t queue_id;\n \n-\tstatic const uint32_t ol3table[HNS3_OL3TBL_NUM] = {\n-\t\tRTE_PTYPE_L3_IPV4,\n-\t\tRTE_PTYPE_L3_IPV6,\n-\t\t0, 0,\n-\t\tRTE_PTYPE_L3_IPV4_EXT,\n-\t\tRTE_PTYPE_L3_IPV6_EXT,\n-\t\t0, 0, 0, 0, 0, 0, 0, 0, 0,\n-\t\tRTE_PTYPE_UNKNOWN\n-\t};\n+\tif (dev->data->rx_queues == NULL)\n+\t\treturn;\n \n-\tstatic const 
uint32_t ol4table[HNS3_OL4TBL_NUM] = {\n-\t\t0,\n-\t\tRTE_PTYPE_TUNNEL_VXLAN,\n-\t\tRTE_PTYPE_TUNNEL_NVGRE,\n-\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n-\t};\n+\tfor (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {\n+\t\trxq = dev->data->rx_queues[queue_id];\n+\t\tif (hw->rx_buf_len == 0)\n+\t\t\thw->rx_buf_len = rxq->rx_buf_len;\n+\t\telse\n+\t\t\thw->rx_buf_len = RTE_MIN(hw->rx_buf_len,\n+\t\t\t\t\t\t rxq->rx_buf_len);\n+\t}\n \n-\tl2id = hns3_get_field(pkt_info, HNS3_RXD_STRP_TAGP_M,\n-\t\t\t      HNS3_RXD_STRP_TAGP_S);\n-\tl3id = hns3_get_field(pkt_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);\n-\tl4id = hns3_get_field(pkt_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);\n-\tol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);\n-\tol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);\n-\n-\tif (ol4table[ol4id])\n-\t\tpkt_type |= (inner_l2table[l2id] | inner_l3table[l3id] |\n-\t\t\t     inner_l4table[l4id] | ol3table[ol3id] |\n-\t\t\t     ol4table[ol4id]);\n-\telse\n-\t\tpkt_type |= (l2table[l2id] | l3table[l3id] | l4table[l4id]);\n-\treturn pkt_type;\n+\tif (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||\n+\t    dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)\n+\t\tdev->data->scattered_rx = true;\n }\n \n const uint32_t *\n@@ -1468,81 +1427,69 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)\n \t\tRTE_PTYPE_UNKNOWN\n \t};\n \n-\tif (dev->rx_pkt_burst == hns3_recv_pkts)\n+\tif (dev->rx_pkt_burst == hns3_recv_pkts ||\n+\t    dev->rx_pkt_burst == hns3_recv_scattered_pkts)\n \t\treturn ptypes;\n \n \treturn NULL;\n }\n \n-static int\n-hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,\n-\t\t   uint32_t bd_base_info, uint32_t l234_info,\n-\t\t   uint32_t *cksum_err)\n+void\n+hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)\n {\n-\tuint32_t tmp = 0;\n-\n-\tif (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {\n-\t\trxq->l2_errors++;\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tif (unlikely(rxm->pkt_len == 0 ||\n-\t\t(l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {\n-\t\trxq->pkt_len_errors++;\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tif (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {\n-\t\tif (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {\n-\t\t\trxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;\n-\t\t\trxq->l3_csum_erros++;\n-\t\t\ttmp |= HNS3_L3_CKSUM_ERR;\n-\t\t}\n-\n-\t\tif (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {\n-\t\t\trxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;\n-\t\t\trxq->l4_csum_erros++;\n-\t\t\ttmp |= HNS3_L4_CKSUM_ERR;\n-\t\t}\n-\n-\t\tif (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {\n-\t\t\trxq->ol3_csum_erros++;\n-\t\t\ttmp |= HNS3_OUTER_L3_CKSUM_ERR;\n-\t\t}\n-\n-\t\tif (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {\n-\t\t\trxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;\n-\t\t\trxq->ol4_csum_erros++;\n-\t\t\ttmp |= HNS3_OUTER_L4_CKSUM_ERR;\n-\t\t}\n-\t}\n-\t*cksum_err = tmp;\n-\n-\treturn 0;\n-}\n-\n-static void\n-hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, uint64_t packet_type,\n-\t\t       const uint32_t cksum_err)\n-{\n-\tif (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {\n-\t\tif (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&\n-\t\t    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)\n-\t\t\trxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;\n-\t\tif (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&\n-\t\t    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)\n-\t\t\trxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;\n-\t\tif (likely(packet_type & RTE_PTYPE_L4_MASK) &&\n-\t\t    (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)\n-\t\t\trxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;\n-\t} else 
{\n-\t\tif (likely(packet_type & RTE_PTYPE_L3_MASK) &&\n-\t\t    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)\n-\t\t\trxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;\n-\t\tif (likely(packet_type & RTE_PTYPE_L4_MASK) &&\n-\t\t    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)\n-\t\t\trxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;\n-\t}\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tstruct hns3_ptype_table *tbl = &hns->ptype_tbl;\n+\n+\tmemset(tbl, 0, sizeof(*tbl));\n+\n+\ttbl->l2table[0] = RTE_PTYPE_L2_ETHER;\n+\ttbl->l2table[1] = RTE_PTYPE_L2_ETHER_QINQ;\n+\ttbl->l2table[2] = RTE_PTYPE_L2_ETHER_VLAN;\n+\ttbl->l2table[3] = RTE_PTYPE_L2_ETHER_VLAN;\n+\n+\ttbl->l3table[0] = RTE_PTYPE_L3_IPV4;\n+\ttbl->l3table[1] = RTE_PTYPE_L3_IPV6;\n+\ttbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;\n+\ttbl->l3table[3] = RTE_PTYPE_L2_ETHER;\n+\ttbl->l3table[4] = RTE_PTYPE_L3_IPV4_EXT;\n+\ttbl->l3table[5] = RTE_PTYPE_L3_IPV6_EXT;\n+\ttbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;\n+\n+\ttbl->l4table[0] = RTE_PTYPE_L4_UDP;\n+\ttbl->l4table[1] = RTE_PTYPE_L4_TCP;\n+\ttbl->l4table[2] = RTE_PTYPE_TUNNEL_GRE;\n+\ttbl->l4table[3] = RTE_PTYPE_L4_SCTP;\n+\ttbl->l4table[4] = RTE_PTYPE_L4_IGMP;\n+\ttbl->l4table[5] = RTE_PTYPE_L4_ICMP;\n+\n+\ttbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;\n+\ttbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;\n+\ttbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;\n+\n+\ttbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;\n+\ttbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;\n+\ttbl->inner_l3table[2] = 0;\n+\ttbl->inner_l3table[3] = RTE_PTYPE_INNER_L2_ETHER;\n+\ttbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;\n+\ttbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;\n+\n+\ttbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;\n+\ttbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;\n+\ttbl->inner_l4table[2] = RTE_PTYPE_TUNNEL_GRE;\n+\ttbl->inner_l4table[3] = RTE_PTYPE_INNER_L4_SCTP;\n+\ttbl->inner_l4table[4] = RTE_PTYPE_L4_IGMP;\n+\ttbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;\n+\n+\ttbl->ol3table[0] = RTE_PTYPE_L3_IPV4;\n+\ttbl->ol3table[1] = RTE_PTYPE_L3_IPV6;\n+\ttbl->ol3table[2] = 0;\n+\ttbl->ol3table[3] = 0;\n+\ttbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;\n+\ttbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;\n+\n+\ttbl->ol4table[0] = 0;\n+\ttbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;\n+\ttbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;\n }\n \n static inline void\n@@ -1612,6 +1559,23 @@ recalculate_data_len(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,\n \t\trxm->data_len = (uint16_t)(data_len - crc_len);\n }\n \n+static inline struct rte_mbuf *\n+hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq)\n+{\n+\tint ret;\n+\n+\tif (likely(rxq->bulk_mbuf_num > 0))\n+\t\treturn rxq->bulk_mbuf[--rxq->bulk_mbuf_num];\n+\n+\tret = rte_mempool_get_bulk(rxq->mb_pool, (void **)rxq->bulk_mbuf,\n+\t\t\t\t   HNS3_BULK_ALLOC_MBUF_NUM);\n+\tif (likely(ret == 0)) {\n+\t\trxq->bulk_mbuf_num = HNS3_BULK_ALLOC_MBUF_NUM;\n+\t\treturn rxq->bulk_mbuf[--rxq->bulk_mbuf_num];\n+\t} else\n+\t\treturn rte_mbuf_raw_alloc(rxq->mb_pool);\n+}\n+\n uint16_t\n hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n {\n@@ -1620,6 +1584,119 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \tstruct hns3_rx_queue *rxq;      /* RX queue */\n \tstruct hns3_entry *sw_ring;\n \tstruct hns3_entry *rxe;\n+\tstruct hns3_desc rxd;\n+\tstruct rte_mbuf *nmb;           /* pointer of the new mbuf */\n+\tstruct rte_mbuf *rxm;\n+\tuint32_t bd_base_info;\n+\tuint32_t cksum_err;\n+\tuint32_t l234_info;\n+\tuint32_t 
ol_info;\n+\tuint64_t dma_addr;\n+\tuint16_t nb_rx_bd;\n+\tuint16_t nb_rx;\n+\tuint16_t rx_id;\n+\tint ret;\n+\n+\tnb_rx = 0;\n+\tnb_rx_bd = 0;\n+\trxq = rx_queue;\n+\trx_ring = rxq->rx_ring;\n+\tsw_ring = rxq->sw_ring;\n+\trx_id = rxq->next_to_use;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\trxdp = &rx_ring[rx_id];\n+\t\tbd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);\n+\t\tif (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))\n+\t\t\tbreak;\n+\n+\t\trxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -\n+\t\t\t   (1u << HNS3_RXD_VLD_B)];\n+\n+\t\tnmb = hns3_rx_alloc_buffer(rxq);\n+\t\tif (unlikely(nmb == NULL)) {\n+\t\t\tuint16_t port_id;\n+\n+\t\t\tport_id = rxq->port_id;\n+\t\t\trte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tnb_rx_bd++;\n+\t\trxe = &sw_ring[rx_id];\n+\t\trx_id++;\n+\t\tif (unlikely(rx_id == rxq->nb_rx_desc))\n+\t\t\trx_id = 0;\n+\n+\t\trte_prefetch0(sw_ring[rx_id].mbuf);\n+\t\tif ((rx_id & HNS3_RX_RING_PREFECTH_MASK) == 0) {\n+\t\t\trte_prefetch0(&rx_ring[rx_id]);\n+\t\t\trte_prefetch0(&sw_ring[rx_id]);\n+\t\t}\n+\n+\t\trxm = rxe->mbuf;\n+\t\trxe->mbuf = nmb;\n+\n+\t\tdma_addr = rte_mbuf_data_iova_default(nmb);\n+\t\trxdp->addr = rte_cpu_to_le_64(dma_addr);\n+\t\trxdp->rx.bd_base_info = 0;\n+\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trxm->pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len)) -\n+\t\t\t\trxq->crc_len;\n+\t\trxm->data_len = rxm->pkt_len;\n+\t\trxm->port = rxq->port_id;\n+\t\trxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);\n+\t\trxm->ol_flags = PKT_RX_RSS_HASH;\n+\t\tif (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {\n+\t\t\trxm->hash.fdir.hi =\n+\t\t\t\trte_le_to_cpu_16(rxd.rx.fd_id);\n+\t\t\trxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;\n+\t\t}\n+\t\trxm->nb_segs = 1;\n+\t\trxm->next = NULL;\n+\n+\t\t/* Load remained descriptor data and extract necessary fields */\n+\t\tl234_info = rte_le_to_cpu_32(rxd.rx.l234_info);\n+\t\tol_info = rte_le_to_cpu_32(rxd.rx.ol_info);\n+\t\tret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,\n+\t\t\t\t\t l234_info, &cksum_err);\n+\t\tif (unlikely(ret))\n+\t\t\tgoto pkt_err;\n+\n+\t\trxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);\n+\n+\t\tif (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))\n+\t\t\thns3_rx_set_cksum_flag(rxm, rxm->packet_type,\n+\t\t\t\t\t       cksum_err);\n+\t\thns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);\n+\n+\t\trx_pkts[nb_rx++] = rxm;\n+\t\tcontinue;\n+pkt_err:\n+\t\trte_pktmbuf_free(rxm);\n+\t}\n+\n+\trxq->next_to_use = rx_id;\n+\trxq->rx_free_hold += nb_rx_bd;\n+\tif (rxq->rx_free_hold > rxq->rx_free_thresh) {\n+\t\thns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);\n+\t\trxq->rx_free_hold = 0;\n+\t}\n+\n+\treturn nb_rx;\n+}\n+\n+uint16_t\n+hns3_recv_scattered_pkts(void *rx_queue,\n+\t\t\t struct rte_mbuf **rx_pkts,\n+\t\t\t uint16_t nb_pkts)\n+{\n+\tvolatile struct hns3_desc *rx_ring;  /* RX ring (desc) */\n+\tvolatile struct hns3_desc *rxdp;     /* pointer of the current desc */\n+\tstruct hns3_rx_queue *rxq;      /* RX queue */\n+\tstruct hns3_entry *sw_ring;\n+\tstruct hns3_entry *rxe;\n \tstruct rte_mbuf *first_seg;\n \tstruct rte_mbuf *last_seg;\n \tstruct hns3_desc rxd;\n@@ -1632,9 +1709,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \tuint32_t gro_size;\n \tuint32_t ol_info;\n \tuint64_t dma_addr;\n-\tuint16_t data_len;\n \tuint16_t nb_rx_bd;\n-\tuint16_t pkt_len;\n \tuint16_t nb_rx;\n \tuint16_t rx_id;\n \tint ret;\n@@ -1652,8 +1727,9 @@ hns3_recv_pkts(void *rx_queue, struct 
rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \twhile (nb_rx < nb_pkts) {\n \t\trxdp = &rx_ring[rx_id];\n \t\tbd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);\n-\t\tif (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))\n+\t\tif (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))\n \t\t\tbreak;\n+\n \t\t/*\n \t\t * The interactive process between software and hardware of\n \t\t * receiving a new packet in hns3 network engine:\n@@ -1716,7 +1792,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\trxd = rxdp[(bd_base_info & (1u << HNS3_RXD_VLD_B)) -\n \t\t\t   (1u << HNS3_RXD_VLD_B)];\n \n-\t\tnmb = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\tnmb = hns3_rx_alloc_buffer(rxq);\n \t\tif (unlikely(nmb == NULL)) {\n \t\t\tdev = &rte_eth_devices[rxq->port_id];\n \t\t\tdev->data->rx_mbuf_alloc_failed++;\n@@ -1730,7 +1806,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t\trx_id = 0;\n \n \t\trte_prefetch0(sw_ring[rx_id].mbuf);\n-\t\tif ((rx_id & 0x3) == 0) {\n+\t\tif ((rx_id & HNS3_RX_RING_PREFECTH_MASK) == 0) {\n \t\t\trte_prefetch0(&rx_ring[rx_id]);\n \t\t\trte_prefetch0(&sw_ring[rx_id]);\n \t\t}\n@@ -1742,15 +1818,6 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\trxdp->rx.bd_base_info = 0;\n \t\trxdp->addr = dma_addr;\n \n-\t\t/*\n-\t\t * Load remained descriptor data and extract necessary fields.\n-\t\t * Data size from buffer description may contains CRC len,\n-\t\t * packet len should subtract it.\n-\t\t */\n-\t\tdata_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.size));\n-\t\tl234_info = rte_le_to_cpu_32(rxd.rx.l234_info);\n-\t\tol_info = rte_le_to_cpu_32(rxd.rx.ol_info);\n-\n \t\tif (first_seg == NULL) {\n \t\t\tfirst_seg = rxm;\n \t\t\tfirst_seg->nb_segs = 1;\n@@ -1760,10 +1827,11 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t}\n \n \t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n-\t\trxm->data_len = data_len;\n+\t\trxm->data_len = rte_le_to_cpu_16(rxd.rx.size);\n \n-\t\tif (!hns3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {\n+\t\tif (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {\n \t\t\tlast_seg = rxm;\n+\t\t\trxm->next = NULL;\n \t\t\tcontinue;\n \t\t}\n \n@@ -1772,8 +1840,7 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t * buffer description may contains CRC len, packet len should\n \t\t * subtract it, same as data len.\n \t\t */\n-\t\tpkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.rx.pkt_len));\n-\t\tfirst_seg->pkt_len = pkt_len;\n+\t\tfirst_seg->pkt_len = rte_le_to_cpu_16(rxd.rx.pkt_len);\n \n \t\t/*\n \t\t * This is the last buffer of the received packet. 
If the CRC\n@@ -1789,15 +1856,15 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\tif (unlikely(rxq->crc_len > 0)) {\n \t\t\tfirst_seg->pkt_len -= rxq->crc_len;\n \t\t\trecalculate_data_len(first_seg, last_seg, rxm, rxq,\n-\t\t\t\tdata_len);\n+\t\t\t\trxm->data_len);\n \t\t}\n \n \t\tfirst_seg->port = rxq->port_id;\n \t\tfirst_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);\n \t\tfirst_seg->ol_flags = PKT_RX_RSS_HASH;\n-\t\tif (unlikely(hns3_get_bit(bd_base_info, HNS3_RXD_LUM_B))) {\n+\t\tif (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {\n \t\t\tfirst_seg->hash.fdir.hi =\n-\t\t\t\trte_le_to_cpu_32(rxd.rx.fd_id);\n+\t\t\t\trte_le_to_cpu_16(rxd.rx.fd_id);\n \t\t\tfirst_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;\n \t\t}\n \n@@ -1808,13 +1875,15 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t\tfirst_seg->tso_segsz = gro_size;\n \t\t}\n \n+\t\tl234_info = rte_le_to_cpu_32(rxd.rx.l234_info);\n+\t\tol_info = rte_le_to_cpu_32(rxd.rx.ol_info);\n \t\tret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,\n \t\t\t\t\t l234_info, &cksum_err);\n \t\tif (unlikely(ret))\n \t\t\tgoto pkt_err;\n \n-\t\tfirst_seg->packet_type = rxd_pkt_info_to_pkt_type(l234_info,\n-\t\t\t\t\t\t\t\t  ol_info);\n+\t\tfirst_seg->packet_type = hns3_rx_calc_ptype(rxq,\n+\t\t\t\t\t\tl234_info, ol_info);\n \n \t\tif (bd_base_info & BIT(HNS3_RXD_L3L4P_B))\n \t\t\thns3_rx_set_cksum_flag(first_seg,\n@@ -1844,6 +1913,46 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n }\n \n int\n+hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,\n+\t\t       struct rte_eth_burst_mode *mode)\n+{\n+\tstatic const struct {\n+\t\teth_rx_burst_t pkt_burst;\n+\t\tconst char *info;\n+\t} burst_infos[] = {\n+\t\t{ hns3_recv_pkts,\t\t\"Scalar\" },\n+\t\t{ hns3_recv_scattered_pkts,\t\"Scalar Scattered\" },\n+\t};\n+\n+\teth_rx_burst_t pkt_burst = dev->rx_pkt_burst;\n+\tint ret = -EINVAL;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < RTE_DIM(burst_infos); i++) {\n+\t\tif (pkt_burst == burst_infos[i].pkt_burst) {\n+\t\t\tsnprintf(mode->info, sizeof(mode->info), \"%s\",\n+\t\t\t\t burst_infos[i].info);\n+\t\t\tret = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static eth_rx_burst_t\n+hns3_get_rx_function(struct rte_eth_dev *dev)\n+{\n+\tstruct hns3_adapter *hns = dev->data->dev_private;\n+\tuint64_t offloads = dev->data->dev_conf.rxmode.offloads;\n+\n+\tif (hns->rx_simple_allowed && !dev->data->scattered_rx &&\n+\t    (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)\n+\t\treturn hns3_recv_pkts;\n+\n+\treturn hns3_recv_scattered_pkts;\n+}\n+int\n hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t\t    unsigned int socket_id, const struct rte_eth_txconf *conf)\n {\n@@ -1932,7 +2041,8 @@ hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq)\n \tstruct hns3_desc *desc = &txq->tx_ring[tx_next_clean];\n \tstruct rte_mbuf *mbuf;\n \n-\twhile ((!hns3_get_bit(desc->tx.tp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B)) &&\n+\twhile ((!(desc->tx.tp_fe_sc_vld_ra_ri &\n+\t\trte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) &&\n \t\ttx_next_use != tx_next_clean) {\n \t\tmbuf = tx_bak_pkt->mbuf;\n \t\tif (mbuf) {\n@@ -2818,7 +2928,7 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)\n \n \tif (hns->hw.adapter_state == HNS3_NIC_STARTED &&\n \t    rte_atomic16_read(&hns->hw.reset.resetting) == 0) {\n-\t\teth_dev->rx_pkt_burst = hns3_recv_pkts;\n+\t\teth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);\n 
\t\teth_dev->tx_pkt_burst = hns3_xmit_pkts;\n \t\teth_dev->tx_pkt_prepare = hns3_prep_pkts;\n \t} else {\ndiff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h\nindex c1a34e2..3d3f0a0 100644\n--- a/drivers/net/hns3/hns3_rxtx.h\n+++ b/drivers/net/hns3/hns3_rxtx.h\n@@ -10,6 +10,8 @@\n #define HNS3_DEFAULT_RING_DESC  1024\n #define\tHNS3_ALIGN_RING_DESC\t32\n #define HNS3_RING_BASE_ALIGN\t128\n+#define HNS3_BULK_ALLOC_MBUF_NUM\t32\n+\n #define HNS3_DEFAULT_RX_FREE_THRESH\t32\n \n #define HNS3_512_BD_BUF_SIZE\t512\n@@ -233,6 +235,7 @@ struct hns3_rx_queue {\n \tvoid *io_base;\n \tvolatile void *io_head_reg;\n \tstruct hns3_adapter *hns;\n+\tstruct hns3_ptype_table *ptype_tbl;\n \tstruct rte_mempool *mb_pool;\n \tstruct hns3_desc *rx_ring;\n \tuint64_t rx_ring_phys_addr; /* RX ring DMA address */\n@@ -245,13 +248,13 @@ struct hns3_rx_queue {\n \tuint16_t queue_id;\n \tuint16_t port_id;\n \tuint16_t nb_rx_desc;\n-\tuint16_t next_to_use;\n \tuint16_t rx_buf_len;\n \t/*\n \t * threshold for the number of BDs waited to passed to hardware. If the\n \t * number exceeds the threshold, driver will pass these BDs to hardware.\n \t */\n \tuint16_t rx_free_thresh;\n+\tuint16_t next_to_use;    /* index of next BD to be polled */\n \tuint16_t rx_free_hold;   /* num of BDs waited to passed to hardware */\n \n \t/*\n@@ -272,6 +275,9 @@ struct hns3_rx_queue {\n \tuint64_t l4_csum_erros;\n \tuint64_t ol3_csum_erros;\n \tuint64_t ol4_csum_erros;\n+\n+\tstruct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];\n+\tuint16_t bulk_mbuf_num;\n };\n \n struct hns3_tx_queue {\n@@ -380,6 +386,120 @@ enum hns3_cksum_status {\n \tHNS3_OUTER_L4_CKSUM_ERR = 8\n };\n \n+static inline int\n+hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,\n+\t\t   uint32_t bd_base_info, uint32_t l234_info,\n+\t\t   uint32_t *cksum_err)\n+{\n+#define L2E_TRUNC_ERR_FLAG\t(BIT(HNS3_RXD_L2E_B) | \\\n+\t\t\t\t BIT(HNS3_RXD_TRUNCAT_B))\n+#define CHECKSUM_ERR_FLAG\t(BIT(HNS3_RXD_L3E_B) | \\\n+\t\t\t\t BIT(HNS3_RXD_L4E_B) | \\\n+\t\t\t\t BIT(HNS3_RXD_OL3E_B) | \\\n+\t\t\t\t BIT(HNS3_RXD_OL4E_B))\n+\n+\tuint32_t tmp = 0;\n+\n+\t/*\n+\t * If packet len bigger than mtu when recv with no-scattered algorithm,\n+\t * the first n bd will without FE bit, we need process this sisution.\n+\t * Note: we don't need add statistic counter because lastest bd which\n+\t *       with FE bit will mark HNS3_RXD_L2E_B bit.\n+\t */\n+\tif (unlikely((bd_base_info & BIT(HNS3_RXD_FE_B)) == 0))\n+\t\treturn -EINVAL;\n+\n+\tif (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {\n+\t\tif (l234_info & BIT(HNS3_RXD_L2E_B))\n+\t\t\trxq->l2_errors++;\n+\t\telse\n+\t\t\trxq->pkt_len_errors++;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {\n+\t\tif (likely((l234_info & CHECKSUM_ERR_FLAG) == 0)) {\n+\t\t\t*cksum_err = 0;\n+\t\t\treturn 0;\n+\t\t}\n+\n+\t\tif (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {\n+\t\t\trxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;\n+\t\t\trxq->l3_csum_erros++;\n+\t\t\ttmp |= HNS3_L3_CKSUM_ERR;\n+\t\t}\n+\n+\t\tif (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {\n+\t\t\trxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\t\trxq->l4_csum_erros++;\n+\t\t\ttmp |= HNS3_L4_CKSUM_ERR;\n+\t\t}\n+\n+\t\tif (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {\n+\t\t\trxq->ol3_csum_erros++;\n+\t\t\ttmp |= HNS3_OUTER_L3_CKSUM_ERR;\n+\t\t}\n+\n+\t\tif (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {\n+\t\t\trxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;\n+\t\t\trxq->ol4_csum_erros++;\n+\t\t\ttmp |= 
HNS3_OUTER_L4_CKSUM_ERR;\n+\t\t}\n+\t}\n+\t*cksum_err = tmp;\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, const uint64_t packet_type,\n+\t\t       const uint32_t cksum_err)\n+{\n+\tif (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {\n+\t\tif (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&\n+\t\t    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)\n+\t\t\trxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;\n+\t\tif (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&\n+\t\t    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)\n+\t\t\trxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t\tif (likely(packet_type & RTE_PTYPE_L4_MASK) &&\n+\t\t    (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)\n+\t\t\trxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;\n+\t} else {\n+\t\tif (likely(packet_type & RTE_PTYPE_L3_MASK) &&\n+\t\t    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)\n+\t\t\trxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;\n+\t\tif (likely(packet_type & RTE_PTYPE_L4_MASK) &&\n+\t\t    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)\n+\t\t\trxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t}\n+}\n+\n+static inline uint32_t\n+hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,\n+\t\t   const uint32_t ol_info)\n+{\n+\tconst struct hns3_ptype_table *const ptype_tbl = rxq->ptype_tbl;\n+\tuint32_t l2id, l3id, l4id;\n+\tuint32_t ol3id, ol4id;\n+\n+\tol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);\n+\tol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);\n+\tl2id = hns3_get_field(l234_info, HNS3_RXD_STRP_TAGP_M,\n+\t\t\t      HNS3_RXD_STRP_TAGP_S);\n+\tl3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);\n+\tl4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);\n+\n+\tif (unlikely(ptype_tbl->ol4table[ol4id]))\n+\t\treturn ptype_tbl->inner_l2table[l2id] |\n+\t\t\tptype_tbl->inner_l3table[l3id] |\n+\t\t\tptype_tbl->inner_l4table[l4id] |\n+\t\t\tptype_tbl->ol3table[ol3id] | ptype_tbl->ol4table[ol4id];\n+\telse\n+\t\treturn ptype_tbl->l2table[l2id] | ptype_tbl->l3table[l3id] |\n+\t\t\tptype_tbl->l4table[l4id];\n+}\n+\n void hns3_dev_rx_queue_release(void *queue);\n void hns3_dev_tx_queue_release(void *queue);\n void hns3_free_all_queues(struct rte_eth_dev *dev);\n@@ -398,11 +518,17 @@ int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,\n \t\t\tunsigned int socket, const struct rte_eth_txconf *conf);\n uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t\tuint16_t nb_pkts);\n+uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\t\t  uint16_t nb_pkts);\n+int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,\n+\t\t\t   __rte_unused uint16_t queue_id,\n+\t\t\t   struct rte_eth_burst_mode *mode);\n uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\tuint16_t nb_pkts);\n uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\tuint16_t nb_pkts);\n const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);\n+void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);\n void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);\n void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,\n \t\t\t    uint8_t gl_idx, uint16_t gl_value);\n@@ -415,6 +541,8 @@ int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,\n int hns3_config_gro(struct hns3_hw *hw, bool en);\n int hns3_restore_gro_conf(struct hns3_hw *hw);\n void hns3_update_all_queues_pvid_state(struct hns3_hw *hw);\n+void 
hns3_rx_scattered_reset(struct rte_eth_dev *dev);\n+void hns3_rx_scattered_calc(struct rte_eth_dev *dev);\n void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n \t\t       struct rte_eth_rxq_info *qinfo);\n void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n",
    "prefixes": [
        "3/8"
    ]
}
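
Write access goes through the PUT and PATCH methods listed in the Allow header
above. A hedged sketch of a partial update, again in Python with requests; the
token value is a placeholder, the chosen state is purely illustrative, and
whether an account may change a patch's state is a Patchwork permission
question (typically the project's maintainers and delegates only):

    import requests

    # Patchwork authenticates API writes with a per-user token.
    headers = {"Authorization": "Token <your-api-token>"}  # placeholder

    # PATCH performs a partial update: only the listed fields change.
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/76655/",
        headers=headers,
        json={"state": "accepted"},  # illustrative state transition
    )
    resp.raise_for_status()

The read-only artifacts need no authentication: the "mbox" URL in the payload
returns the raw patch email, suitable for piping into git am.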