get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
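As a quick sketch of scripting the read-only side of this endpoint, the snippet below fetches the record with Python's requests library. The URL and field names come from the response shown below; the ?format=json parameter is assumed to be accepted alongside the ?format=api variant used here:

import requests

# Fetch the patch detail; format=json is assumed to force a plain JSON
# rendering instead of the browsable API page (format=api) shown below.
resp = requests.get("http://patches.dpdk.org/api/patches/94337/",
                    params={"format": "json"})
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # "[v6,12/19] net/ngbe: add Rx queue setup and release"
print(patch["state"])  # "changes-requested"
print(patch["mbox"])   # mbox URL, suitable for downloading and applying with git-am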

GET /api/patches/94337/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94337,
    "url": "http://patches.dpdk.org/api/patches/94337/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210617110005.4132926-13-jiawenwu@trustnetic.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210617110005.4132926-13-jiawenwu@trustnetic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210617110005.4132926-13-jiawenwu@trustnetic.com",
    "date": "2021-06-17T10:59:58",
    "name": "[v6,12/19] net/ngbe: add Rx queue setup and release",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "8d06c2c3be16b38bfb4f57e576b472d33cfeef48",
    "submitter": {
        "id": 1932,
        "url": "http://patches.dpdk.org/api/people/1932/?format=api",
        "name": "Jiawen Wu",
        "email": "jiawenwu@trustnetic.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210617110005.4132926-13-jiawenwu@trustnetic.com/mbox/",
    "series": [
        {
            "id": 17372,
            "url": "http://patches.dpdk.org/api/series/17372/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=17372",
            "date": "2021-06-17T10:59:46",
            "name": "net: ngbe PMD",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/17372/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/94337/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/94337/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 8944BA0C4D;\n\tThu, 17 Jun 2021 12:59:40 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1D2D941140;\n\tThu, 17 Jun 2021 12:58:35 +0200 (CEST)",
            "from smtpbgbr2.qq.com (smtpbgbr2.qq.com [54.207.22.56])\n by mails.dpdk.org (Postfix) with ESMTP id 4791341159\n for <dev@dpdk.org>; Thu, 17 Jun 2021 12:58:32 +0200 (CEST)",
            "from wxdbg.localdomain.com (unknown [183.129.236.74])\n by esmtp6.qq.com (ESMTP) with\n id ; Thu, 17 Jun 2021 18:58:26 +0800 (CST)"
        ],
        "X-QQ-mid": "bizesmtp46t1623927506trnggv1o",
        "X-QQ-SSF": "01400000000000D0E000B00A0000000",
        "X-QQ-FEAT": "YKCDl5A3/ap6M8oDPuBU/NwnGUSe/zEHuwEVBLBI9GRksCLMKGDZOgDSlrKq2\n C26JO3G0bIuxmUDLwy/UaoqJ82kg49ZsbZvZAX0pb7ETGuBelttyxwjsj5UE7MM/3gLsQ8J\n 9XBAdDFTmwSbn26/Xe16KXckeeEgl67ykMsg30dXzCaMyzfFuPPg3Fp7DfV/DeiwC1EstbC\n Q6t705ENaWgzTbmFSeCvtXDwIi6iHEQLd8p7RBN6waKr4lsuPOyr1PjIst+rZKWvoWY2W3J\n u/4e8iurfNXwDnIP90gsDvpH4Hs1cIv5v+OvJjCZmz63Nh2y31y/BMIITe/p+6KsBdi+NYn\n E73S1UaTt6hgLdbR2NfHiorQEg71XNM1eC7wlob",
        "X-QQ-GoodBg": "2",
        "From": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "To": "dev@dpdk.org",
        "Cc": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "Date": "Thu, 17 Jun 2021 18:59:58 +0800",
        "Message-Id": "<20210617110005.4132926-13-jiawenwu@trustnetic.com>",
        "X-Mailer": "git-send-email 2.27.0",
        "In-Reply-To": "<20210617110005.4132926-1-jiawenwu@trustnetic.com>",
        "References": "<20210617110005.4132926-1-jiawenwu@trustnetic.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-QQ-SENDSIZE": "520",
        "Feedback-ID": "bizesmtp:trustnetic.com:qybgforeign:qybgforeign5",
        "X-QQ-Bgrelay": "1",
        "Subject": "[dpdk-dev] [PATCH v6 12/19] net/ngbe: add Rx queue setup and release",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Setup device Rx queue and release Rx queue.\n\nSigned-off-by: Jiawen Wu <jiawenwu@trustnetic.com>\n---\n drivers/net/ngbe/meson.build   |   1 +\n drivers/net/ngbe/ngbe_ethdev.c |  37 +++-\n drivers/net/ngbe/ngbe_ethdev.h |  16 ++\n drivers/net/ngbe/ngbe_rxtx.c   | 308 +++++++++++++++++++++++++++++++++\n drivers/net/ngbe/ngbe_rxtx.h   |  96 ++++++++++\n 5 files changed, 457 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/ngbe/ngbe_rxtx.c\n create mode 100644 drivers/net/ngbe/ngbe_rxtx.h",
    "diff": "diff --git a/drivers/net/ngbe/meson.build b/drivers/net/ngbe/meson.build\nindex 81173fa7f0..9e75b82f1c 100644\n--- a/drivers/net/ngbe/meson.build\n+++ b/drivers/net/ngbe/meson.build\n@@ -12,6 +12,7 @@ objs = [base_objs]\n \n sources = files(\n \t'ngbe_ethdev.c',\n+\t'ngbe_rxtx.c',\n )\n \n includes += include_directories('base')\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c\nindex c952023e8b..e73606c5f3 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.c\n+++ b/drivers/net/ngbe/ngbe_ethdev.c\n@@ -12,6 +12,7 @@\n #include \"ngbe_logs.h\"\n #include \"base/ngbe.h\"\n #include \"ngbe_ethdev.h\"\n+#include \"ngbe_rxtx.h\"\n \n static int ngbe_dev_close(struct rte_eth_dev *dev);\n \n@@ -37,6 +38,12 @@ static const struct rte_pci_id pci_id_ngbe_map[] = {\n \t{ .vendor_id = 0, /* sentinel */ },\n };\n \n+static const struct rte_eth_desc_lim rx_desc_lim = {\n+\t.nb_max = NGBE_RING_DESC_MAX,\n+\t.nb_min = NGBE_RING_DESC_MIN,\n+\t.nb_align = NGBE_RXD_ALIGN,\n+};\n+\n static const struct eth_dev_ops ngbe_eth_dev_ops;\n \n static inline void\n@@ -241,12 +248,19 @@ static int\n ngbe_dev_configure(struct rte_eth_dev *dev)\n {\n \tstruct ngbe_interrupt *intr = ngbe_dev_intr(dev);\n+\tstruct ngbe_adapter *adapter = ngbe_dev_adapter(dev);\n \n \tPMD_INIT_FUNC_TRACE();\n \n \t/* set flag to update link status after init */\n \tintr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;\n \n+\t/*\n+\t * Initialize to TRUE. If any of Rx queues doesn't meet the bulk\n+\t * allocation Rx preconditions we will reset it.\n+\t */\n+\tadapter->rx_bulk_alloc_allowed = true;\n+\n \treturn 0;\n }\n \n@@ -266,11 +280,30 @@ ngbe_dev_close(struct rte_eth_dev *dev)\n static int\n ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n {\n-\tRTE_SET_USED(dev);\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\n+\tdev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;\n+\n+\tdev_info->default_rxconf = (struct rte_eth_rxconf) {\n+\t\t.rx_thresh = {\n+\t\t\t.pthresh = NGBE_DEFAULT_RX_PTHRESH,\n+\t\t\t.hthresh = NGBE_DEFAULT_RX_HTHRESH,\n+\t\t\t.wthresh = NGBE_DEFAULT_RX_WTHRESH,\n+\t\t},\n+\t\t.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,\n+\t\t.rx_drop_en = 0,\n+\t\t.offloads = 0,\n+\t};\n+\n+\tdev_info->rx_desc_lim = rx_desc_lim;\n \n \tdev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |\n \t\t\t\tETH_LINK_SPEED_10M;\n \n+\t/* Driver-preferred Rx/Tx parameters */\n+\tdev_info->default_rxportconf.nb_queues = 1;\n+\tdev_info->default_rxportconf.ring_size = 256;\n+\n \treturn 0;\n }\n \n@@ -570,6 +603,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {\n \t.dev_configure              = ngbe_dev_configure,\n \t.dev_infos_get              = ngbe_dev_info_get,\n \t.link_update                = ngbe_dev_link_update,\n+\t.rx_queue_setup             = ngbe_dev_rx_queue_setup,\n+\t.rx_queue_release           = ngbe_dev_rx_queue_release,\n };\n \n RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h\nindex b67508a3de..6580d288c8 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.h\n+++ b/drivers/net/ngbe/ngbe_ethdev.h\n@@ -30,6 +30,7 @@ struct ngbe_interrupt {\n struct ngbe_adapter {\n \tstruct ngbe_hw             hw;\n \tstruct ngbe_interrupt      intr;\n+\tbool rx_bulk_alloc_allowed;\n };\n \n static inline struct ngbe_adapter *\n@@ -58,6 +59,13 @@ ngbe_dev_intr(struct rte_eth_dev *dev)\n \treturn intr;\n }\n \n+void ngbe_dev_rx_queue_release(void *rxq);\n+\n+int  ngbe_dev_rx_queue_setup(struct rte_eth_dev 
*dev, uint16_t rx_queue_id,\n+\t\tuint16_t nb_rx_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\tstruct rte_mempool *mb_pool);\n+\n int\n ngbe_dev_link_update_share(struct rte_eth_dev *dev,\n \t\tint wait_to_complete);\n@@ -66,4 +74,12 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,\n #define NGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */\n #define NGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */\n \n+/*\n+ *  Default values for Rx/Tx configuration\n+ */\n+#define NGBE_DEFAULT_RX_FREE_THRESH  32\n+#define NGBE_DEFAULT_RX_PTHRESH      8\n+#define NGBE_DEFAULT_RX_HTHRESH      8\n+#define NGBE_DEFAULT_RX_WTHRESH      0\n+\n #endif /* _NGBE_ETHDEV_H_ */\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c\nnew file mode 100644\nindex 0000000000..df0b64dc01\n--- /dev/null\n+++ b/drivers/net/ngbe/ngbe_rxtx.c\n@@ -0,0 +1,308 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018-2020 Beijing WangXun Technology Co., Ltd.\n+ * Copyright(c) 2010-2017 Intel Corporation\n+ */\n+\n+#include <sys/queue.h>\n+\n+#include <stdint.h>\n+#include <rte_ethdev.h>\n+#include <ethdev_driver.h>\n+#include <rte_malloc.h>\n+\n+#include \"ngbe_logs.h\"\n+#include \"base/ngbe.h\"\n+#include \"ngbe_ethdev.h\"\n+#include \"ngbe_rxtx.h\"\n+\n+/**\n+ * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster\n+ *\n+ * The \"next\" pointer of the last segment of (not-yet-completed) RSC clusters\n+ * in the sw_sc_ring is not set to NULL but rather points to the next\n+ * mbuf of this RSC aggregation (that has not been completed yet and still\n+ * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we\n+ * will just free first \"nb_segs\" segments of the cluster explicitly by calling\n+ * an rte_pktmbuf_free_seg().\n+ *\n+ * @m scattered cluster head\n+ */\n+static void __rte_cold\n+ngbe_free_sc_cluster(struct rte_mbuf *m)\n+{\n+\tuint16_t i, nb_segs = m->nb_segs;\n+\tstruct rte_mbuf *next_seg;\n+\n+\tfor (i = 0; i < nb_segs; i++) {\n+\t\tnext_seg = m->next;\n+\t\trte_pktmbuf_free_seg(m);\n+\t\tm = next_seg;\n+\t}\n+}\n+\n+static void __rte_cold\n+ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)\n+{\n+\tunsigned int i;\n+\n+\tif (rxq->sw_ring != NULL) {\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\t\tif (rxq->sw_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n+\t\t\t\trxq->sw_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t\tif (rxq->rx_nb_avail) {\n+\t\t\tfor (i = 0; i < rxq->rx_nb_avail; ++i) {\n+\t\t\t\tstruct rte_mbuf *mb;\n+\n+\t\t\t\tmb = rxq->rx_stage[rxq->rx_next_avail + i];\n+\t\t\t\trte_pktmbuf_free_seg(mb);\n+\t\t\t}\n+\t\t\trxq->rx_nb_avail = 0;\n+\t\t}\n+\t}\n+\n+\tif (rxq->sw_sc_ring != NULL)\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++)\n+\t\t\tif (rxq->sw_sc_ring[i].fbuf) {\n+\t\t\t\tngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);\n+\t\t\t\trxq->sw_sc_ring[i].fbuf = NULL;\n+\t\t\t}\n+}\n+\n+static void __rte_cold\n+ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)\n+{\n+\tif (rxq != NULL) {\n+\t\tngbe_rx_queue_release_mbufs(rxq);\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq->sw_sc_ring);\n+\t\trte_free(rxq);\n+\t}\n+}\n+\n+void __rte_cold\n+ngbe_dev_rx_queue_release(void *rxq)\n+{\n+\tngbe_rx_queue_release(rxq);\n+}\n+\n+/*\n+ * Check if Rx Burst Bulk Alloc function can be used.\n+ * Return\n+ *        0: the preconditions are satisfied and the bulk allocation function\n+ *           can be used.\n+ *  -EINVAL: the preconditions are NOT satisfied and the 
default Rx burst\n+ *           function must be used.\n+ */\n+static inline int __rte_cold\n+check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)\n+{\n+\tint ret = 0;\n+\n+\t/*\n+\t * Make sure the following pre-conditions are satisfied:\n+\t *   rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST\n+\t *   rxq->rx_free_thresh < rxq->nb_rx_desc\n+\t *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0\n+\t * Scattered packets are not supported.  This should be checked\n+\t * outside of this function.\n+\t */\n+\tif (!(rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions: \"\n+\t\t\t     \"rxq->rx_free_thresh=%d, \"\n+\t\t\t     \"RTE_PMD_NGBE_RX_MAX_BURST=%d\",\n+\t\t\t     rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);\n+\t\tret = -EINVAL;\n+\t} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions: \"\n+\t\t\t     \"rxq->rx_free_thresh=%d, \"\n+\t\t\t     \"rxq->nb_rx_desc=%d\",\n+\t\t\t     rxq->rx_free_thresh, rxq->nb_rx_desc);\n+\t\tret = -EINVAL;\n+\t} else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions: \"\n+\t\t\t     \"rxq->nb_rx_desc=%d, \"\n+\t\t\t     \"rxq->rx_free_thresh=%d\",\n+\t\t\t     rxq->nb_rx_desc, rxq->rx_free_thresh);\n+\t\tret = -EINVAL;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/* Reset dynamic ngbe_rx_queue fields back to defaults */\n+static void __rte_cold\n+ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)\n+{\n+\tstatic const struct ngbe_rx_desc zeroed_desc = {\n+\t\t\t\t\t\t{{0}, {0} }, {{0}, {0} } };\n+\tunsigned int i;\n+\tuint16_t len = rxq->nb_rx_desc;\n+\n+\t/*\n+\t * By default, the Rx queue setup function allocates enough memory for\n+\t * NGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires\n+\t * extra memory at the end of the descriptor ring to be zero'd out.\n+\t */\n+\tif (adapter->rx_bulk_alloc_allowed)\n+\t\t/* zero out extra memory */\n+\t\tlen += RTE_PMD_NGBE_RX_MAX_BURST;\n+\n+\t/*\n+\t * Zero out HW ring memory. Zero out extra memory at the end of\n+\t * the H/W ring so look-ahead logic in Rx Burst bulk alloc function\n+\t * reads extra memory as zeros.\n+\t */\n+\tfor (i = 0; i < len; i++)\n+\t\trxq->rx_ring[i] = zeroed_desc;\n+\n+\t/*\n+\t * initialize extra software ring entries. 
Space for these extra\n+\t * entries is always allocated\n+\t */\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\tfor (i = rxq->nb_rx_desc; i < len; ++i)\n+\t\trxq->sw_ring[i].mbuf = &rxq->fake_mbuf;\n+\n+\trxq->rx_nb_avail = 0;\n+\trxq->rx_next_avail = 0;\n+\trxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);\n+\trxq->rx_tail = 0;\n+\trxq->nb_rx_hold = 0;\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+}\n+\n+int __rte_cold\n+ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\t uint16_t queue_idx,\n+\t\t\t uint16_t nb_desc,\n+\t\t\t unsigned int socket_id,\n+\t\t\t const struct rte_eth_rxconf *rx_conf,\n+\t\t\t struct rte_mempool *mp)\n+{\n+\tconst struct rte_memzone *rz;\n+\tstruct ngbe_rx_queue *rxq;\n+\tstruct ngbe_hw     *hw;\n+\tuint16_t len;\n+\tstruct ngbe_adapter *adapter = ngbe_dev_adapter(dev);\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\thw = ngbe_dev_hw(dev);\n+\n+\t/*\n+\t * Validate number of receive descriptors.\n+\t * It must not exceed hardware maximum, and must be multiple\n+\t * of NGBE_ALIGN.\n+\t */\n+\tif (nb_desc % NGBE_RXD_ALIGN != 0 ||\n+\t\t\tnb_desc > NGBE_RING_DESC_MAX ||\n+\t\t\tnb_desc < NGBE_RING_DESC_MIN) {\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory prior to re-allocation if needed... */\n+\tif (dev->data->rx_queues[queue_idx] != NULL) {\n+\t\tngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* First allocate the Rx queue data structure */\n+\trxq = rte_zmalloc_socket(\"ethdev RX queue\",\n+\t\t\t\t sizeof(struct ngbe_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rxq == NULL)\n+\t\treturn -ENOMEM;\n+\trxq->mb_pool = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_conf->rx_free_thresh;\n+\trxq->queue_id = queue_idx;\n+\trxq->reg_idx = queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->drop_en = rx_conf->rx_drop_en;\n+\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\n+\t/*\n+\t * Allocate Rx ring hardware descriptors. A memzone large enough to\n+\t * handle the maximum ring size is allocated in order to allow for\n+\t * resizing in later calls to the queue setup function.\n+\t */\n+\trz = rte_eth_dma_zone_reserve(dev, \"rx_ring\", queue_idx,\n+\t\t\t\t      RX_RING_SZ, NGBE_ALIGN, socket_id);\n+\tif (rz == NULL) {\n+\t\tngbe_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/*\n+\t * Zero init all the descriptors in the ring.\n+\t */\n+\tmemset(rz->addr, 0, RX_RING_SZ);\n+\n+\trxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));\n+\trxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));\n+\n+\trxq->rx_ring_phys_addr = TMZ_PADDR(rz);\n+\trxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);\n+\n+\t/*\n+\t * Certain constraints must be met in order to use the bulk buffer\n+\t * allocation Rx burst function. If any of Rx queues doesn't meet them\n+\t * the feature should be disabled for the whole port.\n+\t */\n+\tif (check_rx_burst_bulk_alloc_preconditions(rxq)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"queue[%d] doesn't meet Rx Bulk Alloc \"\n+\t\t\t\t    \"preconditions - canceling the feature for \"\n+\t\t\t\t    \"the whole port[%d]\",\n+\t\t\t     rxq->queue_id, rxq->port_id);\n+\t\tadapter->rx_bulk_alloc_allowed = false;\n+\t}\n+\n+\t/*\n+\t * Allocate software ring. 
Allow for space at the end of the\n+\t * S/W ring to make sure look-ahead logic in bulk alloc Rx burst\n+\t * function does not access an invalid memory region.\n+\t */\n+\tlen = nb_desc;\n+\tif (adapter->rx_bulk_alloc_allowed)\n+\t\tlen += RTE_PMD_NGBE_RX_MAX_BURST;\n+\n+\trxq->sw_ring = rte_zmalloc_socket(\"rxq->sw_ring\",\n+\t\t\t\t\t  sizeof(struct ngbe_rx_entry) * len,\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rxq->sw_ring == NULL) {\n+\t\tngbe_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/*\n+\t * Always allocate even if it's not going to be needed in order to\n+\t * simplify the code.\n+\t *\n+\t * This ring is used in Scattered Rx cases and Scattered Rx may\n+\t * be requested in ngbe_dev_rx_init(), which is called later from\n+\t * dev_start() flow.\n+\t */\n+\trxq->sw_sc_ring =\n+\t\trte_zmalloc_socket(\"rxq->sw_sc_ring\",\n+\t\t\t\t  sizeof(struct ngbe_scattered_rx_entry) * len,\n+\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rxq->sw_sc_ring == NULL) {\n+\t\tngbe_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tPMD_INIT_LOG(DEBUG, \"sw_ring=%p sw_sc_ring=%p hw_ring=%p \"\n+\t\t\t    \"dma_addr=0x%\" PRIx64,\n+\t\t     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,\n+\t\t     rxq->rx_ring_phys_addr);\n+\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\n+\tngbe_reset_rx_queue(adapter, rxq);\n+\n+\treturn 0;\n+}\n+\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h\nnew file mode 100644\nindex 0000000000..92b9a9fd1b\n--- /dev/null\n+++ b/drivers/net/ngbe/ngbe_rxtx.h\n@@ -0,0 +1,96 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018-2020 Beijing WangXun Technology Co., Ltd.\n+ * Copyright(c) 2010-2017 Intel Corporation\n+ */\n+\n+#ifndef _NGBE_RXTX_H_\n+#define _NGBE_RXTX_H_\n+\n+/*****************************************************************************\n+ * Receive Descriptor\n+ *****************************************************************************/\n+struct ngbe_rx_desc {\n+\tstruct {\n+\t\tunion {\n+\t\t\trte_le32_t dw0;\n+\t\t\tstruct {\n+\t\t\t\trte_le16_t pkt;\n+\t\t\t\trte_le16_t hdr;\n+\t\t\t} lo;\n+\t\t};\n+\t\tunion {\n+\t\t\trte_le32_t dw1;\n+\t\t\tstruct {\n+\t\t\t\trte_le16_t ipid;\n+\t\t\t\trte_le16_t csum;\n+\t\t\t} hi;\n+\t\t};\n+\t} qw0; /* also as r.pkt_addr */\n+\tstruct {\n+\t\tunion {\n+\t\t\trte_le32_t dw2;\n+\t\t\tstruct {\n+\t\t\t\trte_le32_t status;\n+\t\t\t} lo;\n+\t\t};\n+\t\tunion {\n+\t\t\trte_le32_t dw3;\n+\t\t\tstruct {\n+\t\t\t\trte_le16_t len;\n+\t\t\t\trte_le16_t tag;\n+\t\t\t} hi;\n+\t\t};\n+\t} qw1; /* also as r.hdr_addr */\n+};\n+\n+#define RTE_PMD_NGBE_RX_MAX_BURST 32\n+\n+#define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \\\n+\t\t    sizeof(struct ngbe_rx_desc))\n+\n+\n+/**\n+ * Structure associated with each descriptor of the Rx ring of a Rx queue.\n+ */\n+struct ngbe_rx_entry {\n+\tstruct rte_mbuf *mbuf; /**< mbuf associated with Rx descriptor. */\n+};\n+\n+struct ngbe_scattered_rx_entry {\n+\tstruct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */\n+};\n+\n+/**\n+ * Structure associated with each Rx queue.\n+ */\n+struct ngbe_rx_queue {\n+\tstruct rte_mempool  *mb_pool; /**< mbuf pool to populate Rx ring. */\n+\tvolatile struct ngbe_rx_desc *rx_ring; /**< Rx ring virtual address. */\n+\tuint64_t            rx_ring_phys_addr; /**< Rx ring DMA address. */\n+\tvolatile uint32_t   *rdt_reg_addr; /**< RDT register address. */\n+\tvolatile uint32_t   *rdh_reg_addr; /**< RDH register address. 
*/\n+\tstruct ngbe_rx_entry *sw_ring; /**< address of Rx software ring. */\n+\t/**< address of scattered Rx software ring. */\n+\tstruct ngbe_scattered_rx_entry *sw_sc_ring;\n+\tstruct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */\n+\tstruct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */\n+\tuint16_t            nb_rx_desc; /**< number of Rx descriptors. */\n+\tuint16_t            rx_tail;  /**< current value of RDT register. */\n+\tuint16_t            nb_rx_hold; /**< number of held free Rx desc. */\n+\tuint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */\n+\tuint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */\n+\tuint16_t rx_free_trigger; /**< triggers rx buffer allocation */\n+\tuint16_t            rx_free_thresh; /**< max free Rx desc to hold. */\n+\tuint16_t            queue_id; /**< RX queue index. */\n+\tuint16_t            reg_idx;  /**< RX queue register index. */\n+\t/**< Packet type mask for different NICs. */\n+\tuint16_t            port_id;  /**< Device port identifier. */\n+\tuint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */\n+\tuint8_t             rx_deferred_start; /**< not in global dev start. */\n+\t/** need to alloc dummy mbuf, for wraparound when scanning hw ring */\n+\tstruct rte_mbuf fake_mbuf;\n+\t/** hold packets to return to application */\n+\tstruct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2];\n+};\n+\n+#endif /* _NGBE_RXTX_H_ */\n",
    "prefixes": [
        "v6",
        "12/19"
    ]
}
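The Allow header above also lists PUT and PATCH, matching the update operations described at the top of this page. A minimal sketch of a partial update with Python's requests library follows; the token value is a placeholder, and the assumptions are that writes require token authentication ("Authorization: Token <value>") plus maintainer rights on the project, and that "accepted" is a valid state slug on this instance:

import requests

# Placeholder token; real tokens are issued per user by the patchwork instance.
headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}

# PATCH performs a partial update: only the fields in the JSON body change.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/94337/",
    headers=headers,
    json={"state": "accepted", "archived": False},
)
resp.raise_for_status()
print(resp.json()["state"])  # expected to echo the new state on success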