get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body).

put:
Update a patch.
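
The raw exchange reproduced below can be driven from any HTTP client. As a
minimal sketch, assuming Python with the third-party requests library and
that read access to this endpoint needs no authentication:

import requests

# Fetch the patch detail shown below as JSON (?format=json is assumed to
# select the JSON renderer; ?format=api renders the page reproduced here).
url = "https://patches.dpdk.org/api/patches/81303/"
resp = requests.get(url, params={"format": "json"}, timeout=30)
resp.raise_for_status()

patch = resp.json()
print(patch["name"], patch["state"])   # e.g. "[v4,21/58] net/txgbe: ..." "accepted"
print(patch["mbox"])                   # mbox URL of the patch itself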

GET /api/patches/81303/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 81303,
    "url": "https://patches.dpdk.org/api/patches/81303/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20201019085415.82207-22-jiawenwu@trustnetic.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201019085415.82207-22-jiawenwu@trustnetic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201019085415.82207-22-jiawenwu@trustnetic.com",
    "date": "2020-10-19T08:53:38",
    "name": "[v4,21/58] net/txgbe: add Rx and Tx queues setup and release",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "5f53fe14c83751917b174238ac166fec85ba703b",
    "submitter": {
        "id": 1932,
        "url": "https://patches.dpdk.org/api/people/1932/?format=api",
        "name": "Jiawen Wu",
        "email": "jiawenwu@trustnetic.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20201019085415.82207-22-jiawenwu@trustnetic.com/mbox/",
    "series": [
        {
            "id": 13094,
            "url": "https://patches.dpdk.org/api/series/13094/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=13094",
            "date": "2020-10-19T08:53:17",
            "name": "net: txgbe PMD",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/13094/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/81303/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/81303/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3D214A04DC;\n\tMon, 19 Oct 2020 11:00:17 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7CCC7C9A6;\n\tMon, 19 Oct 2020 10:53:22 +0200 (CEST)",
            "from smtpbguseast2.qq.com (smtpbguseast2.qq.com [54.204.34.130])\n by dpdk.org (Postfix) with ESMTP id D4501C862\n for <dev@dpdk.org>; Mon, 19 Oct 2020 10:52:58 +0200 (CEST)",
            "from localhost.localdomain.com (unknown [183.129.236.74])\n by esmtp6.qq.com (ESMTP) with\n id ; Mon, 19 Oct 2020 16:52:54 +0800 (CST)"
        ],
        "X-QQ-mid": "bizesmtp6t1603097574tym8wsejo",
        "X-QQ-SSF": "01400000002000C0C000B00A0000000",
        "X-QQ-FEAT": "O9RHVi+JMbKjRyQgQKn+M4T9KNhBSgQtpLysnXDsr5PZr9gTQ356M5qPMA7iA\n y/T4m9I0tumyksdFjeZCbZHswZPe9Z0OL7Fru/6Tzd1X6j8EM3KS8MM8MLY3addezczdsCD\n XCbDBiuCTsFOsqukc6UCYPlINdS77MOfOponAvoATbKfbe8oAJkWk/13zMrmViw2dVbh8mc\n 9Lm4XxlOHtLIg2G5Dg7R5yVVRu/L7A1QZHW7A75/AzL6Zv2d8wuWy9St82ETqBItDRaNEDp\n MfZYVT25fc7LnssUnkx8ok6QOJ4F/rADsvtteMzC8dptUa29kzD5bcHpLcDenjlrnZhSawC\n E/7OU2ztFMigXUIDNu53ZfGageJpA==",
        "X-QQ-GoodBg": "2",
        "From": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "To": "dev@dpdk.org",
        "Cc": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "Date": "Mon, 19 Oct 2020 16:53:38 +0800",
        "Message-Id": "<20201019085415.82207-22-jiawenwu@trustnetic.com>",
        "X-Mailer": "git-send-email 2.18.4",
        "In-Reply-To": "<20201019085415.82207-1-jiawenwu@trustnetic.com>",
        "References": "<20201019085415.82207-1-jiawenwu@trustnetic.com>",
        "X-QQ-SENDSIZE": "520",
        "Feedback-ID": "bizesmtp:trustnetic.com:qybgforeign:qybgforeign5",
        "X-QQ-Bgrelay": "1",
        "Subject": "[dpdk-dev] [PATCH v4 21/58] net/txgbe: add Rx and Tx queues setup\n\tand release",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add receive and transmit queues setup and release.\n\nSigned-off-by: Jiawen Wu <jiawenwu@trustnetic.com>\n---\n drivers/net/txgbe/txgbe_ethdev.c |   4 +\n drivers/net/txgbe/txgbe_ethdev.h |  13 +\n drivers/net/txgbe/txgbe_rxtx.c   | 510 +++++++++++++++++++++++++++++++\n drivers/net/txgbe/txgbe_rxtx.h   | 130 ++++++++\n 4 files changed, 657 insertions(+)",
    "diff": "diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c\nindex 1395f6ffe..6186cace1 100644\n--- a/drivers/net/txgbe/txgbe_ethdev.c\n+++ b/drivers/net/txgbe/txgbe_ethdev.c\n@@ -1322,6 +1322,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {\n \t.dev_infos_get              = txgbe_dev_info_get,\n \t.dev_set_link_up            = txgbe_dev_set_link_up,\n \t.dev_set_link_down          = txgbe_dev_set_link_down,\n+\t.rx_queue_setup             = txgbe_dev_rx_queue_setup,\n+\t.rx_queue_release           = txgbe_dev_rx_queue_release,\n+\t.tx_queue_setup             = txgbe_dev_tx_queue_setup,\n+\t.tx_queue_release           = txgbe_dev_tx_queue_release,\n \t.mac_addr_add               = txgbe_add_rar,\n \t.mac_addr_remove            = txgbe_remove_rar,\n \t.mac_addr_set               = txgbe_set_default_mac_addr,\ndiff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h\nindex 096b17673..6636b6e9a 100644\n--- a/drivers/net/txgbe/txgbe_ethdev.h\n+++ b/drivers/net/txgbe/txgbe_ethdev.h\n@@ -80,6 +80,19 @@ struct txgbe_adapter {\n /*\n  * RX/TX function prototypes\n  */\n+void txgbe_dev_rx_queue_release(void *rxq);\n+\n+void txgbe_dev_tx_queue_release(void *txq);\n+\n+int  txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\tuint16_t nb_rx_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\tstruct rte_mempool *mb_pool);\n+\n+int  txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n+\t\tuint16_t nb_tx_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_txconf *tx_conf);\n+\n int txgbe_dev_rx_init(struct rte_eth_dev *dev);\n \n void txgbe_dev_tx_init(struct rte_eth_dev *dev);\ndiff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c\nindex eadc06bcf..707d5b2e4 100644\n--- a/drivers/net/txgbe/txgbe_rxtx.c\n+++ b/drivers/net/txgbe/txgbe_rxtx.c\n@@ -7,10 +7,14 @@\n #include <stdio.h>\n #include <stdlib.h>\n #include <string.h>\n+#include <errno.h>\n \n #include <rte_common.h>\n #include <rte_ethdev.h>\n #include <rte_ethdev_driver.h>\n+#include <rte_memzone.h>\n+#include <rte_mempool.h>\n+#include <rte_malloc.h>\n #include <rte_mbuf.h>\n \n #include \"txgbe_logs.h\"\n@@ -31,6 +35,10 @@ txgbe_is_vf(struct rte_eth_dev *dev)\n \t}\n }\n \n+#ifndef DEFAULT_TX_FREE_THRESH\n+#define DEFAULT_TX_FREE_THRESH 32\n+#endif\n+\n uint64_t\n txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)\n {\n@@ -73,6 +81,57 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)\n \treturn offloads;\n }\n \n+static void __rte_cold\n+txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)\n+{\n+\tunsigned int i;\n+\n+\tif (txq->sw_ring != NULL) {\n+\t\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\t\tif (txq->sw_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);\n+\t\t\t\ttxq->sw_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+static void __rte_cold\n+txgbe_tx_free_swring(struct txgbe_tx_queue *txq)\n+{\n+\tif (txq != NULL &&\n+\t    txq->sw_ring != NULL)\n+\t\trte_free(txq->sw_ring);\n+}\n+\n+static void __rte_cold\n+txgbe_tx_queue_release(struct txgbe_tx_queue *txq)\n+{\n+\tif (txq != NULL && txq->ops != NULL) {\n+\t\ttxq->ops->release_mbufs(txq);\n+\t\ttxq->ops->free_swring(txq);\n+\t\trte_free(txq);\n+\t}\n+}\n+\n+void __rte_cold\n+txgbe_dev_tx_queue_release(void *txq)\n+{\n+\ttxgbe_tx_queue_release(txq);\n+}\n+\n+static const struct txgbe_txq_ops def_txq_ops = {\n+\t.release_mbufs = 
txgbe_tx_queue_release_mbufs,\n+\t.free_swring = txgbe_tx_free_swring,\n+};\n+\n+void __rte_cold\n+txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)\n+{\n+\tRTE_SET_USED(dev);\n+\tRTE_SET_USED(txq);\n+}\n+\n uint64_t\n txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)\n {\n@@ -112,6 +171,457 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)\n \treturn tx_offload_capa;\n }\n \n+int __rte_cold\n+txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\t uint16_t queue_idx,\n+\t\t\t uint16_t nb_desc,\n+\t\t\t unsigned int socket_id,\n+\t\t\t const struct rte_eth_txconf *tx_conf)\n+{\n+\tconst struct rte_memzone *tz;\n+\tstruct txgbe_tx_queue *txq;\n+\tstruct txgbe_hw     *hw;\n+\tuint16_t tx_free_thresh;\n+\tuint64_t offloads;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\thw = TXGBE_DEV_HW(dev);\n+\n+\toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n+\n+\t/*\n+\t * Validate number of transmit descriptors.\n+\t * It must not exceed hardware maximum, and must be multiple\n+\t * of TXGBE_ALIGN.\n+\t */\n+\tif (nb_desc % TXGBE_TXD_ALIGN != 0 ||\n+\t    nb_desc > TXGBE_RING_DESC_MAX ||\n+\t    nb_desc < TXGBE_RING_DESC_MIN) {\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/*\n+\t * The TX descriptor ring will be cleaned after txq->tx_free_thresh\n+\t * descriptors are used or if the number of descriptors required\n+\t * to transmit a packet is greater than the number of free TX\n+\t * descriptors.\n+\t * One descriptor in the TX ring is used as a sentinel to avoid a\n+\t * H/W race condition, hence the maximum threshold constraints.\n+\t * When set to zero use default values.\n+\t */\n+\ttx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?\n+\t\t\ttx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);\n+\tif (tx_free_thresh >= (nb_desc - 3)) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_free_thresh must be less than the number of \"\n+\t\t\t     \"TX descriptors minus 3. (tx_free_thresh=%u \"\n+\t\t\t     \"port=%d queue=%d)\",\n+\t\t\t     (unsigned int)tx_free_thresh,\n+\t\t\t     (int)dev->data->port_id, (int)queue_idx);\n+\t\treturn -(EINVAL);\n+\t}\n+\n+\tif ((nb_desc % tx_free_thresh) != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_free_thresh must be a divisor of the \"\n+\t\t\t     \"number of TX descriptors. (tx_free_thresh=%u \"\n+\t\t\t     \"port=%d queue=%d)\", (unsigned int)tx_free_thresh,\n+\t\t\t     (int)dev->data->port_id, (int)queue_idx);\n+\t\treturn -(EINVAL);\n+\t}\n+\n+\t/* Free memory prior to re-allocation if needed... */\n+\tif (dev->data->tx_queues[queue_idx] != NULL) {\n+\t\ttxgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* First allocate the tx queue data structure */\n+\ttxq = rte_zmalloc_socket(\"ethdev TX queue\",\n+\t\t\t\t sizeof(struct txgbe_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (txq == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t/*\n+\t * Allocate TX ring hardware descriptors. 
A memzone large enough to\n+\t * handle the maximum ring size is allocated in order to allow for\n+\t * resizing in later calls to the queue setup function.\n+\t */\n+\ttz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", queue_idx,\n+\t\t\tsizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,\n+\t\t\tTXGBE_ALIGN, socket_id);\n+\tif (tz == NULL) {\n+\t\ttxgbe_tx_queue_release(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->tx_free_thresh = tx_free_thresh;\n+\ttxq->pthresh = tx_conf->tx_thresh.pthresh;\n+\ttxq->hthresh = tx_conf->tx_thresh.hthresh;\n+\ttxq->wthresh = tx_conf->tx_thresh.wthresh;\n+\ttxq->queue_id = queue_idx;\n+\ttxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?\n+\t\tqueue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->offloads = offloads;\n+\ttxq->ops = &def_txq_ops;\n+\ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n+\n+\t/* Modification to set tail pointer for virtual function\n+\t * if vf is detected.\n+\t */\n+\tif (hw->mac.type == txgbe_mac_raptor_vf) {\n+\t\ttxq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));\n+\t\ttxq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));\n+\t} else {\n+\t\ttxq->tdt_reg_addr = TXGBE_REG_ADDR(hw,\n+\t\t\t\t\t\tTXGBE_TXWP(txq->reg_idx));\n+\t\ttxq->tdc_reg_addr = TXGBE_REG_ADDR(hw,\n+\t\t\t\t\t\tTXGBE_TXCFG(txq->reg_idx));\n+\t}\n+\n+\ttxq->tx_ring_phys_addr = TMZ_PADDR(tz);\n+\ttxq->tx_ring = (struct txgbe_tx_desc *)TMZ_VADDR(tz);\n+\n+\t/* Allocate software ring */\n+\ttxq->sw_ring = rte_zmalloc_socket(\"txq->sw_ring\",\n+\t\t\t\tsizeof(struct txgbe_tx_entry) * nb_desc,\n+\t\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n+\tif (txq->sw_ring == NULL) {\n+\t\ttxgbe_tx_queue_release(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\tPMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\" PRIx64,\n+\t\t     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);\n+\n+\t/* set up scalar TX function as appropriate */\n+\ttxgbe_set_tx_function(dev, txq);\n+\n+\ttxq->ops->reset(txq);\n+\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster\n+ *\n+ * The \"next\" pointer of the last segment of (not-yet-completed) RSC clusters\n+ * in the sw_rsc_ring is not set to NULL but rather points to the next\n+ * mbuf of this RSC aggregation (that has not been completed yet and still\n+ * resides on the HW ring). 
So, instead of calling for rte_pktmbuf_free() we\n+ * will just free first \"nb_segs\" segments of the cluster explicitly by calling\n+ * an rte_pktmbuf_free_seg().\n+ *\n+ * @m scattered cluster head\n+ */\n+static void __rte_cold\n+txgbe_free_sc_cluster(struct rte_mbuf *m)\n+{\n+\tuint16_t i, nb_segs = m->nb_segs;\n+\tstruct rte_mbuf *next_seg;\n+\n+\tfor (i = 0; i < nb_segs; i++) {\n+\t\tnext_seg = m->next;\n+\t\trte_pktmbuf_free_seg(m);\n+\t\tm = next_seg;\n+\t}\n+}\n+\n+static void __rte_cold\n+txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)\n+{\n+\tunsigned int i;\n+\n+\tif (rxq->sw_ring != NULL) {\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\t\tif (rxq->sw_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n+\t\t\t\trxq->sw_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t\tif (rxq->rx_nb_avail) {\n+\t\t\tfor (i = 0; i < rxq->rx_nb_avail; ++i) {\n+\t\t\t\tstruct rte_mbuf *mb;\n+\n+\t\t\t\tmb = rxq->rx_stage[rxq->rx_next_avail + i];\n+\t\t\t\trte_pktmbuf_free_seg(mb);\n+\t\t\t}\n+\t\t\trxq->rx_nb_avail = 0;\n+\t\t}\n+\t}\n+\n+\tif (rxq->sw_sc_ring)\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++)\n+\t\t\tif (rxq->sw_sc_ring[i].fbuf) {\n+\t\t\t\ttxgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);\n+\t\t\t\trxq->sw_sc_ring[i].fbuf = NULL;\n+\t\t\t}\n+}\n+\n+static void __rte_cold\n+txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)\n+{\n+\tif (rxq != NULL) {\n+\t\ttxgbe_rx_queue_release_mbufs(rxq);\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq->sw_sc_ring);\n+\t\trte_free(rxq);\n+\t}\n+}\n+\n+void __rte_cold\n+txgbe_dev_rx_queue_release(void *rxq)\n+{\n+\ttxgbe_rx_queue_release(rxq);\n+}\n+\n+/*\n+ * Check if Rx Burst Bulk Alloc function can be used.\n+ * Return\n+ *        0: the preconditions are satisfied and the bulk allocation function\n+ *           can be used.\n+ *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst\n+ *           function must be used.\n+ */\n+static inline int __rte_cold\n+check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)\n+{\n+\tint ret = 0;\n+\n+\t/*\n+\t * Make sure the following pre-conditions are satisfied:\n+\t *   rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST\n+\t *   rxq->rx_free_thresh < rxq->nb_rx_desc\n+\t *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0\n+\t * Scattered packets are not supported.  
This should be checked\n+\t * outside of this function.\n+\t */\n+\tif (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions: \"\n+\t\t\t     \"rxq->rx_free_thresh=%d, \"\n+\t\t\t     \"RTE_PMD_TXGBE_RX_MAX_BURST=%d\",\n+\t\t\t     rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);\n+\t\tret = -EINVAL;\n+\t} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions: \"\n+\t\t\t     \"rxq->rx_free_thresh=%d, \"\n+\t\t\t     \"rxq->nb_rx_desc=%d\",\n+\t\t\t     rxq->rx_free_thresh, rxq->nb_rx_desc);\n+\t\tret = -EINVAL;\n+\t} else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions: \"\n+\t\t\t     \"rxq->nb_rx_desc=%d, \"\n+\t\t\t     \"rxq->rx_free_thresh=%d\",\n+\t\t\t     rxq->nb_rx_desc, rxq->rx_free_thresh);\n+\t\tret = -EINVAL;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+/* Reset dynamic txgbe_rx_queue fields back to defaults */\n+static void __rte_cold\n+txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)\n+{\n+\tstatic const struct txgbe_rx_desc zeroed_desc = {\n+\t\t\t\t\t\t{{0}, {0} }, {{0}, {0} } };\n+\tunsigned int i;\n+\tuint16_t len = rxq->nb_rx_desc;\n+\n+\t/*\n+\t * By default, the Rx queue setup function allocates enough memory for\n+\t * TXGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires\n+\t * extra memory at the end of the descriptor ring to be zero'd out.\n+\t */\n+\tif (adapter->rx_bulk_alloc_allowed)\n+\t\t/* zero out extra memory */\n+\t\tlen += RTE_PMD_TXGBE_RX_MAX_BURST;\n+\n+\t/*\n+\t * Zero out HW ring memory. Zero out extra memory at the end of\n+\t * the H/W ring so look-ahead logic in Rx Burst bulk alloc function\n+\t * reads extra memory as zeros.\n+\t */\n+\tfor (i = 0; i < len; i++)\n+\t\trxq->rx_ring[i] = zeroed_desc;\n+\n+\t/*\n+\t * initialize extra software ring entries. Space for these extra\n+\t * entries is always allocated\n+\t */\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\tfor (i = rxq->nb_rx_desc; i < len; ++i)\n+\t\trxq->sw_ring[i].mbuf = &rxq->fake_mbuf;\n+\n+\trxq->rx_nb_avail = 0;\n+\trxq->rx_next_avail = 0;\n+\trxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);\n+\trxq->rx_tail = 0;\n+\trxq->nb_rx_hold = 0;\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+}\n+\n+int __rte_cold\n+txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\t uint16_t queue_idx,\n+\t\t\t uint16_t nb_desc,\n+\t\t\t unsigned int socket_id,\n+\t\t\t const struct rte_eth_rxconf *rx_conf,\n+\t\t\t struct rte_mempool *mp)\n+{\n+\tconst struct rte_memzone *rz;\n+\tstruct txgbe_rx_queue *rxq;\n+\tstruct txgbe_hw     *hw;\n+\tuint16_t len;\n+\tstruct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);\n+\tuint64_t offloads;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\thw = TXGBE_DEV_HW(dev);\n+\n+\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n+\n+\t/*\n+\t * Validate number of receive descriptors.\n+\t * It must not exceed hardware maximum, and must be multiple\n+\t * of TXGBE_ALIGN.\n+\t */\n+\tif (nb_desc % TXGBE_RXD_ALIGN != 0 ||\n+\t\t\tnb_desc > TXGBE_RING_DESC_MAX ||\n+\t\t\tnb_desc < TXGBE_RING_DESC_MIN) {\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory prior to re-allocation if needed... 
*/\n+\tif (dev->data->rx_queues[queue_idx] != NULL) {\n+\t\ttxgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* First allocate the rx queue data structure */\n+\trxq = rte_zmalloc_socket(\"ethdev RX queue\",\n+\t\t\t\t sizeof(struct txgbe_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rxq == NULL)\n+\t\treturn -ENOMEM;\n+\trxq->mb_pool = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_conf->rx_free_thresh;\n+\trxq->queue_id = queue_idx;\n+\trxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?\n+\t\tqueue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);\n+\trxq->port_id = dev->data->port_id;\n+\tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n+\t\trxq->crc_len = RTE_ETHER_CRC_LEN;\n+\telse\n+\t\trxq->crc_len = 0;\n+\trxq->drop_en = rx_conf->rx_drop_en;\n+\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\trxq->offloads = offloads;\n+\n+\t/*\n+\t * The packet type in RX descriptor is different for different NICs.\n+\t * So set different masks for different NICs.\n+\t */\n+\trxq->pkt_type_mask = TXGBE_PTID_MASK;\n+\n+\t/*\n+\t * Allocate RX ring hardware descriptors. A memzone large enough to\n+\t * handle the maximum ring size is allocated in order to allow for\n+\t * resizing in later calls to the queue setup function.\n+\t */\n+\trz = rte_eth_dma_zone_reserve(dev, \"rx_ring\", queue_idx,\n+\t\t\t\t      RX_RING_SZ, TXGBE_ALIGN, socket_id);\n+\tif (rz == NULL) {\n+\t\ttxgbe_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/*\n+\t * Zero init all the descriptors in the ring.\n+\t */\n+\tmemset(rz->addr, 0, RX_RING_SZ);\n+\n+\t/*\n+\t * Modified to setup VFRDT for Virtual Function\n+\t */\n+\tif (hw->mac.type == txgbe_mac_raptor_vf) {\n+\t\trxq->rdt_reg_addr =\n+\t\t\tTXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));\n+\t\trxq->rdh_reg_addr =\n+\t\t\tTXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));\n+\t} else {\n+\t\trxq->rdt_reg_addr =\n+\t\t\tTXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));\n+\t\trxq->rdh_reg_addr =\n+\t\t\tTXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));\n+\t}\n+\n+\trxq->rx_ring_phys_addr = TMZ_PADDR(rz);\n+\trxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);\n+\n+\t/*\n+\t * Certain constraints must be met in order to use the bulk buffer\n+\t * allocation Rx burst function. If any of Rx queues doesn't meet them\n+\t * the feature should be disabled for the whole port.\n+\t */\n+\tif (check_rx_burst_bulk_alloc_preconditions(rxq)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"queue[%d] doesn't meet Rx Bulk Alloc \"\n+\t\t\t\t    \"preconditions - canceling the feature for \"\n+\t\t\t\t    \"the whole port[%d]\",\n+\t\t\t     rxq->queue_id, rxq->port_id);\n+\t\tadapter->rx_bulk_alloc_allowed = false;\n+\t}\n+\n+\t/*\n+\t * Allocate software ring. 
Allow for space at the end of the\n+\t * S/W ring to make sure look-ahead logic in bulk alloc Rx burst\n+\t * function does not access an invalid memory region.\n+\t */\n+\tlen = nb_desc;\n+\tif (adapter->rx_bulk_alloc_allowed)\n+\t\tlen += RTE_PMD_TXGBE_RX_MAX_BURST;\n+\n+\trxq->sw_ring = rte_zmalloc_socket(\"rxq->sw_ring\",\n+\t\t\t\t\t  sizeof(struct txgbe_rx_entry) * len,\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (!rxq->sw_ring) {\n+\t\ttxgbe_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/*\n+\t * Always allocate even if it's not going to be needed in order to\n+\t * simplify the code.\n+\t *\n+\t * This ring is used in LRO and Scattered Rx cases and Scattered Rx may\n+\t * be requested in txgbe_dev_rx_init(), which is called later from\n+\t * dev_start() flow.\n+\t */\n+\trxq->sw_sc_ring =\n+\t\trte_zmalloc_socket(\"rxq->sw_sc_ring\",\n+\t\t\t\t  sizeof(struct txgbe_scattered_rx_entry) * len,\n+\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (!rxq->sw_sc_ring) {\n+\t\ttxgbe_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tPMD_INIT_LOG(DEBUG, \"sw_ring=%p sw_sc_ring=%p hw_ring=%p \"\n+\t\t\t    \"dma_addr=0x%\" PRIx64,\n+\t\t     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,\n+\t\t     rxq->rx_ring_phys_addr);\n+\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\n+\ttxgbe_reset_rx_queue(adapter, rxq);\n+\n+\treturn 0;\n+}\n+\n void __rte_cold\n txgbe_set_rx_function(struct rte_eth_dev *dev)\n {\ndiff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h\nindex 7d3d9c275..be165dd19 100644\n--- a/drivers/net/txgbe/txgbe_rxtx.h\n+++ b/drivers/net/txgbe/txgbe_rxtx.h\n@@ -5,38 +5,168 @@\n #ifndef _TXGBE_RXTX_H_\n #define _TXGBE_RXTX_H_\n \n+/*****************************************************************************\n+ * Receive Descriptor\n+ *****************************************************************************/\n+struct txgbe_rx_desc {\n+\tstruct {\n+\t\tunion {\n+\t\t\t__le32 dw0;\n+\t\t\tstruct {\n+\t\t\t\t__le16 pkt;\n+\t\t\t\t__le16 hdr;\n+\t\t\t} lo;\n+\t\t};\n+\t\tunion {\n+\t\t\t__le32 dw1;\n+\t\t\tstruct {\n+\t\t\t\t__le16 ipid;\n+\t\t\t\t__le16 csum;\n+\t\t\t} hi;\n+\t\t};\n+\t} qw0; /* also as r.pkt_addr */\n+\tstruct {\n+\t\tunion {\n+\t\t\t__le32 dw2;\n+\t\t\tstruct {\n+\t\t\t\t__le32 status;\n+\t\t\t} lo;\n+\t\t};\n+\t\tunion {\n+\t\t\t__le32 dw3;\n+\t\t\tstruct {\n+\t\t\t\t__le16 len;\n+\t\t\t\t__le16 tag;\n+\t\t\t} hi;\n+\t\t};\n+\t} qw1; /* also as r.hdr_addr */\n+};\n+\n+/**\n+ * Transmit Data Descriptor (TXGBE_TXD_TYP=DATA)\n+ **/\n+struct txgbe_tx_desc {\n+\t__le64 qw0; /* r.buffer_addr ,  w.reserved    */\n+\t__le32 dw2; /* r.cmd_type_len,  w.nxtseq_seed */\n+\t__le32 dw3; /* r.olinfo_status, w.status      */\n+};\n+\n #define RTE_PMD_TXGBE_TX_MAX_BURST 32\n #define RTE_PMD_TXGBE_RX_MAX_BURST 32\n \n+#define RX_RING_SZ ((TXGBE_RING_DESC_MAX + RTE_PMD_TXGBE_RX_MAX_BURST) * \\\n+\t\t    sizeof(struct txgbe_rx_desc))\n+\n+#define TXGBE_PTID_MASK                 0xFF\n+\n #define TXGBE_TX_MAX_SEG                    40\n \n+/**\n+ * Structure associated with each descriptor of the RX ring of a RX queue.\n+ */\n+struct txgbe_rx_entry {\n+\tstruct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */\n+};\n+\n+struct txgbe_scattered_rx_entry {\n+\tstruct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */\n+};\n+\n+/**\n+ * Structure associated with each descriptor of the TX ring of a TX queue.\n+ */\n+struct txgbe_tx_entry {\n+\tstruct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. 
*/\n+\tuint16_t next_id; /**< Index of next descriptor in ring. */\n+\tuint16_t last_id; /**< Index of last scattered descriptor. */\n+};\n+\n+/**\n+ * Structure associated with each descriptor of the TX ring of a TX queue.\n+ */\n+struct txgbe_tx_entry_v {\n+\tstruct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */\n+};\n+\n /**\n  * Structure associated with each RX queue.\n  */\n struct txgbe_rx_queue {\n \tstruct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */\n+\tvolatile struct txgbe_rx_desc *rx_ring; /**< RX ring virtual address. */\n \tuint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */\n+\tvolatile uint32_t   *rdt_reg_addr; /**< RDT register address. */\n+\tvolatile uint32_t   *rdh_reg_addr; /**< RDH register address. */\n+\tstruct txgbe_rx_entry *sw_ring; /**< address of RX software ring. */\n+\t/**< address of scattered Rx software ring. */\n+\tstruct txgbe_scattered_rx_entry *sw_sc_ring;\n+\tstruct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */\n+\tstruct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */\n \tuint16_t            nb_rx_desc; /**< number of RX descriptors. */\n+\tuint16_t            rx_tail;  /**< current value of RDT register. */\n+\tuint16_t            nb_rx_hold; /**< number of held free RX desc. */\n+\tuint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */\n+\tuint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */\n+\tuint16_t rx_free_trigger; /**< triggers rx buffer allocation */\n+\tuint16_t            rx_free_thresh; /**< max free RX desc to hold. */\n+\tuint16_t            queue_id; /**< RX queue index. */\n \tuint16_t            reg_idx;  /**< RX queue register index. */\n+\t/**< Packet type mask for different NICs. */\n+\tuint16_t            pkt_type_mask;\n+\tuint16_t            port_id;  /**< Device port identifier. */\n \tuint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */\n \tuint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */\n+\tuint8_t             rx_deferred_start; /**< not in global dev start. */\n \tuint64_t\t    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */\n+\t/** need to alloc dummy mbuf, for wraparound when scanning hw ring */\n+\tstruct rte_mbuf fake_mbuf;\n+\t/** hold packets to return to application */\n+\tstruct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2];\n };\n \n /**\n  * Structure associated with each TX queue.\n  */\n struct txgbe_tx_queue {\n+\t/** TX ring virtual address. */\n+\tvolatile struct txgbe_tx_desc *tx_ring;\n \tuint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */\n+\tunion {\n+\t\t/**< address of SW ring for scalar PMD. */\n+\t\tstruct txgbe_tx_entry *sw_ring;\n+\t\t/**< address of SW ring for vector PMD */\n+\t\tstruct txgbe_tx_entry_v *sw_ring_v;\n+\t};\n+\tvolatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */\n+\tvolatile uint32_t   *tdc_reg_addr; /**< Address of TDC register. */\n \tuint16_t            nb_tx_desc;    /**< number of TX descriptors. */\n \t/**< Start freeing TX buffers if there are less free descriptors than\n \t *   this value.\n \t */\n \tuint16_t            tx_free_thresh;\n+\tuint16_t            queue_id;      /**< TX queue index. */\n \tuint16_t            reg_idx;       /**< TX queue register index. */\n+\tuint16_t            port_id;       /**< Device port identifier. */\n+\tuint8_t             pthresh;       /**< Prefetch threshold register. */\n+\tuint8_t             hthresh;       /**< Host threshold register. 
*/\n+\tuint8_t             wthresh;       /**< Write-back threshold reg. */\n \tuint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */\n+\tconst struct txgbe_txq_ops *ops;       /**< txq ops */\n+\tuint8_t             tx_deferred_start; /**< not in global dev start. */\n+};\n+\n+struct txgbe_txq_ops {\n+\tvoid (*release_mbufs)(struct txgbe_tx_queue *txq);\n+\tvoid (*free_swring)(struct txgbe_tx_queue *txq);\n+\tvoid (*reset)(struct txgbe_tx_queue *txq);\n };\n \n+/* Takes an ethdev and a queue and sets up the tx function to be used based on\n+ * the queue parameters. Used in tx_queue_setup by primary process and then\n+ * in dev_init by secondary process when attaching to an existing ethdev.\n+ */\n+void txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq);\n+\n void txgbe_set_rx_function(struct rte_eth_dev *dev);\n \n uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);\n",
    "prefixes": [
        "v4",
        "21/58"
    ]
}
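
Updating the same resource follows the patch/put methods listed at the top.
A hedged sketch of a partial update with Python's requests library: it
assumes the instance uses token authentication ("Authorization: Token ..."),
that the caller has maintainer rights on the project, and that "state" and
"archived" are among the writable fields; the token value is a placeholder.

import requests

url = "https://patches.dpdk.org/api/patches/81303/"
headers = {"Authorization": "Token <your-api-token>"}  # placeholder credential

# PATCH sends a partial update: only the fields present in the body change.
resp = requests.patch(
    url,
    json={"state": "accepted", "archived": True},
    headers=headers,
    timeout=30,
)
resp.raise_for_status()
updated = resp.json()
print(updated["state"], updated["archived"])

A PUT would carry the full writable representation rather than a subset of
fields; otherwise the call is identical.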