get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
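
For example, the record shown below can be retrieved with a short Python script using the requests library. This is an illustrative sketch, not part of the API page; only the base URL and the patch ID (125635) come from this page.

import requests

# Illustrative sketch: fetch one patch record from the Patchwork REST API.
# Only the base URL and the patch ID (125635) are taken from this page.
BASE = "http://patches.dpdk.org/api"

def get_patch(patch_id):
    resp = requests.get(f"{BASE}/patches/{patch_id}/")
    resp.raise_for_status()
    return resp.json()

patch = get_patch(125635)
print(patch["name"], patch["state"], patch["check"])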

GET /api/patches/125635/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 125635,
    "url": "http://patches.dpdk.org/api/patches/125635/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230330062939.1206267-4-feifei.wang2@arm.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230330062939.1206267-4-feifei.wang2@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230330062939.1206267-4-feifei.wang2@arm.com",
    "date": "2023-03-30T06:29:39",
    "name": "[v5,3/3] net/ixgbe: implement recycle buffer mode",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8e58860d7109e482db6ba824a3cca2965eaf5c70",
    "submitter": {
        "id": 1771,
        "url": "http://patches.dpdk.org/api/people/1771/?format=api",
        "name": "Feifei Wang",
        "email": "feifei.wang2@arm.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230330062939.1206267-4-feifei.wang2@arm.com/mbox/",
    "series": [
        {
            "id": 27580,
            "url": "http://patches.dpdk.org/api/series/27580/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=27580",
            "date": "2023-03-30T06:29:36",
            "name": "Recycle buffers from Tx to Rx",
            "version": 5,
            "mbox": "http://patches.dpdk.org/series/27580/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/125635/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/125635/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B3F9B4286D;\n\tThu, 30 Mar 2023 08:30:08 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D660D42D13;\n\tThu, 30 Mar 2023 08:30:01 +0200 (CEST)",
            "from foss.arm.com (foss.arm.com [217.140.110.172])\n by mails.dpdk.org (Postfix) with ESMTP id A1ADE42D0C\n for <dev@dpdk.org>; Thu, 30 Mar 2023 08:29:59 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])\n by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 7D21C2F4;\n Wed, 29 Mar 2023 23:30:43 -0700 (PDT)",
            "from net-x86-dell-8268.shanghai.arm.com\n (net-x86-dell-8268.shanghai.arm.com [10.169.210.116])\n by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 384C73F663;\n Wed, 29 Mar 2023 23:29:55 -0700 (PDT)"
        ],
        "From": "Feifei Wang <feifei.wang2@arm.com>",
        "To": "Qiming Yang <qiming.yang@intel.com>,\n\tWenjun Wu <wenjun1.wu@intel.com>",
        "Cc": "dev@dpdk.org, konstantin.v.ananyev@yandex.ru, mb@smartsharesystems.com,\n nd@arm.com, Feifei Wang <feifei.wang2@arm.com>,\n Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,\n Ruifeng Wang <ruifeng.wang@arm.com>",
        "Subject": "[PATCH v5 3/3] net/ixgbe: implement recycle buffer mode",
        "Date": "Thu, 30 Mar 2023 14:29:39 +0800",
        "Message-Id": "<20230330062939.1206267-4-feifei.wang2@arm.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230330062939.1206267-1-feifei.wang2@arm.com>",
        "References": "<20211224164613.32569-1-feifei.wang2@arm.com>\n <20230330062939.1206267-1-feifei.wang2@arm.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Define specific function implementation for ixgbe driver.\nCurrently, recycle buffer mode can support 128bit\nvector path. And can be enabled both in fast free and\nno fast free mode.\n\nSuggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\nSigned-off-by: Feifei Wang <feifei.wang2@arm.com>\nReviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>\nReviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\n---\n drivers/net/ixgbe/ixgbe_ethdev.c |   1 +\n drivers/net/ixgbe/ixgbe_ethdev.h |   3 +\n drivers/net/ixgbe/ixgbe_rxtx.c   | 153 +++++++++++++++++++++++++++++++\n drivers/net/ixgbe/ixgbe_rxtx.h   |   4 +\n 4 files changed, 161 insertions(+)",
    "diff": "diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c\nindex 88118bc305..3bada9abbd 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.c\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.c\n@@ -543,6 +543,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {\n \t.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,\n \t.rxq_info_get         = ixgbe_rxq_info_get,\n \t.txq_info_get         = ixgbe_txq_info_get,\n+\t.rxq_buf_recycle_info_get = ixgbe_rxq_buf_recycle_info_get,\n \t.timesync_enable      = ixgbe_timesync_enable,\n \t.timesync_disable     = ixgbe_timesync_disable,\n \t.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,\ndiff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h\nindex 48290af512..ca6aa0da64 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.h\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.h\n@@ -625,6 +625,9 @@ void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n \tstruct rte_eth_txq_info *qinfo);\n \n+void ixgbe_rxq_buf_recycle_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\t\tstruct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info);\n+\n int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);\n \n void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c\nindex c9d6ca9efe..ee27121315 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.c\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.c\n@@ -953,6 +953,133 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \treturn nb_tx;\n }\n \n+uint16_t\n+ixgbe_tx_buf_stash_vec(void *tx_queue,\n+\t\tstruct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info)\n+{\n+\tstruct ixgbe_tx_queue *txq = tx_queue;\n+\tstruct ixgbe_tx_entry *txep;\n+\tstruct rte_mbuf **rxep;\n+\tstruct rte_mbuf *m[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];\n+\tint i, j, n;\n+\tuint32_t status;\n+\tuint16_t avail = 0;\n+\tuint16_t buf_ring_size = rxq_buf_recycle_info->buf_ring_size;\n+\tuint16_t mask = rxq_buf_recycle_info->buf_ring_size - 1;\n+\tuint16_t refill_request = rxq_buf_recycle_info->refill_request;\n+\tuint16_t refill_head = *rxq_buf_recycle_info->refill_head;\n+\tuint16_t receive_tail = *rxq_buf_recycle_info->receive_tail;\n+\n+\t/* Get available recycling Rx buffers. */\n+\tavail = (buf_ring_size - (refill_head - receive_tail)) & mask;\n+\n+\t/* Check Tx free thresh and Rx available space. */\n+\tif (txq->nb_tx_free > txq->tx_free_thresh || avail <= txq->tx_rs_thresh)\n+\t\treturn 0;\n+\n+\t/* check DD bits on threshold descriptor */\n+\tstatus = txq->tx_ring[txq->tx_next_dd].wb.status;\n+\tif (!(status & IXGBE_ADVTXD_STAT_DD))\n+\t\treturn 0;\n+\n+\tn = txq->tx_rs_thresh;\n+\n+\t/* Buffer recycle can only support no ring buffer wraparound.\n+\t * Two case for this:\n+\t *\n+\t * case 1: The refill head of Rx buffer ring needs to be aligned with\n+\t * buffer ring size. In this case, the number of Tx freeing buffers\n+\t * should be equal to refill_request.\n+\t *\n+\t * case 2: The refill head of Rx ring buffer does not need to be aligned\n+\t * with buffer ring size. 
In this case, the update of refill head can not\n+\t * exceed the Rx buffer ring size.\n+\t */\n+\tif (refill_request != n ||\n+\t\t(!refill_request && (refill_head + n > buf_ring_size)))\n+\t\treturn 0;\n+\n+\t/* First buffer to free from S/W ring is at index\n+\t * tx_next_dd - (tx_rs_thresh-1).\n+\t */\n+\ttxep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];\n+\trxep = rxq_buf_recycle_info->buf_ring;\n+\trxep += refill_head;\n+\n+\tif (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {\n+\t\t/* Directly put mbufs from Tx to Rx. */\n+\t\tfor (i = 0; i < n; i++, rxep++, txep++)\n+\t\t\t*rxep = txep[0].mbuf;\n+\t} else {\n+\t\tfor (i = 0, j = 0; i < n; i++) {\n+\t\t\t/* Avoid txq contains buffers from expected mempoo. */\n+\t\t\tif (unlikely(rxq_buf_recycle_info->mp\n+\t\t\t\t\t\t!= txep[i].mbuf->pool))\n+\t\t\t\treturn 0;\n+\n+\t\t\tm[j] = rte_pktmbuf_prefree_seg(txep[i].mbuf);\n+\n+\t\t\t/* In case 1, each of Tx buffers should be the\n+\t\t\t * last reference.\n+\t\t\t */\n+\t\t\tif (unlikely(m[j] == NULL && refill_request))\n+\t\t\t\treturn 0;\n+\t\t\t/* In case 2, the number of valid Tx free\n+\t\t\t * buffers should be recorded.\n+\t\t\t */\n+\t\t\tj++;\n+\t\t}\n+\t\trte_memcpy(rxep, m, sizeof(void *) * j);\n+\t}\n+\n+\t/* Update counters for Tx. */\n+\ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);\n+\ttxq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);\n+\tif (txq->tx_next_dd >= txq->nb_tx_desc)\n+\t\ttxq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);\n+\n+\treturn n;\n+}\n+\n+uint16_t\n+ixgbe_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb)\n+{\n+\tstruct ixgbe_rx_queue *rxq = rx_queue;\n+\tstruct ixgbe_rx_entry *rxep;\n+\tvolatile union ixgbe_adv_rx_desc *rxdp;\n+\tuint16_t rx_id;\n+\tuint64_t paddr;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\trxdp = rxq->rx_ring + rxq->rxrearm_start;\n+\trxep = &rxq->sw_ring[rxq->rxrearm_start];\n+\n+\tfor (i = 0; i < nb; i++) {\n+\t\t/* Initialize rxdp descs. 
*/\n+\t\tpaddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;\n+\t\tdma_addr = rte_cpu_to_le_64(paddr);\n+\t\t/* flush desc with pa dma_addr */\n+\t\trxdp[i].read.hdr_addr = 0;\n+\t\trxdp[i].read.pkt_addr = dma_addr;\n+\t}\n+\n+\t/* Update the descriptor initializer index */\n+\trxq->rxrearm_start += nb;\n+\tif (rxq->rxrearm_start >= rxq->nb_rx_desc)\n+\t\trxq->rxrearm_start = 0;\n+\n+\trxq->rxrearm_nb -= nb;\n+\n+\trx_id = (uint16_t)((rxq->rxrearm_start == 0) ?\n+\t\t\t(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));\n+\n+\t/* Update the tail pointer on the NIC */\n+\tIXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);\n+\n+\treturn nb;\n+}\n+\n /*********************************************************************\n  *\n  *  TX prep functions\n@@ -2558,6 +2685,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)\n \t\t\t\t(rte_eal_process_type() != RTE_PROC_PRIMARY ||\n \t\t\t\t\tixgbe_txq_vec_setup(txq) == 0)) {\n \t\t\tPMD_INIT_LOG(DEBUG, \"Vector tx enabled.\");\n+\t\t\tdev->tx_buf_stash = ixgbe_tx_buf_stash_vec;\n \t\t\tdev->tx_pkt_burst = ixgbe_xmit_pkts_vec;\n \t\t} else\n \t\tdev->tx_pkt_burst = ixgbe_xmit_pkts_simple;\n@@ -4823,6 +4951,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)\n \t\t\t\t\t    \"callback (port=%d).\",\n \t\t\t\t     dev->data->port_id);\n \n+\t\t\tdev->rx_descriptors_refill = ixgbe_rx_descriptors_refill_vec;\n \t\t\tdev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;\n \t\t} else if (adapter->rx_bulk_alloc_allowed) {\n \t\t\tPMD_INIT_LOG(DEBUG, \"Using a Scattered with bulk \"\n@@ -4852,6 +4981,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)\n \t\t\t     RTE_IXGBE_DESCS_PER_LOOP,\n \t\t\t     dev->data->port_id);\n \n+\t\tdev->rx_descriptors_refill = ixgbe_rx_descriptors_refill_vec;\n \t\tdev->rx_pkt_burst = ixgbe_recv_pkts_vec;\n \t} else if (adapter->rx_bulk_alloc_allowed) {\n \t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions are \"\n@@ -5623,6 +5753,29 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n \tqinfo->conf.tx_deferred_start = txq->tx_deferred_start;\n }\n \n+void\n+ixgbe_rxq_buf_recycle_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info)\n+{\n+\tstruct ixgbe_rx_queue *rxq;\n+\tstruct ixgbe_adapter *adapter = dev->data->dev_private;\n+\n+\trxq = dev->data->rx_queues[queue_id];\n+\n+\trxq_buf_recycle_info->buf_ring = (void *)rxq->sw_ring;\n+\trxq_buf_recycle_info->mp = rxq->mb_pool;\n+\trxq_buf_recycle_info->buf_ring_size = rxq->nb_rx_desc;\n+\trxq_buf_recycle_info->receive_tail = &rxq->rx_tail;\n+\n+\tif (adapter->rx_vec_allowed) {\n+\t\trxq_buf_recycle_info->refill_request = RTE_IXGBE_RXQ_REARM_THRESH;\n+\t\trxq_buf_recycle_info->refill_head = &rxq->rxrearm_start;\n+\t} else {\n+\t\trxq_buf_recycle_info->refill_request = rxq->rx_free_thresh;\n+\t\trxq_buf_recycle_info->refill_head = &rxq->rx_free_trigger;\n+\t}\n+}\n+\n /*\n  * [VF] Initializes Receive Unit.\n  */\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h\nindex 668a5b9814..18f890f91a 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.h\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.h\n@@ -295,6 +295,10 @@ int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);\n extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];\n extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];\n \n+uint16_t ixgbe_tx_buf_stash_vec(void *tx_queue,\n+\t\tstruct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info);\n+uint16_t 
ixgbe_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb);\n+\n uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t\t    uint16_t nb_pkts);\n int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);\n",
    "prefixes": [
        "v5",
        "3/3"
    ]
}
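
The patch and put methods listed above let maintainers update writable fields of this record, such as state, archived, or delegate. Below is a minimal sketch of a partial update with Python requests, assuming Patchwork's token-based authentication; the token value and the chosen field values are placeholders, not taken from this page.

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "REPLACE_ME"  # placeholder: an API token generated in the Patchwork web UI

def update_patch(patch_id, **fields):
    # Partial update via HTTP PATCH; the Allow header above also lists PUT.
    resp = requests.patch(
        f"{BASE}/patches/{patch_id}/",
        headers={"Authorization": f"Token {TOKEN}"},
        json=fields,
    )
    resp.raise_for_status()
    return resp.json()

# Illustrative only: a maintainer marking this patch accepted and archived.
updated = update_patch(125635, state="accepted", archived=True)
print(updated["state"], updated["archived"])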