get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (a full replacement of its writable fields).
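For illustration only (not part of the API page itself): a minimal Python sketch, using the third-party `requests` library, of how a client might drive these endpoints. The URL is the one from the example below; "YOUR_TOKEN" is a placeholder, and the `Authorization: Token ...` header is an assumption based on the standard Django REST Framework token authentication that Patchwork uses for write access.

import requests

URL = "http://patches.dpdk.org/api/patches/94339/"

# GET: read the patch object (no authentication required)
patch = requests.get(URL).json()
print(patch["name"], "->", patch["state"])

# PATCH: partial update -- send only the fields being changed.
# The state value is an example; write access needs a valid token.
resp = requests.patch(
    URL,
    headers={"Authorization": "Token YOUR_TOKEN"},
    json={"state": "accepted"},
)
resp.raise_for_status()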

GET /api/patches/94339/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94339,
    "url": "http://patches.dpdk.org/api/patches/94339/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210617110005.4132926-16-jiawenwu@trustnetic.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210617110005.4132926-16-jiawenwu@trustnetic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210617110005.4132926-16-jiawenwu@trustnetic.com",
    "date": "2021-06-17T11:00:01",
    "name": "[v6,15/19] net/ngbe: add simple Tx flow",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "cdc9e130f083d4bc404ef18fc98ca43f9846c61a",
    "submitter": {
        "id": 1932,
        "url": "http://patches.dpdk.org/api/people/1932/?format=api",
        "name": "Jiawen Wu",
        "email": "jiawenwu@trustnetic.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210617110005.4132926-16-jiawenwu@trustnetic.com/mbox/",
    "series": [
        {
            "id": 17372,
            "url": "http://patches.dpdk.org/api/series/17372/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=17372",
            "date": "2021-06-17T10:59:46",
            "name": "net: ngbe PMD",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/17372/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/94339/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/94339/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5D774A0C4D;\n\tThu, 17 Jun 2021 12:59:53 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A57AE41164;\n\tThu, 17 Jun 2021 12:58:37 +0200 (CEST)",
            "from smtpbgeu2.qq.com (smtpbgeu2.qq.com [18.194.254.142])\n by mails.dpdk.org (Postfix) with ESMTP id EBFCD410F4\n for <dev@dpdk.org>; Thu, 17 Jun 2021 12:58:35 +0200 (CEST)",
            "from wxdbg.localdomain.com (unknown [183.129.236.74])\n by esmtp6.qq.com (ESMTP) with\n id ; Thu, 17 Jun 2021 18:58:32 +0800 (CST)"
        ],
        "X-QQ-mid": "bizesmtp46t1623927512tw1v30re",
        "X-QQ-SSF": "01400000000000D0E000B00A0000000",
        "X-QQ-FEAT": "d+oHhNLBBfvnDWbadDsqKF7DIqXzeFo5RqX43KSvEM2Fgq5D+QwaD4jacT/nN\n bVtj3Vlr5tnZEemzUUtRM7JKiOnxsGpJcN4nGEk5sF0zlpg2ZIuI5I/d4X8hc3n4XlQZ45s\n gx30i8gOXuicWaEkeIC5wC/Nk43d1JsQ+esz+AoFHY9Z6b/ojUVhkYlPuDfXkl5GnDvrKr8\n +lcQenYp/6aG7eHaaPIQ+NHzRgv/D83uXfvaca2wIHmg1Fu5TND3EfcatpBJumFKFOTx0Bd\n CKtV+7io63/rnob+Wn6w20tgGN1Fvp+PyHiI8xDkeOc0/Ab8fRaE+KP6ewdSqpvlrwyk9BA\n Pk2Z1jK/zrsj1QRvjIFyCMgWMMyTg==",
        "X-QQ-GoodBg": "2",
        "From": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "To": "dev@dpdk.org",
        "Cc": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "Date": "Thu, 17 Jun 2021 19:00:01 +0800",
        "Message-Id": "<20210617110005.4132926-16-jiawenwu@trustnetic.com>",
        "X-Mailer": "git-send-email 2.27.0",
        "In-Reply-To": "<20210617110005.4132926-1-jiawenwu@trustnetic.com>",
        "References": "<20210617110005.4132926-1-jiawenwu@trustnetic.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-QQ-SENDSIZE": "520",
        "Feedback-ID": "bizesmtp:trustnetic.com:qybgforeign:qybgforeign5",
        "X-QQ-Bgrelay": "1",
        "Subject": "[dpdk-dev] [PATCH v6 15/19] net/ngbe: add simple Tx flow",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Initialize device with the simplest transmit functions.\n\nSigned-off-by: Jiawen Wu <jiawenwu@trustnetic.com>\n---\n drivers/net/ngbe/ngbe_ethdev.c |   1 +\n drivers/net/ngbe/ngbe_ethdev.h |   3 +\n drivers/net/ngbe/ngbe_rxtx.c   | 228 +++++++++++++++++++++++++++++++++\n drivers/net/ngbe/ngbe_rxtx.h   |  27 ++++\n 4 files changed, 259 insertions(+)",
    "diff": "diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c\nindex 269186acc0..6b4d5ac65b 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.c\n+++ b/drivers/net/ngbe/ngbe_ethdev.c\n@@ -111,6 +111,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)\n \n \teth_dev->dev_ops = &ngbe_eth_dev_ops;\n \teth_dev->rx_pkt_burst = &ngbe_recv_pkts;\n+\teth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;\n \n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n \t\treturn 0;\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h\nindex 8fb7c8a19b..c52cac2ca1 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.h\n+++ b/drivers/net/ngbe/ngbe_ethdev.h\n@@ -75,6 +75,9 @@ int  ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n uint16_t ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts);\n \n+uint16_t ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n int\n ngbe_dev_link_update_share(struct rte_eth_dev *dev,\n \t\tint wait_to_complete);\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c\nindex f97fceaf7c..6dde996659 100644\n--- a/drivers/net/ngbe/ngbe_rxtx.c\n+++ b/drivers/net/ngbe/ngbe_rxtx.c\n@@ -20,6 +20,234 @@\n  */\n #define rte_ngbe_prefetch(p)   rte_prefetch0(p)\n \n+/*********************************************************************\n+ *\n+ *  Tx functions\n+ *\n+ **********************************************************************/\n+\n+/*\n+ * Check for descriptors with their DD bit set and free mbufs.\n+ * Return the total number of buffers freed.\n+ */\n+static __rte_always_inline int\n+ngbe_tx_free_bufs(struct ngbe_tx_queue *txq)\n+{\n+\tstruct ngbe_tx_entry *txep;\n+\tuint32_t status;\n+\tint i, nb_free = 0;\n+\tstruct rte_mbuf *m, *free[RTE_NGBE_TX_MAX_FREE_BUF_SZ];\n+\n+\t/* check DD bit on threshold descriptor */\n+\tstatus = txq->tx_ring[txq->tx_next_dd].dw3;\n+\tif (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {\n+\t\tif (txq->nb_tx_free >> 1 < txq->tx_free_thresh)\n+\t\t\tngbe_set32_masked(txq->tdc_reg_addr,\n+\t\t\t\tNGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);\n+\t\treturn 0;\n+\t}\n+\n+\t/*\n+\t * first buffer to free from S/W ring is at index\n+\t * tx_next_dd - (tx_free_thresh-1)\n+\t */\n+\ttxep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];\n+\tfor (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {\n+\t\t/* free buffers one at a time */\n+\t\tm = rte_pktmbuf_prefree_seg(txep->mbuf);\n+\t\ttxep->mbuf = NULL;\n+\n+\t\tif (unlikely(m == NULL))\n+\t\t\tcontinue;\n+\n+\t\tif (nb_free >= RTE_NGBE_TX_MAX_FREE_BUF_SZ ||\n+\t\t    (nb_free > 0 && m->pool != free[0]->pool)) {\n+\t\t\trte_mempool_put_bulk(free[0]->pool,\n+\t\t\t\t\t     (void **)free, nb_free);\n+\t\t\tnb_free = 0;\n+\t\t}\n+\n+\t\tfree[nb_free++] = m;\n+\t}\n+\n+\tif (nb_free > 0)\n+\t\trte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);\n+\n+\t/* buffers were freed, update counters */\n+\ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);\n+\ttxq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);\n+\tif (txq->tx_next_dd >= txq->nb_tx_desc)\n+\t\ttxq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);\n+\n+\treturn txq->tx_free_thresh;\n+}\n+\n+/* Populate 4 descriptors with data from 4 mbufs */\n+static inline void\n+tx4(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)\n+{\n+\tuint64_t buf_dma_addr;\n+\tuint32_t pkt_len;\n+\tint i;\n+\n+\tfor (i = 0; i < 4; ++i, ++txdp, ++pkts) 
{\n+\t\tbuf_dma_addr = rte_mbuf_data_iova(*pkts);\n+\t\tpkt_len = (*pkts)->data_len;\n+\n+\t\t/* write data to descriptor */\n+\t\ttxdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);\n+\t\ttxdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |\n+\t\t\t\t\tNGBE_TXD_DATLEN(pkt_len));\n+\t\ttxdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));\n+\n+\t\trte_prefetch0(&(*pkts)->pool);\n+\t}\n+}\n+\n+/* Populate 1 descriptor with data from 1 mbuf */\n+static inline void\n+tx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)\n+{\n+\tuint64_t buf_dma_addr;\n+\tuint32_t pkt_len;\n+\n+\tbuf_dma_addr = rte_mbuf_data_iova(*pkts);\n+\tpkt_len = (*pkts)->data_len;\n+\n+\t/* write data to descriptor */\n+\ttxdp->qw0 = cpu_to_le64(buf_dma_addr);\n+\ttxdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |\n+\t\t\t\tNGBE_TXD_DATLEN(pkt_len));\n+\ttxdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));\n+\n+\trte_prefetch0(&(*pkts)->pool);\n+}\n+\n+/*\n+ * Fill H/W descriptor ring with mbuf data.\n+ * Copy mbuf pointers to the S/W ring.\n+ */\n+static inline void\n+ngbe_tx_fill_hw_ring(struct ngbe_tx_queue *txq, struct rte_mbuf **pkts,\n+\t\t      uint16_t nb_pkts)\n+{\n+\tvolatile struct ngbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];\n+\tstruct ngbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];\n+\tconst int N_PER_LOOP = 4;\n+\tconst int N_PER_LOOP_MASK = N_PER_LOOP - 1;\n+\tint mainpart, leftover;\n+\tint i, j;\n+\n+\t/*\n+\t * Process most of the packets in chunks of N pkts.  Any\n+\t * leftover packets will get processed one at a time.\n+\t */\n+\tmainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));\n+\tleftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));\n+\tfor (i = 0; i < mainpart; i += N_PER_LOOP) {\n+\t\t/* Copy N mbuf pointers to the S/W ring */\n+\t\tfor (j = 0; j < N_PER_LOOP; ++j)\n+\t\t\t(txep + i + j)->mbuf = *(pkts + i + j);\n+\t\ttx4(txdp + i, pkts + i);\n+\t}\n+\n+\tif (unlikely(leftover > 0)) {\n+\t\tfor (i = 0; i < leftover; ++i) {\n+\t\t\t(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);\n+\t\t\ttx1(txdp + mainpart + i, pkts + mainpart + i);\n+\t\t}\n+\t}\n+}\n+\n+static inline uint16_t\n+tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t     uint16_t nb_pkts)\n+{\n+\tstruct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;\n+\tuint16_t n = 0;\n+\n+\t/*\n+\t * Begin scanning the H/W ring for done descriptors when the\n+\t * number of available descriptors drops below tx_free_thresh.\n+\t * For each done descriptor, free the associated buffer.\n+\t */\n+\tif (txq->nb_tx_free < txq->tx_free_thresh)\n+\t\tngbe_tx_free_bufs(txq);\n+\n+\t/* Only use descriptors that are available */\n+\tnb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);\n+\tif (unlikely(nb_pkts == 0))\n+\t\treturn 0;\n+\n+\t/* Use exactly nb_pkts descriptors */\n+\ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);\n+\n+\t/*\n+\t * At this point, we know there are enough descriptors in the\n+\t * ring to transmit all the packets.  This assumes that each\n+\t * mbuf contains a single segment, and that no new offloads\n+\t * are expected, which would require a new context descriptor.\n+\t */\n+\n+\t/*\n+\t * See if we're going to wrap-around. If so, handle the top\n+\t * of the descriptor ring first, then do the bottom.  
If not,\n+\t * the processing looks just like the \"bottom\" part anyway...\n+\t */\n+\tif ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {\n+\t\tn = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);\n+\t\tngbe_tx_fill_hw_ring(txq, tx_pkts, n);\n+\t\ttxq->tx_tail = 0;\n+\t}\n+\n+\t/* Fill H/W descriptor ring with mbuf data */\n+\tngbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));\n+\ttxq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));\n+\n+\t/*\n+\t * Check for wrap-around. This would only happen if we used\n+\t * up to the last descriptor in the ring, no more, no less.\n+\t */\n+\tif (txq->tx_tail >= txq->nb_tx_desc)\n+\t\ttxq->tx_tail = 0;\n+\n+\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u tx_tail=%u nb_tx=%u\",\n+\t\t   (uint16_t)txq->port_id, (uint16_t)txq->queue_id,\n+\t\t   (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);\n+\n+\t/* update tail pointer */\n+\trte_wmb();\n+\tngbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);\n+\n+\treturn nb_pkts;\n+}\n+\n+uint16_t\n+ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t       uint16_t nb_pkts)\n+{\n+\tuint16_t nb_tx;\n+\n+\t/* Try to transmit at least chunks of TX_MAX_BURST pkts */\n+\tif (likely(nb_pkts <= RTE_PMD_NGBE_TX_MAX_BURST))\n+\t\treturn tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);\n+\n+\t/* transmit more than the max burst, in chunks of TX_MAX_BURST */\n+\tnb_tx = 0;\n+\twhile (nb_pkts) {\n+\t\tuint16_t ret, n;\n+\n+\t\tn = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_TX_MAX_BURST);\n+\t\tret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);\n+\t\tnb_tx = (uint16_t)(nb_tx + ret);\n+\t\tnb_pkts = (uint16_t)(nb_pkts - ret);\n+\t\tif (ret < n)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn nb_tx;\n+}\n+\n /*********************************************************************\n  *\n  *  Rx functions\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h\nindex 1c8fd76f12..616b41a300 100644\n--- a/drivers/net/ngbe/ngbe_rxtx.h\n+++ b/drivers/net/ngbe/ngbe_rxtx.h\n@@ -147,7 +147,34 @@ struct ngbe_tx_desc {\n \trte_le32_t dw3; /* r.olinfo_status, w.status      */\n };\n \n+/* @ngbe_tx_desc.dw2 */\n+#define NGBE_TXD_DATLEN(v)        ((0xFFFF & (v))) /* data buffer length */\n+#define NGBE_TXD_1588             ((0x1) << 19) /* IEEE1588 time stamp */\n+#define NGBE_TXD_DATA             ((0x0) << 20) /* data descriptor */\n+#define NGBE_TXD_EOP              ((0x1) << 24) /* End of Packet */\n+#define NGBE_TXD_FCS              ((0x1) << 25) /* Insert FCS */\n+#define NGBE_TXD_LINKSEC          ((0x1) << 26) /* Insert LinkSec */\n+#define NGBE_TXD_ECU              ((0x1) << 28) /* forward to ECU */\n+#define NGBE_TXD_CNTAG            ((0x1) << 29) /* insert CN tag */\n+#define NGBE_TXD_VLE              ((0x1) << 30) /* insert VLAN tag */\n+#define NGBE_TXD_TSE              ((0x1) << 31) /* transmit segmentation */\n+\n+#define NGBE_TXD_FLAGS (NGBE_TXD_FCS | NGBE_TXD_EOP)\n+\n+/* @ngbe_tx_desc.dw3 */\n+#define NGBE_TXD_DD_UNUSED        NGBE_TXD_DD\n+#define NGBE_TXD_IDX_UNUSED(v)    NGBE_TXD_IDX(v)\n+#define NGBE_TXD_CC               ((0x1) << 7) /* check context */\n+#define NGBE_TXD_IPSEC            ((0x1) << 8) /* request ipsec offload */\n+#define NGBE_TXD_L4CS             ((0x1) << 9) /* insert TCP/UDP/SCTP csum */\n+#define NGBE_TXD_IPCS             ((0x1) << 10) /* insert IPv4 csum */\n+#define NGBE_TXD_EIPCS            ((0x1) << 11) /* insert outer IP csum */\n+#define NGBE_TXD_MNGFLT           ((0x1) << 12) /* enable management filter */\n+#define NGBE_TXD_PAYLEN(v)        ((0x7FFFF & (v)) << 13) /* payload 
length */\n+\n+#define RTE_PMD_NGBE_TX_MAX_BURST 32\n #define RTE_PMD_NGBE_RX_MAX_BURST 32\n+#define RTE_NGBE_TX_MAX_FREE_BUF_SZ 64\n \n #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \\\n \t\t    sizeof(struct ngbe_rx_desc))\n",
    "prefixes": [
        "v6",
        "15/19"
    ]
}
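The "mbox" field above points at the raw patch in mbox format. As a follow-on sketch (same assumptions as above: Python with the `requests` library; the output file name is illustrative), this is one way the patch could be pulled down for `git am`:

import requests

MBOX = ("http://patches.dpdk.org/project/dpdk/patch/"
        "20210617110005.4132926-16-jiawenwu@trustnetic.com/mbox/")

# Download the raw mbox and save it to a local file
resp = requests.get(MBOX)
resp.raise_for_status()
with open("ngbe-simple-tx.patch", "w") as f:
    f.write(resp.text)
# Then, inside a DPDK checkout:  git am ngbe-simple-tx.patch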