get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.

GET /api/patches/94340/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94340,
    "url": "https://patches.dpdk.org/api/patches/94340/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210617110005.4132926-15-jiawenwu@trustnetic.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210617110005.4132926-15-jiawenwu@trustnetic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210617110005.4132926-15-jiawenwu@trustnetic.com",
    "date": "2021-06-17T11:00:00",
    "name": "[v6,14/19] net/ngbe: add simple Rx flow",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "c73dcb9158bf56a27169753203525b61bab1d842",
    "submitter": {
        "id": 1932,
        "url": "https://patches.dpdk.org/api/people/1932/?format=api",
        "name": "Jiawen Wu",
        "email": "jiawenwu@trustnetic.com"
    },
    "delegate": {
        "id": 3961,
        "url": "https://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210617110005.4132926-15-jiawenwu@trustnetic.com/mbox/",
    "series": [
        {
            "id": 17372,
            "url": "https://patches.dpdk.org/api/series/17372/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17372",
            "date": "2021-06-17T10:59:46",
            "name": "net: ngbe PMD",
            "version": 6,
            "mbox": "https://patches.dpdk.org/series/17372/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/94340/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/94340/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B902EA0C4D;\n\tThu, 17 Jun 2021 12:59:58 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C4C9641168;\n\tThu, 17 Jun 2021 12:58:38 +0200 (CEST)",
            "from smtpbgau1.qq.com (smtpbgau1.qq.com [54.206.16.166])\n by mails.dpdk.org (Postfix) with ESMTP id 1EB0D4115C\n for <dev@dpdk.org>; Thu, 17 Jun 2021 12:58:35 +0200 (CEST)",
            "from wxdbg.localdomain.com (unknown [183.129.236.74])\n by esmtp6.qq.com (ESMTP) with\n id ; Thu, 17 Jun 2021 18:58:30 +0800 (CST)"
        ],
        "X-QQ-mid": "bizesmtp46t1623927510tvv2a0ul",
        "X-QQ-SSF": "01400000000000D0E000B00A0000000",
        "X-QQ-FEAT": "VvqLbp3JaDNJd4EkO4vlIycaRciYOm5hLoBGGuwilU3VJwI7x8DnNCMTLqc4a\n aaT4gpTKdaEWp7+ZKfaWgccKZrmaEDnqczSF9aYcWLWjxO5xa4jNAvG7LwUuLAfPfS8o0O9\n nzYgxiD9lyQl3nvDGUVl7iDHam8NvpjZNbDagotGc8tbsp2O8tRWbYmTZiHtP/UYr+8XhcV\n MKPvpUtefAmf3/RwKZ9XALBZOekf7GtkIBMwhmLqPueoeD2aogV3bTeU0lw2+7+UMXZ0WaG\n E1PwOyb2oG7XFKPt5BQr2xwxCxbeRxTSVhcXHaf/QbNPRaZic/Keyvr2gdF4bPz+FY5IAZm\n mfV7qHexJfjuvVrNveFRJlZTGTMVidkYcImhK04",
        "X-QQ-GoodBg": "2",
        "From": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "To": "dev@dpdk.org",
        "Cc": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "Date": "Thu, 17 Jun 2021 19:00:00 +0800",
        "Message-Id": "<20210617110005.4132926-15-jiawenwu@trustnetic.com>",
        "X-Mailer": "git-send-email 2.27.0",
        "In-Reply-To": "<20210617110005.4132926-1-jiawenwu@trustnetic.com>",
        "References": "<20210617110005.4132926-1-jiawenwu@trustnetic.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-QQ-SENDSIZE": "520",
        "Feedback-ID": "bizesmtp:trustnetic.com:qybgforeign:qybgforeign1",
        "X-QQ-Bgrelay": "1",
        "Subject": "[dpdk-dev] [PATCH v6 14/19] net/ngbe: add simple Rx flow",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Initialize device with the simplest receive function.\n\nSigned-off-by: Jiawen Wu <jiawenwu@trustnetic.com>\n---\n drivers/net/ngbe/ngbe_ethdev.c |   1 +\n drivers/net/ngbe/ngbe_ethdev.h |   3 +\n drivers/net/ngbe/ngbe_rxtx.c   | 168 +++++++++++++++++++++++++++++++++\n drivers/net/ngbe/ngbe_rxtx.h   |  81 ++++++++++++++++\n 4 files changed, 253 insertions(+)",
    "diff": "diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c\nindex d6f93cfe46..269186acc0 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.c\n+++ b/drivers/net/ngbe/ngbe_ethdev.c\n@@ -110,6 +110,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)\n \tPMD_INIT_FUNC_TRACE();\n \n \teth_dev->dev_ops = &ngbe_eth_dev_ops;\n+\teth_dev->rx_pkt_burst = &ngbe_recv_pkts;\n \n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n \t\treturn 0;\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h\nindex 131671c313..8fb7c8a19b 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.h\n+++ b/drivers/net/ngbe/ngbe_ethdev.h\n@@ -72,6 +72,9 @@ int  ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \t\tuint16_t nb_tx_desc, unsigned int socket_id,\n \t\tconst struct rte_eth_txconf *tx_conf);\n \n+uint16_t ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n int\n ngbe_dev_link_update_share(struct rte_eth_dev *dev,\n \t\tint wait_to_complete);\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c\nindex da9150b2f1..f97fceaf7c 100644\n--- a/drivers/net/ngbe/ngbe_rxtx.c\n+++ b/drivers/net/ngbe/ngbe_rxtx.c\n@@ -15,6 +15,174 @@\n #include \"ngbe_ethdev.h\"\n #include \"ngbe_rxtx.h\"\n \n+/*\n+ * Prefetch a cache line into all cache levels.\n+ */\n+#define rte_ngbe_prefetch(p)   rte_prefetch0(p)\n+\n+/*********************************************************************\n+ *\n+ *  Rx functions\n+ *\n+ **********************************************************************/\n+uint16_t\n+ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tstruct ngbe_rx_queue *rxq;\n+\tvolatile struct ngbe_rx_desc *rx_ring;\n+\tvolatile struct ngbe_rx_desc *rxdp;\n+\tstruct ngbe_rx_entry *sw_ring;\n+\tstruct ngbe_rx_entry *rxe;\n+\tstruct rte_mbuf *rxm;\n+\tstruct rte_mbuf *nmb;\n+\tstruct ngbe_rx_desc rxd;\n+\tuint64_t dma_addr;\n+\tuint32_t staterr;\n+\tuint16_t pkt_len;\n+\tuint16_t rx_id;\n+\tuint16_t nb_rx;\n+\tuint16_t nb_hold;\n+\n+\tnb_rx = 0;\n+\tnb_hold = 0;\n+\trxq = rx_queue;\n+\trx_id = rxq->rx_tail;\n+\trx_ring = rxq->rx_ring;\n+\tsw_ring = rxq->sw_ring;\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];\n+\twhile (nb_rx < nb_pkts) {\n+\t\t/*\n+\t\t * The order of operations here is important as the DD status\n+\t\t * bit must not be read after any other descriptor fields.\n+\t\t * rx_ring and rxdp are pointing to volatile data so the order\n+\t\t * of accesses cannot be reordered by the compiler. If they were\n+\t\t * not volatile, they could be reordered which could lead to\n+\t\t * using invalid descriptor fields when read from rxd.\n+\t\t */\n+\t\trxdp = &rx_ring[rx_id];\n+\t\tstaterr = rxdp->qw1.lo.status;\n+\t\tif (!(staterr & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))\n+\t\t\tbreak;\n+\t\trxd = *rxdp;\n+\n+\t\t/*\n+\t\t * End of packet.\n+\t\t *\n+\t\t * If the NGBE_RXD_STAT_EOP flag is not set, the Rx packet\n+\t\t * is likely to be invalid and to be dropped by the various\n+\t\t * validation checks performed by the network stack.\n+\t\t *\n+\t\t * Allocate a new mbuf to replenish the RX ring descriptor.\n+\t\t * If the allocation fails:\n+\t\t *    - arrange for that Rx descriptor to be the first one\n+\t\t *      being parsed the next time the receive function is\n+\t\t *      invoked [on the same queue].\n+\t\t *\n+\t\t *    - Stop parsing the Rx ring and return immediately.\n+\t\t *\n+\t\t * This policy do not drop the packet received in the Rx\n+\t\t * descriptor for which the allocation of a new mbuf failed.\n+\t\t * Thus, it allows that packet to be later retrieved if\n+\t\t * mbuf have been freed in the mean time.\n+\t\t * As a side effect, holding Rx descriptors instead of\n+\t\t * systematically giving them back to the NIC may lead to\n+\t\t * Rx ring exhaustion situations.\n+\t\t * However, the NIC can gracefully prevent such situations\n+\t\t * to happen by sending specific \"back-pressure\" flow control\n+\t\t * frames to its peer(s).\n+\t\t */\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_id=%u \"\n+\t\t\t   \"ext_err_stat=0x%08x pkt_len=%u\",\n+\t\t\t   (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,\n+\t\t\t   (uint16_t)rx_id, (uint32_t)staterr,\n+\t\t\t   (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));\n+\n+\t\tnmb = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\tif (nmb == NULL) {\n+\t\t\tPMD_RX_LOG(DEBUG, \"Rx mbuf alloc failed port_id=%u \"\n+\t\t\t\t   \"queue_id=%u\", (uint16_t)rxq->port_id,\n+\t\t\t\t   (uint16_t)rxq->queue_id);\n+\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tnb_hold++;\n+\t\trxe = &sw_ring[rx_id];\n+\t\trx_id++;\n+\t\tif (rx_id == rxq->nb_rx_desc)\n+\t\t\trx_id = 0;\n+\n+\t\t/* Prefetch next mbuf while processing current one. */\n+\t\trte_ngbe_prefetch(sw_ring[rx_id].mbuf);\n+\n+\t\t/*\n+\t\t * When next Rx descriptor is on a cache-line boundary,\n+\t\t * prefetch the next 4 Rx descriptors and the next 8 pointers\n+\t\t * to mbufs.\n+\t\t */\n+\t\tif ((rx_id & 0x3) == 0) {\n+\t\t\trte_ngbe_prefetch(&rx_ring[rx_id]);\n+\t\t\trte_ngbe_prefetch(&sw_ring[rx_id]);\n+\t\t}\n+\n+\t\trxm = rxe->mbuf;\n+\t\trxe->mbuf = nmb;\n+\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n+\t\tNGBE_RXD_HDRADDR(rxdp, 0);\n+\t\tNGBE_RXD_PKTADDR(rxdp, dma_addr);\n+\n+\t\t/*\n+\t\t * Initialize the returned mbuf.\n+\t\t * setup generic mbuf fields:\n+\t\t *    - number of segments,\n+\t\t *    - next segment,\n+\t\t *    - packet length,\n+\t\t *    - Rx port identifier.\n+\t\t */\n+\t\tpkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len));\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);\n+\t\trxm->nb_segs = 1;\n+\t\trxm->next = NULL;\n+\t\trxm->pkt_len = pkt_len;\n+\t\trxm->data_len = pkt_len;\n+\t\trxm->port = rxq->port_id;\n+\n+\t\t/*\n+\t\t * Store the mbuf address into the next entry of the array\n+\t\t * of returned packets.\n+\t\t */\n+\t\trx_pkts[nb_rx++] = rxm;\n+\t}\n+\trxq->rx_tail = rx_id;\n+\n+\t/*\n+\t * If the number of free Rx descriptors is greater than the Rx free\n+\t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n+\t * register.\n+\t * Update the RDT with the value of the last processed Rx descriptor\n+\t * minus 1, to guarantee that the RDT register is never equal to the\n+\t * RDH register, which creates a \"full\" ring situation from the\n+\t * hardware point of view...\n+\t */\n+\tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_tail=%u \"\n+\t\t\t   \"nb_hold=%u nb_rx=%u\",\n+\t\t\t   (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,\n+\t\t\t   (uint16_t)rx_id, (uint16_t)nb_hold,\n+\t\t\t   (uint16_t)nb_rx);\n+\t\trx_id = (uint16_t)((rx_id == 0) ?\n+\t\t\t\t(rxq->nb_rx_desc - 1) : (rx_id - 1));\n+\t\tngbe_set32(rxq->rdt_reg_addr, rx_id);\n+\t\tnb_hold = 0;\n+\t}\n+\trxq->nb_rx_hold = nb_hold;\n+\treturn nb_rx;\n+}\n+\n+\n /*********************************************************************\n  *\n  *  Queue management functions\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h\nindex 0f2c185dbf..1c8fd76f12 100644\n--- a/drivers/net/ngbe/ngbe_rxtx.h\n+++ b/drivers/net/ngbe/ngbe_rxtx.h\n@@ -43,6 +43,85 @@ struct ngbe_rx_desc {\n \t} qw1; /* also as r.hdr_addr */\n };\n \n+/* @ngbe_rx_desc.qw0 */\n+#define NGBE_RXD_PKTADDR(rxd, v)  \\\n+\t(((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))\n+\n+/* @ngbe_rx_desc.qw1 */\n+#define NGBE_RXD_HDRADDR(rxd, v)  \\\n+\t(((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))\n+\n+/* @ngbe_rx_desc.dw0 */\n+#define NGBE_RXD_RSSTYPE(dw)      RS(dw, 0, 0xF)\n+#define   NGBE_RSSTYPE_NONE       0\n+#define   NGBE_RSSTYPE_IPV4TCP    1\n+#define   NGBE_RSSTYPE_IPV4       2\n+#define   NGBE_RSSTYPE_IPV6TCP    3\n+#define   NGBE_RSSTYPE_IPV4SCTP   4\n+#define   NGBE_RSSTYPE_IPV6       5\n+#define   NGBE_RSSTYPE_IPV6SCTP   6\n+#define   NGBE_RSSTYPE_IPV4UDP    7\n+#define   NGBE_RSSTYPE_IPV6UDP    8\n+#define   NGBE_RSSTYPE_FDIR       15\n+#define NGBE_RXD_SECTYPE(dw)      RS(dw, 4, 0x3)\n+#define NGBE_RXD_SECTYPE_NONE     LS(0, 4, 0x3)\n+#define NGBE_RXD_SECTYPE_IPSECESP LS(2, 4, 0x3)\n+#define NGBE_RXD_SECTYPE_IPSECAH  LS(3, 4, 0x3)\n+#define NGBE_RXD_TPIDSEL(dw)      RS(dw, 6, 0x7)\n+#define NGBE_RXD_PTID(dw)         RS(dw, 9, 0xFF)\n+#define NGBE_RXD_RSCCNT(dw)       RS(dw, 17, 0xF)\n+#define NGBE_RXD_HDRLEN(dw)       RS(dw, 21, 0x3FF)\n+#define NGBE_RXD_SPH              MS(31, 0x1)\n+\n+/* @ngbe_rx_desc.dw1 */\n+/** bit 0-31, as rss hash when  **/\n+#define NGBE_RXD_RSSHASH(rxd)     ((rxd)->qw0.dw1)\n+\n+/** bit 0-31, as ip csum when  **/\n+#define NGBE_RXD_IPID(rxd)        ((rxd)->qw0.hi.ipid)\n+#define NGBE_RXD_CSUM(rxd)        ((rxd)->qw0.hi.csum)\n+\n+/* @ngbe_rx_desc.dw2 */\n+#define NGBE_RXD_STATUS(rxd)      ((rxd)->qw1.lo.status)\n+/** bit 0-1 **/\n+#define NGBE_RXD_STAT_DD          MS(0, 0x1) /* Descriptor Done */\n+#define NGBE_RXD_STAT_EOP         MS(1, 0x1) /* End of Packet */\n+/** bit 2-31, when EOP=0 **/\n+#define NGBE_RXD_NEXTP_RESV(v)    LS(v, 2, 0x3)\n+#define NGBE_RXD_NEXTP(dw)        RS(dw, 4, 0xFFFF) /* Next Descriptor */\n+/** bit 2-31, when EOP=1 **/\n+#define NGBE_RXD_PKT_CLS_MASK     MS(2, 0x7) /* Packet Class */\n+#define NGBE_RXD_PKT_CLS_TC_RSS   LS(0, 2, 0x7) /* RSS Hash */\n+#define NGBE_RXD_PKT_CLS_FLM      LS(1, 2, 0x7) /* FDir Match */\n+#define NGBE_RXD_PKT_CLS_SYN      LS(2, 2, 0x7) /* TCP Sync */\n+#define NGBE_RXD_PKT_CLS_5TUPLE   LS(3, 2, 0x7) /* 5 Tuple */\n+#define NGBE_RXD_PKT_CLS_ETF      LS(4, 2, 0x7) /* Ethertype Filter */\n+#define NGBE_RXD_STAT_VLAN        MS(5, 0x1) /* IEEE VLAN Packet */\n+#define NGBE_RXD_STAT_UDPCS       MS(6, 0x1) /* UDP xsum calculated */\n+#define NGBE_RXD_STAT_L4CS        MS(7, 0x1) /* L4 xsum calculated */\n+#define NGBE_RXD_STAT_IPCS        MS(8, 0x1) /* IP xsum calculated */\n+#define NGBE_RXD_STAT_PIF         MS(9, 0x1) /* Non-unicast address */\n+#define NGBE_RXD_STAT_EIPCS       MS(10, 0x1) /* Encap IP xsum calculated */\n+#define NGBE_RXD_STAT_VEXT        MS(11, 0x1) /* Multi-VLAN */\n+#define NGBE_RXD_STAT_IPV6EX      MS(12, 0x1) /* IPv6 with option header */\n+#define NGBE_RXD_STAT_LLINT       MS(13, 0x1) /* Pkt caused LLI */\n+#define NGBE_RXD_STAT_1588        MS(14, 0x1) /* IEEE1588 Time Stamp */\n+#define NGBE_RXD_STAT_SECP        MS(15, 0x1) /* Security Processing */\n+#define NGBE_RXD_STAT_LB          MS(16, 0x1) /* Loopback Status */\n+/*** bit 17-30, when PTYPE=IP ***/\n+#define NGBE_RXD_STAT_BMC         MS(17, 0x1) /* PTYPE=IP, BMC status */\n+#define NGBE_RXD_ERR_HBO          MS(23, 0x1) /* Header Buffer Overflow */\n+#define NGBE_RXD_ERR_EIPCS        MS(26, 0x1) /* Encap IP header error */\n+#define NGBE_RXD_ERR_SECERR       MS(27, 0x1) /* macsec or ipsec error */\n+#define NGBE_RXD_ERR_RXE          MS(29, 0x1) /* Any MAC Error */\n+#define NGBE_RXD_ERR_L4CS         MS(30, 0x1) /* TCP/UDP xsum error */\n+#define NGBE_RXD_ERR_IPCS         MS(31, 0x1) /* IP xsum error */\n+#define NGBE_RXD_ERR_CSUM(dw)     RS(dw, 30, 0x3)\n+\n+/* @ngbe_rx_desc.dw3 */\n+#define NGBE_RXD_LENGTH(rxd)           ((rxd)->qw1.hi.len)\n+#define NGBE_RXD_VLAN(rxd)             ((rxd)->qw1.hi.tag)\n+\n /*****************************************************************************\n  * Transmit Descriptor\n  *****************************************************************************/\n@@ -73,6 +152,8 @@ struct ngbe_tx_desc {\n #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \\\n \t\t    sizeof(struct ngbe_rx_desc))\n \n+#define rte_packet_prefetch(p)  rte_prefetch1(p)\n+\n #define NGBE_TX_MAX_SEG                    40\n \n #ifndef DEFAULT_TX_FREE_THRESH\n",
    "prefixes": [
        "v6",
        "14/19"
    ]
}