get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request body are changed).

put:
Update a patch (full update: all writable fields are replaced).
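
As a client-side illustration of the read verb above, here is a minimal sketch using Python and the `requests` package (an assumption; any HTTP client works). It fetches the same resource that is recorded verbatim below; reads need no authentication, and error handling is kept to a bare minimum.

import requests

BASE = "https://patches.dpdk.org/api"

# Fetch one patch as JSON; mirrors the GET exchange recorded below.
resp = requests.get(f"{BASE}/patches/48998/", timeout=30)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[v5,30/31] net/ice: support advance RX/TX"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # raw mbox URL, suitable for `git am`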

GET /api/patches/48998/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 48998,
    "url": "https://patches.dpdk.org/api/patches/48998/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-31-git-send-email-wenzhuo.lu@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1545032259-77179-31-git-send-email-wenzhuo.lu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1545032259-77179-31-git-send-email-wenzhuo.lu@intel.com",
    "date": "2018-12-17T07:37:38",
    "name": "[v5,30/31] net/ice: support advance RX/TX",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c780495de8b020ffdf6ed90d27fc4708c1e93921",
    "submitter": {
        "id": 258,
        "url": "https://patches.dpdk.org/api/people/258/?format=api",
        "name": "Wenzhuo Lu",
        "email": "wenzhuo.lu@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-31-git-send-email-wenzhuo.lu@intel.com/mbox/",
    "series": [
        {
            "id": 2824,
            "url": "https://patches.dpdk.org/api/series/2824/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=2824",
            "date": "2018-12-17T07:37:08",
            "name": "A new net PMD - ICE",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/2824/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/48998/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/48998/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id DE40B1BB5C;\n\tMon, 17 Dec 2018 08:33:43 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id 4D0861B9FC\n\tfor <dev@dpdk.org>; Mon, 17 Dec 2018 08:33:32 +0100 (CET)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t16 Dec 2018 23:33:31 -0800",
            "from dpdk26.sh.intel.com ([10.67.110.164])\n\tby orsmga002.jf.intel.com with ESMTP; 16 Dec 2018 23:33:30 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.56,364,1539673200\"; d=\"scan'208\";a=\"118899370\"",
        "From": "Wenzhuo Lu <wenzhuo.lu@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Wenzhuo Lu <wenzhuo.lu@intel.com>, Qiming Yang <qiming.yang@intel.com>, \n\tXiaoyun Li <xiaoyun.li@intel.com>, Jingjing Wu <jingjing.wu@intel.com>",
        "Date": "Mon, 17 Dec 2018 15:37:38 +0800",
        "Message-Id": "<1545032259-77179-31-git-send-email-wenzhuo.lu@intel.com>",
        "X-Mailer": "git-send-email 1.9.3",
        "In-Reply-To": "<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "References": "<1542956179-80951-1-git-send-email-wenzhuo.lu@intel.com>\n\t<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v5 30/31] net/ice: support advance RX/TX",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add RX functions, scatter and bulk.\nAdd TX function, simple.\n\nSigned-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>\nSigned-off-by: Qiming Yang <qiming.yang@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Jingjing Wu <jingjing.wu@intel.com>\n---\n doc/guides/nics/features/ice.ini |   1 +\n drivers/net/ice/ice_lan_rxtx.c   | 660 ++++++++++++++++++++++++++++++++++++++-\n 2 files changed, 659 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini\nindex 19655f1..300eced 100644\n--- a/doc/guides/nics/features/ice.ini\n+++ b/doc/guides/nics/features/ice.ini\n@@ -11,6 +11,7 @@ Rx interrupt         = Y\n Queue start/stop     = Y\n MTU update           = Y\n Jumbo frame          = Y\n+Scattered Rx         = Y\n TSO                  = Y\n Unicast MAC filter   = Y\n Multicast MAC filter = Y\ndiff --git a/drivers/net/ice/ice_lan_rxtx.c b/drivers/net/ice/ice_lan_rxtx.c\nindex c0ee7c5..b328a96 100644\n--- a/drivers/net/ice/ice_lan_rxtx.c\n+++ b/drivers/net/ice/ice_lan_rxtx.c\n@@ -957,6 +957,431 @@\n \tPMD_RX_LOG(DEBUG, \"Mbuf vlan_tci: %u, vlan_tci_outer: %u\",\n \t\t   mb->vlan_tci, mb->vlan_tci_outer);\n }\n+\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+#define ICE_LOOK_AHEAD 8\n+#if (ICE_LOOK_AHEAD != 8)\n+#error \"PMD ICE: ICE_LOOK_AHEAD must be 8\\n\"\n+#endif\n+static inline int\n+ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)\n+{\n+\tvolatile union ice_rx_desc *rxdp;\n+\tstruct ice_rx_entry *rxep;\n+\tstruct rte_mbuf *mb;\n+\tuint16_t pkt_len;\n+\tuint64_t qword1;\n+\tuint32_t rx_status;\n+\tint32_t s[ICE_LOOK_AHEAD], nb_dd;\n+\tint32_t i, j, nb_rx = 0;\n+\tuint64_t pkt_flags = 0;\n+\tuint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;\n+\n+\trxdp = &rxq->rx_ring[rxq->rx_tail];\n+\trxep = &rxq->sw_ring[rxq->rx_tail];\n+\n+\tqword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);\n+\trx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S;\n+\n+\t/* Make sure there is at least 1 packet to receive */\n+\tif (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))\n+\t\treturn 0;\n+\n+\t/**\n+\t * Scan LOOK_AHEAD descriptors at a time to determine which\n+\t * descriptors reference packets that are ready to be received.\n+\t */\n+\tfor (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,\n+\t     rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {\n+\t\t/* Read desc statuses backwards to avoid race condition */\n+\t\tfor (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) {\n+\t\t\tqword1 = rte_le_to_cpu_64(\n+\t\t\t\t\trxdp[j].wb.qword1.status_error_len);\n+\t\t\ts[j] = (qword1 & ICE_RXD_QW1_STATUS_M) >>\n+\t\t\t       ICE_RXD_QW1_STATUS_S;\n+\t\t}\n+\n+\t\trte_smp_rmb();\n+\n+\t\t/* Compute how many status bits were set */\n+\t\tfor (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)\n+\t\t\tnb_dd += s[j] & (1 << ICE_RX_DESC_STATUS_DD_S);\n+\n+\t\tnb_rx += nb_dd;\n+\n+\t\t/* Translate descriptor info to mbuf parameters */\n+\t\tfor (j = 0; j < nb_dd; j++) {\n+\t\t\tmb = rxep[j].mbuf;\n+\t\t\tqword1 = rte_le_to_cpu_64(\n+\t\t\t\t\trxdp[j].wb.qword1.status_error_len);\n+\t\t\tpkt_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>\n+\t\t\t\t   ICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;\n+\t\t\tmb->data_len = pkt_len;\n+\t\t\tmb->pkt_len = pkt_len;\n+\t\t\tmb->ol_flags = 0;\n+\t\t\tpkt_flags = ice_rxd_status_to_pkt_flags(qword1);\n+\t\t\tpkt_flags |= ice_rxd_error_to_pkt_flags(qword1);\n+\t\t\tif (pkt_flags & PKT_RX_RSS_HASH)\n+\t\t\t\tmb->hash.rss =\n+\t\t\t\t\trte_le_to_cpu_32(\n+\t\t\t\t\t\trxdp[j].wb.qword0.hi_dword.rss);\n+\t\t\tmb->packet_type = ptype_tbl[(uint8_t)(\n+\t\t\t\t\t\t(qword1 &\n+\t\t\t\t\t\t ICE_RXD_QW1_PTYPE_M) >>\n+\t\t\t\t\t\tICE_RXD_QW1_PTYPE_S)];\n+\t\t\tice_rxd_to_vlan_tci(mb, &rxdp[j]);\n+\n+\t\t\tmb->ol_flags |= pkt_flags;\n+\t\t}\n+\n+\t\tfor (j = 0; j < ICE_LOOK_AHEAD; j++)\n+\t\t\trxq->rx_stage[i + j] = rxep[j].mbuf;\n+\n+\t\tif (nb_dd != ICE_LOOK_AHEAD)\n+\t\t\tbreak;\n+\t}\n+\n+\t/* Clear software ring entries */\n+\tfor (i = 0; i < nb_rx; 
i++)\n+\t\trxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;\n+\n+\tPMD_RX_LOG(DEBUG, \"ice_rx_scan_hw_ring: \"\n+\t\t   \"port_id=%u, queue_id=%u, nb_rx=%d\",\n+\t\t   rxq->port_id, rxq->queue_id, nb_rx);\n+\n+\treturn nb_rx;\n+}\n+\n+static inline uint16_t\n+ice_rx_fill_from_stage(struct ice_rx_queue *rxq,\n+\t\t       struct rte_mbuf **rx_pkts,\n+\t\t       uint16_t nb_pkts)\n+{\n+\tuint16_t i;\n+\tstruct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];\n+\n+\tnb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);\n+\n+\tfor (i = 0; i < nb_pkts; i++)\n+\t\trx_pkts[i] = stage[i];\n+\n+\trxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);\n+\trxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);\n+\n+\treturn nb_pkts;\n+}\n+\n+static inline int\n+ice_rx_alloc_bufs(struct ice_rx_queue *rxq)\n+{\n+\tvolatile union ice_rx_desc *rxdp;\n+\tstruct ice_rx_entry *rxep;\n+\tstruct rte_mbuf *mb;\n+\tuint16_t alloc_idx, i;\n+\tuint64_t dma_addr;\n+\tint diag;\n+\n+\t/* Allocate buffers in bulk */\n+\talloc_idx = (uint16_t)(rxq->rx_free_trigger -\n+\t\t\t       (rxq->rx_free_thresh - 1));\n+\trxep = &rxq->sw_ring[alloc_idx];\n+\tdiag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,\n+\t\t\t\t    rxq->rx_free_thresh);\n+\tif (unlikely(diag != 0)) {\n+\t\tPMD_RX_LOG(ERR, \"Failed to get mbufs in bulk\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxdp = &rxq->rx_ring[alloc_idx];\n+\tfor (i = 0; i < rxq->rx_free_thresh; i++) {\n+\t\tif (likely(i < (rxq->rx_free_thresh - 1)))\n+\t\t\t/* Prefetch next mbuf */\n+\t\t\trte_prefetch0(rxep[i + 1].mbuf);\n+\n+\t\tmb = rxep[i].mbuf;\n+\t\trte_mbuf_refcnt_set(mb, 1);\n+\t\tmb->next = NULL;\n+\t\tmb->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmb->nb_segs = 1;\n+\t\tmb->port = rxq->port_id;\n+\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));\n+\t\trxdp[i].read.hdr_addr = 0;\n+\t\trxdp[i].read.pkt_addr = dma_addr;\n+\t}\n+\n+\t/* Update rx tail regsiter */\n+\trte_wmb();\n+\tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);\n+\n+\trxq->rx_free_trigger =\n+\t\t(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);\n+\tif (rxq->rx_free_trigger >= rxq->nb_rx_desc)\n+\t\trxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);\n+\n+\treturn 0;\n+}\n+\n+static inline uint16_t\n+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;\n+\tuint16_t nb_rx = 0;\n+\tstruct rte_eth_dev *dev;\n+\n+\tif (!nb_pkts)\n+\t\treturn 0;\n+\n+\tif (rxq->rx_nb_avail)\n+\t\treturn ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);\n+\n+\tnb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);\n+\trxq->rx_next_avail = 0;\n+\trxq->rx_nb_avail = nb_rx;\n+\trxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);\n+\n+\tif (rxq->rx_tail > rxq->rx_free_trigger) {\n+\t\tif (ice_rx_alloc_bufs(rxq) != 0) {\n+\t\t\tuint16_t i, j;\n+\n+\t\t\tdev = ICE_VSI_TO_ETH_DEV(rxq->vsi);\n+\t\t\tdev->data->rx_mbuf_alloc_failed +=\n+\t\t\t\trxq->rx_free_thresh;\n+\t\t\tPMD_RX_LOG(DEBUG, \"Rx mbuf alloc failed for \"\n+\t\t\t\t   \"port_id=%u, queue_id=%u\",\n+\t\t\t\t   rxq->port_id, rxq->queue_id);\n+\t\t\trxq->rx_nb_avail = 0;\n+\t\t\trxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);\n+\t\t\tfor (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)\n+\t\t\t\trxq->sw_ring[j].mbuf = rxq->rx_stage[i];\n+\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\n+\tif (rxq->rx_tail >= rxq->nb_rx_desc)\n+\t\trxq->rx_tail = 0;\n+\n+\tif (rxq->rx_nb_avail)\n+\t\treturn ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);\n+\n+\treturn 0;\n+}\n+\n+static 
uint16_t\n+ice_recv_pkts_bulk_alloc(void *rx_queue,\n+\t\t\t struct rte_mbuf **rx_pkts,\n+\t\t\t uint16_t nb_pkts)\n+{\n+\tuint16_t nb_rx = 0;\n+\tuint16_t n;\n+\tuint16_t count;\n+\n+\tif (unlikely(nb_pkts == 0))\n+\t\treturn nb_rx;\n+\n+\tif (likely(nb_pkts <= ICE_RX_MAX_BURST))\n+\t\treturn rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);\n+\n+\twhile (nb_pkts) {\n+\t\tn = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);\n+\t\tcount = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);\n+\t\tnb_rx = (uint16_t)(nb_rx + count);\n+\t\tnb_pkts = (uint16_t)(nb_pkts - count);\n+\t\tif (count < n)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn nb_rx;\n+}\n+#else\n+static uint16_t\n+ice_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,\n+\t\t\t struct rte_mbuf __rte_unused **rx_pkts,\n+\t\t\t uint16_t __rte_unused nb_pkts)\n+{\n+\treturn 0;\n+}\n+#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */\n+\n+static uint16_t\n+ice_recv_scattered_pkts(void *rx_queue,\n+\t\t\tstruct rte_mbuf **rx_pkts,\n+\t\t\tuint16_t nb_pkts)\n+{\n+\tstruct ice_rx_queue *rxq = rx_queue;\n+\tvolatile union ice_rx_desc *rx_ring = rxq->rx_ring;\n+\tvolatile union ice_rx_desc *rxdp;\n+\tunion ice_rx_desc rxd;\n+\tstruct ice_rx_entry *sw_ring = rxq->sw_ring;\n+\tstruct ice_rx_entry *rxe;\n+\tstruct rte_mbuf *first_seg = rxq->pkt_first_seg;\n+\tstruct rte_mbuf *last_seg = rxq->pkt_last_seg;\n+\tstruct rte_mbuf *nmb; /* new allocated mbuf */\n+\tstruct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */\n+\tuint16_t rx_id = rxq->rx_tail;\n+\tuint16_t nb_rx = 0;\n+\tuint16_t nb_hold = 0;\n+\tuint16_t rx_packet_len;\n+\tuint32_t rx_status;\n+\tuint64_t qword1;\n+\tuint64_t dma_addr;\n+\tuint64_t pkt_flags = 0;\n+\tuint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;\n+\tstruct rte_eth_dev *dev;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\trxdp = &rx_ring[rx_id];\n+\t\tqword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);\n+\t\trx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>\n+\t\t\t    ICE_RXD_QW1_STATUS_S;\n+\n+\t\t/* Check the DD bit first */\n+\t\tif (!(rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)))\n+\t\t\tbreak;\n+\n+\t\t/* allocate mbuf */\n+\t\tnmb = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(!nmb)) {\n+\t\t\tdev = ICE_VSI_TO_ETH_DEV(rxq->vsi);\n+\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\t\trxd = *rxdp; /* copy descriptor in ring to temp variable*/\n+\n+\t\tnb_hold++;\n+\t\trxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */\n+\t\trx_id++;\n+\t\tif (unlikely(rx_id == rxq->nb_rx_desc))\n+\t\t\trx_id = 0;\n+\n+\t\t/* Prefetch next mbuf */\n+\t\trte_prefetch0(sw_ring[rx_id].mbuf);\n+\n+\t\t/**\n+\t\t * When next RX descriptor is on a cache line boundary,\n+\t\t * prefetch the next 4 RX descriptors and next 8 pointers\n+\t\t * to mbufs.\n+\t\t */\n+\t\tif ((rx_id & 0x3) == 0) {\n+\t\t\trte_prefetch0(&rx_ring[rx_id]);\n+\t\t\trte_prefetch0(&sw_ring[rx_id]);\n+\t\t}\n+\n+\t\trxm = rxe->mbuf;\n+\t\trxe->mbuf = nmb;\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n+\n+\t\t/* Set data buffer address and data length of the mbuf */\n+\t\trxdp->read.hdr_addr = 0;\n+\t\trxdp->read.pkt_addr = dma_addr;\n+\t\trx_packet_len = (qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>\n+\t\t\t\tICE_RXD_QW1_LEN_PBUF_S;\n+\t\trxm->data_len = rx_packet_len;\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tice_rxd_to_vlan_tci(rxm, rxdp);\n+\t\trxm->packet_type = ptype_tbl[(uint8_t)((qword1 &\n+\t\t\t\t\t\t\tICE_RXD_QW1_PTYPE_M) >>\n+\t\t\t\t\t\t       ICE_RXD_QW1_PTYPE_S)];\n+\n+\t\t/**\n+\t\t * If this is the first buffer of the 
received packet, set the\n+\t\t * pointer to the first mbuf of the packet and initialize its\n+\t\t * context. Otherwise, update the total length and the number\n+\t\t * of segments of the current scattered packet, and update the\n+\t\t * pointer to the last mbuf of the current packet.\n+\t\t */\n+\t\tif (!first_seg) {\n+\t\t\tfirst_seg = rxm;\n+\t\t\tfirst_seg->nb_segs = 1;\n+\t\t\tfirst_seg->pkt_len = rx_packet_len;\n+\t\t} else {\n+\t\t\tfirst_seg->pkt_len =\n+\t\t\t\t(uint16_t)(first_seg->pkt_len +\n+\t\t\t\t\t   rx_packet_len);\n+\t\t\tfirst_seg->nb_segs++;\n+\t\t\tlast_seg->next = rxm;\n+\t\t}\n+\n+\t\t/**\n+\t\t * If this is not the last buffer of the received packet,\n+\t\t * update the pointer to the last mbuf of the current scattered\n+\t\t * packet and continue to parse the RX ring.\n+\t\t */\n+\t\tif (!(rx_status & (1 << ICE_RX_DESC_STATUS_EOF_S))) {\n+\t\t\tlast_seg = rxm;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\t/**\n+\t\t * This is the last buffer of the received packet. If the CRC\n+\t\t * is not stripped by the hardware:\n+\t\t *  - Subtract the CRC length from the total packet length.\n+\t\t *  - If the last buffer only contains the whole CRC or a part\n+\t\t *  of it, free the mbuf associated to the last buffer. If part\n+\t\t *  of the CRC is also contained in the previous mbuf, subtract\n+\t\t *  the length of that CRC part from the data length of the\n+\t\t *  previous mbuf.\n+\t\t */\n+\t\trxm->next = NULL;\n+\t\tif (unlikely(rxq->crc_len > 0)) {\n+\t\t\tfirst_seg->pkt_len -= ETHER_CRC_LEN;\n+\t\t\tif (rx_packet_len <= ETHER_CRC_LEN) {\n+\t\t\t\trte_pktmbuf_free_seg(rxm);\n+\t\t\t\tfirst_seg->nb_segs--;\n+\t\t\t\tlast_seg->data_len =\n+\t\t\t\t\t(uint16_t)(last_seg->data_len -\n+\t\t\t\t\t(ETHER_CRC_LEN - rx_packet_len));\n+\t\t\t\tlast_seg->next = NULL;\n+\t\t\t} else\n+\t\t\t\trxm->data_len = (uint16_t)(rx_packet_len -\n+\t\t\t\t\t\t\t   ETHER_CRC_LEN);\n+\t\t}\n+\n+\t\tfirst_seg->port = rxq->port_id;\n+\t\tfirst_seg->ol_flags = 0;\n+\n+\t\tpkt_flags = ice_rxd_status_to_pkt_flags(qword1);\n+\t\tpkt_flags |= ice_rxd_error_to_pkt_flags(qword1);\n+\t\tif (pkt_flags & PKT_RX_RSS_HASH)\n+\t\t\tfirst_seg->hash.rss =\n+\t\t\t\trte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);\n+\n+\t\tfirst_seg->ol_flags |= pkt_flags;\n+\t\t/* Prefetch data of first segment, if configured to do so. */\n+\t\trte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,\n+\t\t\t\t\t  first_seg->data_off));\n+\t\trx_pkts[nb_rx++] = first_seg;\n+\t\tfirst_seg = NULL;\n+\t}\n+\n+\t/* Record index of the next RX descriptor to probe. */\n+\trxq->rx_tail = rx_id;\n+\trxq->pkt_first_seg = first_seg;\n+\trxq->pkt_last_seg = last_seg;\n+\n+\t/**\n+\t * If the number of free RX descriptors is greater than the RX free\n+\t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n+\t * register. 
Update the RDT with the value of the last processed RX\n+\t * descriptor minus 1, to guarantee that the RDT register is never\n+\t * equal to the RDH register, which creates a \"full\" ring situtation\n+\t * from the hardware point of view.\n+\t */\n+\tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\trx_id = (uint16_t)(rx_id == 0 ?\n+\t\t\t\t   (rxq->nb_rx_desc - 1) : (rx_id - 1));\n+\t\t/* write TAIL register */\n+\t\tICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);\n+\t\tnb_hold = 0;\n+\t}\n+\trxq->nb_rx_hold = nb_hold;\n+\n+\t/* return received packet in the burst */\n+\treturn nb_rx;\n+}\n+\n const uint32_t *\n ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)\n {\n@@ -990,7 +1415,11 @@\n \t\tRTE_PTYPE_UNKNOWN\n \t};\n \n-\tif (dev->rx_pkt_burst == ice_recv_pkts)\n+\tif (dev->rx_pkt_burst == ice_recv_pkts ||\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+\t    dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||\n+#endif\n+\t    dev->rx_pkt_burst == ice_recv_scattered_pkts)\n \t\treturn ptypes;\n \treturn NULL;\n }\n@@ -1313,6 +1742,20 @@\n \treturn 0;\n }\n \n+/* Construct the tx flags */\n+static inline uint64_t\n+ice_build_ctob(uint32_t td_cmd,\n+\t       uint32_t td_offset,\n+\t       uint16_t size,\n+\t       uint32_t td_tag)\n+{\n+\treturn rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |\n+\t\t\t\t((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |\n+\t\t\t\t((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |\n+\t\t\t\t((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |\n+\t\t\t\t((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));\n+}\n+\n /* Check if the context descriptor is needed for TX offloading */\n static inline uint16_t\n ice_calc_context_desc(uint64_t flags)\n@@ -1531,10 +1974,213 @@\n \treturn nb_tx;\n }\n \n+static inline int __attribute__((always_inline))\n+ice_tx_free_bufs(struct ice_tx_queue *txq)\n+{\n+\tstruct ice_tx_entry *txep;\n+\tuint16_t i;\n+\n+\tif ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &\n+\t     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=\n+\t    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))\n+\t\treturn 0;\n+\n+\ttxep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];\n+\n+\tfor (i = 0; i < txq->tx_rs_thresh; i++)\n+\t\trte_prefetch0((txep + i)->mbuf);\n+\n+\tif (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {\n+\t\tfor (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {\n+\t\t\trte_mempool_put(txep->mbuf->pool, txep->mbuf);\n+\t\t\ttxep->mbuf = NULL;\n+\t\t}\n+\t} else {\n+\t\tfor (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {\n+\t\t\trte_pktmbuf_free_seg(txep->mbuf);\n+\t\t\ttxep->mbuf = NULL;\n+\t\t}\n+\t}\n+\n+\ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);\n+\ttxq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);\n+\tif (txq->tx_next_dd >= txq->nb_tx_desc)\n+\t\ttxq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);\n+\n+\treturn txq->tx_rs_thresh;\n+}\n+\n+/* Populate 4 descriptors with data from 4 mbufs */\n+static inline void\n+tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)\n+{\n+\tuint64_t dma_addr;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < 4; i++, txdp++, pkts++) {\n+\t\tdma_addr = rte_mbuf_data_iova(*pkts);\n+\t\ttxdp->buf_addr = rte_cpu_to_le_64(dma_addr);\n+\t\ttxdp->cmd_type_offset_bsz =\n+\t\t\tice_build_ctob((uint32_t)ICE_TD_CMD, 0,\n+\t\t\t\t       (*pkts)->data_len, 0);\n+\t}\n+}\n+\n+/* Populate 1 descriptor with data from 1 mbuf */\n+static inline void\n+tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)\n+{\n+\tuint64_t dma_addr;\n+\n+\tdma_addr = 
rte_mbuf_data_iova(*pkts);\n+\ttxdp->buf_addr = rte_cpu_to_le_64(dma_addr);\n+\ttxdp->cmd_type_offset_bsz =\n+\t\tice_build_ctob((uint32_t)ICE_TD_CMD, 0,\n+\t\t\t       (*pkts)->data_len, 0);\n+}\n+\n+static inline void\n+ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,\n+\t\t    uint16_t nb_pkts)\n+{\n+\tvolatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];\n+\tstruct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];\n+\tconst int N_PER_LOOP = 4;\n+\tconst int N_PER_LOOP_MASK = N_PER_LOOP - 1;\n+\tint mainpart, leftover;\n+\tint i, j;\n+\n+\t/**\n+\t * Process most of the packets in chunks of N pkts.  Any\n+\t * leftover packets will get processed one at a time.\n+\t */\n+\tmainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);\n+\tleftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);\n+\tfor (i = 0; i < mainpart; i += N_PER_LOOP) {\n+\t\t/* Copy N mbuf pointers to the S/W ring */\n+\t\tfor (j = 0; j < N_PER_LOOP; ++j)\n+\t\t\t(txep + i + j)->mbuf = *(pkts + i + j);\n+\t\ttx4(txdp + i, pkts + i);\n+\t}\n+\n+\tif (unlikely(leftover > 0)) {\n+\t\tfor (i = 0; i < leftover; ++i) {\n+\t\t\t(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);\n+\t\t\ttx1(txdp + mainpart + i, pkts + mainpart + i);\n+\t\t}\n+\t}\n+}\n+\n+static inline uint16_t\n+tx_xmit_pkts(struct ice_tx_queue *txq,\n+\t     struct rte_mbuf **tx_pkts,\n+\t     uint16_t nb_pkts)\n+{\n+\tvolatile struct ice_tx_desc *txr = txq->tx_ring;\n+\tuint16_t n = 0;\n+\n+\t/**\n+\t * Begin scanning the H/W ring for done descriptors when the number\n+\t * of available descriptors drops below tx_free_thresh. For each done\n+\t * descriptor, free the associated buffer.\n+\t */\n+\tif (txq->nb_tx_free < txq->tx_free_thresh)\n+\t\tice_tx_free_bufs(txq);\n+\n+\t/* Use available descriptor only */\n+\tnb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);\n+\tif (unlikely(!nb_pkts))\n+\t\treturn 0;\n+\n+\ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);\n+\tif ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {\n+\t\tn = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);\n+\t\tice_tx_fill_hw_ring(txq, tx_pkts, n);\n+\t\ttxr[txq->tx_next_rs].cmd_type_offset_bsz |=\n+\t\t\trte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<\n+\t\t\t\t\t ICE_TXD_QW1_CMD_S);\n+\t\ttxq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);\n+\t\ttxq->tx_tail = 0;\n+\t}\n+\n+\t/* Fill hardware descriptor ring with mbuf data */\n+\tice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));\n+\ttxq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));\n+\n+\t/* Determin if RS bit needs to be set */\n+\tif (txq->tx_tail > txq->tx_next_rs) {\n+\t\ttxr[txq->tx_next_rs].cmd_type_offset_bsz |=\n+\t\t\trte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<\n+\t\t\t\t\t ICE_TXD_QW1_CMD_S);\n+\t\ttxq->tx_next_rs =\n+\t\t\t(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);\n+\t\tif (txq->tx_next_rs >= txq->nb_tx_desc)\n+\t\t\ttxq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);\n+\t}\n+\n+\tif (txq->tx_tail >= txq->nb_tx_desc)\n+\t\ttxq->tx_tail = 0;\n+\n+\t/* Update the tx tail register */\n+\trte_wmb();\n+\tICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);\n+\n+\treturn nb_pkts;\n+}\n+\n+static uint16_t\n+ice_xmit_pkts_simple(void *tx_queue,\n+\t\t     struct rte_mbuf **tx_pkts,\n+\t\t     uint16_t nb_pkts)\n+{\n+\tuint16_t nb_tx = 0;\n+\n+\tif (likely(nb_pkts <= ICE_TX_MAX_BURST))\n+\t\treturn tx_xmit_pkts((struct ice_tx_queue *)tx_queue,\n+\t\t\t\t    tx_pkts, nb_pkts);\n+\n+\twhile (nb_pkts) {\n+\t\tuint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,\n+\t\t\t\t\t\t      
ICE_TX_MAX_BURST);\n+\n+\t\tret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,\n+\t\t\t\t   &tx_pkts[nb_tx], num);\n+\t\tnb_tx = (uint16_t)(nb_tx + ret);\n+\t\tnb_pkts = (uint16_t)(nb_pkts - ret);\n+\t\tif (ret < num)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn nb_tx;\n+}\n+\n void __attribute__((cold))\n ice_set_rx_function(struct rte_eth_dev *dev)\n {\n-\tdev->rx_pkt_burst = ice_recv_pkts;\n+\tPMD_INIT_FUNC_TRACE();\n+\tstruct ice_adapter *ad =\n+\t\tICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);\n+\n+\tif (dev->data->scattered_rx) {\n+\t\t/* Set the non-LRO scattered function */\n+\t\tPMD_INIT_LOG(DEBUG,\n+\t\t\t     \"Using a Scattered function on port %d.\",\n+\t\t\t     dev->data->port_id);\n+\t\tdev->rx_pkt_burst = ice_recv_scattered_pkts;\n+\t} else if (ad->rx_bulk_alloc_allowed) {\n+\t\tPMD_INIT_LOG(DEBUG,\n+\t\t\t     \"Rx Burst Bulk Alloc Preconditions are \"\n+\t\t\t     \"satisfied. Rx Burst Bulk Alloc function \"\n+\t\t\t     \"will be used on port %d.\",\n+\t\t\t     dev->data->port_id);\n+\t\tdev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;\n+\t} else {\n+\t\tPMD_INIT_LOG(DEBUG,\n+\t\t\t     \"Rx Burst Bulk Alloc Preconditions are not \"\n+\t\t\t     \"satisfied, Normal Rx will be used on port %d.\",\n+\t\t\t     dev->data->port_id);\n+\t\tdev->rx_pkt_burst = ice_recv_pkts;\n+\t}\n }\n \n /*********************************************************************\n@@ -1588,8 +2234,18 @@ void __attribute__((cold))\n void __attribute__((cold))\n ice_set_tx_function(struct rte_eth_dev *dev)\n {\n+\tstruct ice_adapter *ad =\n+\t\tICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);\n+\n+\tif (ad->tx_simple_allowed) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Simple tx finally be used.\");\n+\t\tdev->tx_pkt_burst = ice_xmit_pkts_simple;\n+\t\tdev->tx_pkt_prepare = NULL;\n+\t} else {\n+\t\tPMD_INIT_LOG(DEBUG, \"Normal tx finally be used.\");\n \t\tdev->tx_pkt_burst = ice_xmit_pkts;\n \t\tdev->tx_pkt_prepare = ice_prep_pkts;\n+\t}\n }\n \n /* For each value it means, datasheet of hardware can tell more details\n",
    "prefixes": [
        "v5",
        "30/31"
    ]
}
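
For the two write verbs documented at the top, a hedged sketch follows: it downloads the raw message from the "mbox" URL returned above and then issues a partial update via PATCH. The token header and file name are assumptions (Patchwork instances typically require an authenticated maintainer for writes); the toggled "archived" flag is a field shown in the response above, while valid "state" names depend on the instance and are not listed here.

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 48998
TOKEN = "..."  # assumption: API token of a user with maintainer rights

session = requests.Session()
session.headers["Authorization"] = f"Token {TOKEN}"

# Re-fetch the patch and save the raw mbox referenced by its "mbox" field,
# e.g. to apply it locally with `git am`.
patch = session.get(f"{BASE}/patches/{PATCH_ID}/", timeout=30).json()
mbox = session.get(patch["mbox"], timeout=30)
with open(f"patch-{PATCH_ID}.mbox", "wb") as fh:
    fh.write(mbox.content)

# PATCH performs a partial update: only the supplied fields are changed.
updated = session.patch(f"{BASE}/patches/{PATCH_ID}/",
                        json={"archived": False},
                        timeout=30)
updated.raise_for_status()
print(updated.json()["archived"])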