get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update; all writable fields must be supplied).
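
Both write operations require an authenticated request. As a minimal sketch, the patch could be fetched and its state changed with Python's requests library; the token value and the example state below are placeholder assumptions, and changing state requires maintainer rights on the project:

    import requests

    BASE = "https://patches.dpdk.org/api"
    TOKEN = "0123456789abcdef"  # hypothetical token from your Patchwork user profile

    # get: show the patch as JSON
    patch = requests.get(f"{BASE}/patches/48997/").json()
    print(patch["name"], patch["state"])

    # patch: partial update -- only the fields sent are changed
    resp = requests.patch(
        f"{BASE}/patches/48997/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()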

GET /api/patches/48997/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 48997,
    "url": "https://patches.dpdk.org/api/patches/48997/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-30-git-send-email-wenzhuo.lu@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1545032259-77179-30-git-send-email-wenzhuo.lu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1545032259-77179-30-git-send-email-wenzhuo.lu@intel.com",
    "date": "2018-12-17T07:37:37",
    "name": "[v5,29/31] net/ice: support basic RX/TX",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d15e99c3c531b23ad55564c4226e8587153f5f19",
    "submitter": {
        "id": 258,
        "url": "https://patches.dpdk.org/api/people/258/?format=api",
        "name": "Wenzhuo Lu",
        "email": "wenzhuo.lu@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-30-git-send-email-wenzhuo.lu@intel.com/mbox/",
    "series": [
        {
            "id": 2824,
            "url": "https://patches.dpdk.org/api/series/2824/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=2824",
            "date": "2018-12-17T07:37:08",
            "name": "A new net PMD - ICE",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/2824/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/48997/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/48997/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id AF1051BB37;\n\tMon, 17 Dec 2018 08:33:42 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id D462C1B9EF\n\tfor <dev@dpdk.org>; Mon, 17 Dec 2018 08:33:30 +0100 (CET)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t16 Dec 2018 23:33:29 -0800",
            "from dpdk26.sh.intel.com ([10.67.110.164])\n\tby orsmga002.jf.intel.com with ESMTP; 16 Dec 2018 23:33:29 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.56,364,1539673200\"; d=\"scan'208\";a=\"118899363\"",
        "From": "Wenzhuo Lu <wenzhuo.lu@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Wenzhuo Lu <wenzhuo.lu@intel.com>, Qiming Yang <qiming.yang@intel.com>, \n\tXiaoyun Li <xiaoyun.li@intel.com>, Jingjing Wu <jingjing.wu@intel.com>",
        "Date": "Mon, 17 Dec 2018 15:37:37 +0800",
        "Message-Id": "<1545032259-77179-30-git-send-email-wenzhuo.lu@intel.com>",
        "X-Mailer": "git-send-email 1.9.3",
        "In-Reply-To": "<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "References": "<1542956179-80951-1-git-send-email-wenzhuo.lu@intel.com>\n\t<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v5 29/31] net/ice: support basic RX/TX",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>\nSigned-off-by: Qiming Yang <qiming.yang@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Jingjing Wu <jingjing.wu@intel.com>\n---\n doc/guides/nics/features/ice.ini |   5 +\n drivers/net/ice/ice_ethdev.c     |   5 +\n drivers/net/ice/ice_lan_rxtx.c   | 568 ++++++++++++++++++++++++++++++++++++++-\n drivers/net/ice/ice_rxtx.h       |   8 +\n 4 files changed, 584 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini\nindex 67fd044..19655f1 100644\n--- a/doc/guides/nics/features/ice.ini\n+++ b/doc/guides/nics/features/ice.ini\n@@ -11,14 +11,19 @@ Rx interrupt         = Y\n Queue start/stop     = Y\n MTU update           = Y\n Jumbo frame          = Y\n+TSO                  = Y\n Unicast MAC filter   = Y\n Multicast MAC filter = Y\n RSS hash             = Y\n RSS key update       = Y\n RSS reta update      = Y\n VLAN filter          = Y\n+CRC offload          = Y\n VLAN offload         = Y\n QinQ offload         = Y\n+L3 checksum offload  = Y\n+L4 checksum offload  = Y\n+Packet type parsing  = Y\n Basic stats          = Y\n Extended stats       = Y\n FW version           = Y\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex 3235d01..ab8fe3b 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -1260,6 +1260,9 @@ struct ice_xstats_name_off {\n \tint ret;\n \n \tdev->dev_ops = &ice_eth_dev_ops;\n+\tdev->rx_pkt_burst = ice_recv_pkts;\n+\tdev->tx_pkt_burst = ice_xmit_pkts;\n+\tdev->tx_pkt_prepare = ice_prep_pkts;\n \n \tice_set_default_ptype_table(dev);\n \tpci_dev = RTE_DEV_TO_PCI(dev->device);\n@@ -1732,6 +1735,8 @@ static int ice_init_rss(struct ice_pf *pf)\n \t\tgoto rx_err;\n \t}\n \n+\tice_set_rx_function(dev);\n+\n \t/* enable Rx interrput and mapping Rx queue to interrupt vector */\n \tif (ice_rxq_intr_setup(dev))\n \t\treturn -EIO;\ndiff --git a/drivers/net/ice/ice_lan_rxtx.c b/drivers/net/ice/ice_lan_rxtx.c\nindex fed12b4..c0ee7c5 100644\n--- a/drivers/net/ice/ice_lan_rxtx.c\n+++ b/drivers/net/ice/ice_lan_rxtx.c\n@@ -884,8 +884,81 @@\n \trte_free(q);\n }\n \n+/* Translate the rx descriptor status to pkt flags */\n+static inline uint64_t\n+ice_rxd_status_to_pkt_flags(uint64_t qword)\n+{\n+\tuint64_t flags;\n+\n+\t/* Check if RSS_HASH */\n+\tflags = (((qword >> ICE_RX_DESC_STATUS_FLTSTAT_S) &\n+\t\t  ICE_RX_DESC_FLTSTAT_RSS_HASH) ==\n+\t\t ICE_RX_DESC_FLTSTAT_RSS_HASH) ? 
PKT_RX_RSS_HASH : 0;\n+\n+\treturn flags;\n+}\n+\n+/* Rx L3/L4 checksum */\n+static inline uint64_t\n+ice_rxd_error_to_pkt_flags(uint64_t qword)\n+{\n+\tuint64_t flags = 0;\n+\tuint64_t error_bits = (qword >> ICE_RXD_QW1_ERROR_S);\n+\n+\tif (likely((error_bits & ICE_RX_ERR_BITS) == 0)) {\n+\t\tflags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);\n+\t\treturn flags;\n+\t}\n+\n+\tif (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_IPE_S)))\n+\t\tflags |= PKT_RX_IP_CKSUM_BAD;\n+\telse\n+\t\tflags |= PKT_RX_IP_CKSUM_GOOD;\n+\n+\tif (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_L4E_S)))\n+\t\tflags |= PKT_RX_L4_CKSUM_BAD;\n+\telse\n+\t\tflags |= PKT_RX_L4_CKSUM_GOOD;\n+\n+\tif (unlikely(error_bits & (1 << ICE_RX_DESC_ERROR_EIPE_S)))\n+\t\tflags |= PKT_RX_EIP_CKSUM_BAD;\n+\n+\treturn flags;\n+}\n+\n+static inline void\n+ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)\n+{\n+\tif (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &\n+\t    (1 << ICE_RX_DESC_STATUS_L2TAG1P_S)) {\n+\t\tmb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;\n+\t\tmb->vlan_tci =\n+\t\t\trte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);\n+\t\tPMD_RX_LOG(DEBUG, \"Descriptor l2tag1: %u\",\n+\t\t\t   rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1));\n+\t} else {\n+\t\tmb->vlan_tci = 0;\n+\t}\n+\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\tif (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &\n+\t    (1 << ICE_RX_DESC_EXT_STATUS_L2TAG2P_S)) {\n+\t\tmb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |\n+\t\t\t\tPKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;\n+\t\tmb->vlan_tci_outer = mb->vlan_tci;\n+\t\tmb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);\n+\t\tPMD_RX_LOG(DEBUG, \"Descriptor l2tag2_1: %u, l2tag2_2: %u\",\n+\t\t\t   rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_1),\n+\t\t\t   rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2));\n+\t} else {\n+\t\tmb->vlan_tci_outer = 0;\n+\t}\n+#endif\n+\tPMD_RX_LOG(DEBUG, \"Mbuf vlan_tci: %u, vlan_tci_outer: %u\",\n+\t\t   mb->vlan_tci, mb->vlan_tci_outer);\n+}\n const uint32_t *\n-ice_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)\n+ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)\n {\n \tstatic const uint32_t ptypes[] = {\n \t\t/* refers to ice_get_default_pkt_type() */\n@@ -917,7 +990,9 @@\n \t\tRTE_PTYPE_UNKNOWN\n \t};\n \n-\treturn ptypes;\n+\tif (dev->rx_pkt_burst == ice_recv_pkts)\n+\t\treturn ptypes;\n+\treturn NULL;\n }\n \n void\n@@ -1028,6 +1103,495 @@\n \tdev->data->nb_tx_queues = 0;\n }\n \n+uint16_t\n+ice_recv_pkts(void *rx_queue,\n+\t      struct rte_mbuf **rx_pkts,\n+\t      uint16_t nb_pkts)\n+{\n+\tstruct ice_rx_queue *rxq = rx_queue;\n+\tvolatile union ice_rx_desc *rx_ring = rxq->rx_ring;\n+\tvolatile union ice_rx_desc *rxdp;\n+\tunion ice_rx_desc rxd;\n+\tstruct ice_rx_entry *sw_ring = rxq->sw_ring;\n+\tstruct ice_rx_entry *rxe;\n+\tstruct rte_mbuf *nmb; /* new allocated mbuf */\n+\tstruct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */\n+\tuint16_t rx_id = rxq->rx_tail;\n+\tuint16_t nb_rx = 0;\n+\tuint16_t nb_hold = 0;\n+\tuint16_t rx_packet_len;\n+\tuint32_t rx_status;\n+\tuint64_t qword1;\n+\tuint64_t dma_addr;\n+\tuint64_t pkt_flags = 0;\n+\tuint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;\n+\tstruct rte_eth_dev *dev;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\trxdp = &rx_ring[rx_id];\n+\t\tqword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);\n+\t\trx_status = (qword1 & ICE_RXD_QW1_STATUS_M) >>\n+\t\t\t    ICE_RXD_QW1_STATUS_S;\n+\n+\t\t/* Check the DD bit first */\n+\t\tif (!(rx_status & (1 << 
ICE_RX_DESC_STATUS_DD_S)))\n+\t\t\tbreak;\n+\n+\t\t/* allocate mbuf */\n+\t\tnmb = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(!nmb)) {\n+\t\t\tdev = ICE_VSI_TO_ETH_DEV(rxq->vsi);\n+\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\t\trxd = *rxdp; /* copy descriptor in ring to temp variable*/\n+\n+\t\tnb_hold++;\n+\t\trxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */\n+\t\trx_id++;\n+\t\tif (unlikely(rx_id == rxq->nb_rx_desc))\n+\t\t\trx_id = 0;\n+\t\trxm = rxe->mbuf;\n+\t\trxe->mbuf = nmb;\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n+\n+\t\t/**\n+\t\t * fill the read format of descriptor with physic address in\n+\t\t * new allocated mbuf: nmb\n+\t\t */\n+\t\trxdp->read.hdr_addr = 0;\n+\t\trxdp->read.pkt_addr = dma_addr;\n+\n+\t\t/* calculate rx_packet_len of the received pkt */\n+\t\trx_packet_len = ((qword1 & ICE_RXD_QW1_LEN_PBUF_M) >>\n+\t\t\t\tICE_RXD_QW1_LEN_PBUF_S) - rxq->crc_len;\n+\n+\t\t/* fill old mbuf with received descriptor: rxd */\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\trte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));\n+\t\trxm->nb_segs = 1;\n+\t\trxm->next = NULL;\n+\t\trxm->pkt_len = rx_packet_len;\n+\t\trxm->data_len = rx_packet_len;\n+\t\trxm->port = rxq->port_id;\n+\t\tice_rxd_to_vlan_tci(rxm, rxdp);\n+\t\trxm->packet_type = ptype_tbl[(uint8_t)((qword1 &\n+\t\t\t\t\t\t\tICE_RXD_QW1_PTYPE_M) >>\n+\t\t\t\t\t\t       ICE_RXD_QW1_PTYPE_S)];\n+\t\tpkt_flags = ice_rxd_status_to_pkt_flags(qword1);\n+\t\tpkt_flags |= ice_rxd_error_to_pkt_flags(qword1);\n+\t\tif (pkt_flags & PKT_RX_RSS_HASH)\n+\t\t\trxm->hash.rss =\n+\t\t\t\trte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);\n+\t\trxm->ol_flags |= pkt_flags;\n+\t\t/* copy old mbuf to rx_pkts */\n+\t\trx_pkts[nb_rx++] = rxm;\n+\t}\n+\trxq->rx_tail = rx_id;\n+\t/**\n+\t * If the number of free RX descriptors is greater than the RX free\n+\t * threshold of the queue, advance the receive tail register of queue.\n+\t * Update that register with the value of the last processed RX\n+\t * descriptor minus 1.\n+\t */\n+\tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\trx_id = (uint16_t)(rx_id == 0 ?\n+\t\t\t\t   (rxq->nb_rx_desc - 1) : (rx_id - 1));\n+\t\t/* write TAIL register */\n+\t\tICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);\n+\t\tnb_hold = 0;\n+\t}\n+\trxq->nb_rx_hold = nb_hold;\n+\n+\t/* return received packet in the burst */\n+\treturn nb_rx;\n+}\n+\n+static inline void\n+ice_txd_enable_checksum(uint64_t ol_flags,\n+\t\t\tuint32_t *td_cmd,\n+\t\t\tuint32_t *td_offset,\n+\t\t\tunion ice_tx_offload tx_offload)\n+{\n+\t/* L2 length must be set. 
*/\n+\t*td_offset |= (tx_offload.l2_len >> 1) <<\n+\t\t      ICE_TX_DESC_LEN_MACLEN_S;\n+\n+\t/* Enable L3 checksum offloads */\n+\tif (ol_flags & PKT_TX_IP_CKSUM) {\n+\t\t*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;\n+\t\t*td_offset |= (tx_offload.l3_len >> 2) <<\n+\t\t\t      ICE_TX_DESC_LEN_IPLEN_S;\n+\t} else if (ol_flags & PKT_TX_IPV4) {\n+\t\t*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;\n+\t\t*td_offset |= (tx_offload.l3_len >> 2) <<\n+\t\t\t      ICE_TX_DESC_LEN_IPLEN_S;\n+\t} else if (ol_flags & PKT_TX_IPV6) {\n+\t\t*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;\n+\t\t*td_offset |= (tx_offload.l3_len >> 2) <<\n+\t\t\t      ICE_TX_DESC_LEN_IPLEN_S;\n+\t}\n+\n+\tif (ol_flags & PKT_TX_TCP_SEG) {\n+\t\t*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;\n+\t\t*td_offset |= (tx_offload.l4_len >> 2) <<\n+\t\t\t      ICE_TX_DESC_LEN_L4_LEN_S;\n+\t\treturn;\n+\t}\n+\n+\t/* Enable L4 checksum offloads */\n+\tswitch (ol_flags & PKT_TX_L4_MASK) {\n+\tcase PKT_TX_TCP_CKSUM:\n+\t\t*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;\n+\t\t*td_offset |= (sizeof(struct tcp_hdr) >> 2) <<\n+\t\t\t      ICE_TX_DESC_LEN_L4_LEN_S;\n+\t\tbreak;\n+\tcase PKT_TX_SCTP_CKSUM:\n+\t\t*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;\n+\t\t*td_offset |= (sizeof(struct sctp_hdr) >> 2) <<\n+\t\t\t      ICE_TX_DESC_LEN_L4_LEN_S;\n+\t\tbreak;\n+\tcase PKT_TX_UDP_CKSUM:\n+\t\t*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;\n+\t\t*td_offset |= (sizeof(struct udp_hdr) >> 2) <<\n+\t\t\t      ICE_TX_DESC_LEN_L4_LEN_S;\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+}\n+\n+static inline int\n+ice_xmit_cleanup(struct ice_tx_queue *txq)\n+{\n+\tstruct ice_tx_entry *sw_ring = txq->sw_ring;\n+\tvolatile struct ice_tx_desc *txd = txq->tx_ring;\n+\tuint16_t last_desc_cleaned = txq->last_desc_cleaned;\n+\tuint16_t nb_tx_desc = txq->nb_tx_desc;\n+\tuint16_t desc_to_clean_to;\n+\tuint16_t nb_tx_to_clean;\n+\n+\t/* Determine the last descriptor needing to be cleaned */\n+\tdesc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);\n+\tif (desc_to_clean_to >= nb_tx_desc)\n+\t\tdesc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);\n+\n+\t/* Check to make sure the last descriptor to clean is done */\n+\tdesc_to_clean_to = sw_ring[desc_to_clean_to].last_id;\n+\tif (!(txd[desc_to_clean_to].cmd_type_offset_bsz &\n+\t    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {\n+\t\tPMD_TX_FREE_LOG(DEBUG, \"TX descriptor %4u is not done \"\n+\t\t\t\t\"(port=%d queue=%d) value=0x%\"PRIx64\"\\n\",\n+\t\t\t\tdesc_to_clean_to,\n+\t\t\t\ttxq->port_id, txq->queue_id,\n+\t\t\t\ttxd[desc_to_clean_to].cmd_type_offset_bsz);\n+\t\t/* Failed to clean any descriptors */\n+\t\treturn -1;\n+\t}\n+\n+\t/* Figure out how many descriptors will be cleaned */\n+\tif (last_desc_cleaned > desc_to_clean_to)\n+\t\tnb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +\n+\t\t\t\t\t    desc_to_clean_to);\n+\telse\n+\t\tnb_tx_to_clean = (uint16_t)(desc_to_clean_to -\n+\t\t\t\t\t    last_desc_cleaned);\n+\n+\t/* The last descriptor to clean is done, so that means all the\n+\t * descriptors from the last descriptor that was cleaned\n+\t * up to the last descriptor with the RS bit set\n+\t * are done. 
Only reset the threshold descriptor.\n+\t */\n+\ttxd[desc_to_clean_to].cmd_type_offset_bsz = 0;\n+\n+\t/* Update the txq to reflect the last descriptor that was cleaned */\n+\ttxq->last_desc_cleaned = desc_to_clean_to;\n+\ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);\n+\n+\treturn 0;\n+}\n+\n+/* Check if the context descriptor is needed for TX offloading */\n+static inline uint16_t\n+ice_calc_context_desc(uint64_t flags)\n+{\n+\tstatic uint64_t mask = PKT_TX_TCP_SEG | PKT_TX_QINQ_PKT;\n+\n+\treturn (flags & mask) ? 1 : 0;\n+}\n+\n+/* set ice TSO context descriptor */\n+static inline uint64_t\n+ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)\n+{\n+\tuint64_t ctx_desc = 0;\n+\tuint32_t cd_cmd, hdr_len, cd_tso_len;\n+\n+\tif (!tx_offload.l4_len) {\n+\t\tPMD_TX_LOG(DEBUG, \"L4 length set to 0\");\n+\t\treturn ctx_desc;\n+\t}\n+\n+\t/**\n+\t * in case of non tunneling packet, the outer_l2_len and\n+\t * outer_l3_len must be 0.\n+\t */\n+\thdr_len = tx_offload.outer_l2_len +\n+\t\t  tx_offload.outer_l3_len +\n+\t\t  tx_offload.l2_len +\n+\t\t  tx_offload.l3_len +\n+\t\t  tx_offload.l4_len;\n+\n+\tcd_cmd = ICE_TX_CTX_DESC_TSO;\n+\tcd_tso_len = mbuf->pkt_len - hdr_len;\n+\tctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |\n+\t\t    ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |\n+\t\t    ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);\n+\n+\treturn ctx_desc;\n+}\n+\n+uint16_t\n+ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct ice_tx_queue *txq;\n+\tvolatile struct ice_tx_desc *tx_ring;\n+\tvolatile struct ice_tx_desc *txd;\n+\tstruct ice_tx_entry *sw_ring;\n+\tstruct ice_tx_entry *txe, *txn;\n+\tstruct rte_mbuf *tx_pkt;\n+\tstruct rte_mbuf *m_seg;\n+\tuint16_t tx_id;\n+\tuint16_t nb_tx;\n+\tuint16_t nb_used;\n+\tuint16_t nb_ctx;\n+\tuint32_t td_cmd = 0;\n+\tuint32_t td_offset = 0;\n+\tuint32_t td_tag = 0;\n+\tuint16_t tx_last;\n+\tuint64_t buf_dma_addr;\n+\tuint64_t ol_flags;\n+\tunion ice_tx_offload tx_offload = {0};\n+\n+\ttxq = tx_queue;\n+\tsw_ring = txq->sw_ring;\n+\ttx_ring = txq->tx_ring;\n+\ttx_id = txq->tx_tail;\n+\ttxe = &sw_ring[tx_id];\n+\n+\t/* Check if the descriptor ring needs to be cleaned. */\n+\tif (txq->nb_tx_free < txq->tx_free_thresh)\n+\t\tice_xmit_cleanup(txq);\n+\n+\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n+\t\ttx_pkt = *tx_pkts++;\n+\n+\t\ttd_cmd = 0;\n+\t\tol_flags = tx_pkt->ol_flags;\n+\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\ttx_offload.outer_l2_len = tx_pkt->outer_l2_len;\n+\t\ttx_offload.outer_l3_len = tx_pkt->outer_l3_len;\n+\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n+\t\t/* Calculate the number of context descriptors needed. 
*/\n+\t\tnb_ctx = ice_calc_context_desc(ol_flags);\n+\n+\t\t/* The number of descriptors that must be allocated for\n+\t\t * a packet equals to the number of the segments of that\n+\t\t * packet plus the number of context descriptor if needed.\n+\t\t */\n+\t\tnb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);\n+\t\ttx_last = (uint16_t)(tx_id + nb_used - 1);\n+\n+\t\t/* Circular ring */\n+\t\tif (tx_last >= txq->nb_tx_desc)\n+\t\t\ttx_last = (uint16_t)(tx_last - txq->nb_tx_desc);\n+\n+\t\tif (nb_used > txq->nb_tx_free) {\n+\t\t\tif (ice_xmit_cleanup(txq) != 0) {\n+\t\t\t\tif (nb_tx == 0)\n+\t\t\t\t\treturn 0;\n+\t\t\t\tgoto end_of_tx;\n+\t\t\t}\n+\t\t\tif (unlikely(nb_used > txq->tx_rs_thresh)) {\n+\t\t\t\twhile (nb_used > txq->nb_tx_free) {\n+\t\t\t\t\tif (ice_xmit_cleanup(txq) != 0) {\n+\t\t\t\t\t\tif (nb_tx == 0)\n+\t\t\t\t\t\t\treturn 0;\n+\t\t\t\t\t\tgoto end_of_tx;\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Descriptor based VLAN insertion */\n+\t\tif (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {\n+\t\t\ttd_cmd |= ICE_TX_DESC_CMD_IL2TAG1;\n+\t\t\ttd_tag = tx_pkt->vlan_tci;\n+\t\t}\n+\n+\t\t/* Enable checksum offloading */\n+\t\tif (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) {\n+\t\t\tice_txd_enable_checksum(ol_flags, &td_cmd,\n+\t\t\t\t\t\t&td_offset, tx_offload);\n+\t\t}\n+\n+\t\tif (nb_ctx) {\n+\t\t\t/* Setup TX context descriptor if required */\n+\t\t\tvolatile struct ice_tx_ctx_desc *ctx_txd =\n+\t\t\t\t(volatile struct ice_tx_ctx_desc *)\n+\t\t\t\t\t&tx_ring[tx_id];\n+\t\t\tuint16_t cd_l2tag2 = 0;\n+\t\t\tuint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;\n+\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n+\t\t\tif (txe->mbuf) {\n+\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\t\ttxe->mbuf = NULL;\n+\t\t\t}\n+\n+\t\t\tif (ol_flags & PKT_TX_TCP_SEG)\n+\t\t\t\tcd_type_cmd_tso_mss |=\n+\t\t\t\t\tice_set_tso_ctx(tx_pkt, tx_offload);\n+\n+\t\t\t/* TX context descriptor based double VLAN insert */\n+\t\t\tif (ol_flags & PKT_TX_QINQ_PKT) {\n+\t\t\t\tcd_l2tag2 = tx_pkt->vlan_tci_outer;\n+\t\t\t\tcd_type_cmd_tso_mss |=\n+\t\t\t\t\t((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<\n+\t\t\t\t\t ICE_TXD_CTX_QW1_CMD_S);\n+\t\t\t}\n+\t\t\tctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);\n+\t\t\tctx_txd->qw1 =\n+\t\t\t\trte_cpu_to_le_64(cd_type_cmd_tso_mss);\n+\n+\t\t\ttxe->last_id = tx_last;\n+\t\t\ttx_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t}\n+\t\tm_seg = tx_pkt;\n+\n+\t\tdo {\n+\t\t\ttxd = &tx_ring[tx_id];\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\n+\t\t\tif (txe->mbuf)\n+\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\ttxe->mbuf = m_seg;\n+\n+\t\t\t/* Setup TX Descriptor */\n+\t\t\tbuf_dma_addr = rte_mbuf_data_iova(m_seg);\n+\t\t\ttxd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);\n+\t\t\ttxd->cmd_type_offset_bsz =\n+\t\t\t\trte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |\n+\t\t\t\t((uint64_t)td_cmd  << ICE_TXD_QW1_CMD_S) |\n+\t\t\t\t((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |\n+\t\t\t\t((uint64_t)m_seg->data_len  <<\n+\t\t\t\t ICE_TXD_QW1_TX_BUF_SZ_S) |\n+\t\t\t\t((uint64_t)td_tag  << ICE_TXD_QW1_L2TAG1_S));\n+\n+\t\t\ttxe->last_id = tx_last;\n+\t\t\ttx_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t\tm_seg = m_seg->next;\n+\t\t} while (m_seg);\n+\n+\t\t/* fill the last descriptor with End of Packet (EOP) bit */\n+\t\ttd_cmd |= ICE_TX_DESC_CMD_EOP;\n+\t\ttxq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);\n+\t\ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);\n+\n+\t\t/* set RS bit on the last descriptor of one packet */\n+\t\tif 
(txq->nb_tx_used >= txq->tx_rs_thresh) {\n+\t\t\tPMD_TX_FREE_LOG(DEBUG,\n+\t\t\t\t\t\"Setting RS bit on TXD id=\"\n+\t\t\t\t\t\"%4u (port=%d queue=%d)\",\n+\t\t\t\t\ttx_last, txq->port_id, txq->queue_id);\n+\n+\t\t\ttd_cmd |= ICE_TX_DESC_CMD_RS;\n+\n+\t\t\t/* Update txq RS bit counters */\n+\t\t\ttxq->nb_tx_used = 0;\n+\t\t}\n+\t\ttxd->cmd_type_offset_bsz |=\n+\t\t\trte_cpu_to_le_64(((uint64_t)td_cmd) <<\n+\t\t\t\t\t ICE_TXD_QW1_CMD_S);\n+\t}\n+end_of_tx:\n+\trte_wmb();\n+\n+\t/* update Tail register */\n+\tICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);\n+\ttxq->tx_tail = tx_id;\n+\n+\treturn nb_tx;\n+}\n+\n+void __attribute__((cold))\n+ice_set_rx_function(struct rte_eth_dev *dev)\n+{\n+\tdev->rx_pkt_burst = ice_recv_pkts;\n+}\n+\n+/*********************************************************************\n+ *\n+ *  TX prep functions\n+ *\n+ **********************************************************************/\n+/* The default values of TSO MSS */\n+#define ICE_MIN_TSO_MSS            64\n+#define ICE_MAX_TSO_MSS            9728\n+#define ICE_MAX_TSO_FRAME_SIZE     262144\n+uint16_t\n+ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t      uint16_t nb_pkts)\n+{\n+\tint i, ret;\n+\tuint64_t ol_flags;\n+\tstruct rte_mbuf *m;\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tm = tx_pkts[i];\n+\t\tol_flags = m->ol_flags;\n+\n+\t\tif (ol_flags & PKT_TX_TCP_SEG &&\n+\t\t    (m->tso_segsz < ICE_MIN_TSO_MSS ||\n+\t\t     m->tso_segsz > ICE_MAX_TSO_MSS ||\n+\t\t     m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {\n+\t\t\t/**\n+\t\t\t * MSS outside the range are considered malicious\n+\t\t\t */\n+\t\t\trte_errno = -EINVAL;\n+\t\t\treturn i;\n+\t\t}\n+\n+#ifdef RTE_LIBRTE_ETHDEV_DEBUG\n+\t\tret = rte_validate_tx_offload(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = ret;\n+\t\t\treturn i;\n+\t\t}\n+#endif\n+\t\tret = rte_net_intel_cksum_prepare(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = ret;\n+\t\t\treturn i;\n+\t\t}\n+\t}\n+\treturn i;\n+}\n+\n+void __attribute__((cold))\n+ice_set_tx_function(struct rte_eth_dev *dev)\n+{\n+\t\tdev->tx_pkt_burst = ice_xmit_pkts;\n+\t\tdev->tx_pkt_prepare = ice_prep_pkts;\n+}\n+\n /* For each value it means, datasheet of hardware can tell more details\n  *\n  * @note: fix ice_dev_supported_ptypes_get() if any change here.\ndiff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h\nindex bad2b89..e0218b3 100644\n--- a/drivers/net/ice/ice_rxtx.h\n+++ b/drivers/net/ice/ice_rxtx.h\n@@ -134,6 +134,14 @@ int ice_tx_queue_setup(struct rte_eth_dev *dev,\n void ice_tx_queue_release(void *txq);\n void ice_clear_queues(struct rte_eth_dev *dev);\n void ice_free_queues(struct rte_eth_dev *dev);\n+uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t       uint16_t nb_pkts);\n+uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t       uint16_t nb_pkts);\n+void ice_set_rx_function(struct rte_eth_dev *dev);\n+uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t       uint16_t nb_pkts);\n+void ice_set_tx_function(struct rte_eth_dev *dev);\n uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n void ice_set_default_ptype_table(struct rte_eth_dev *dev);\n const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);\n",
    "prefixes": [
        "v5",
        "29/31"
    ]
}
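
A client consuming the response above mostly needs the "series", "mbox", and "state" fields. A hedged sketch of reading them (the output file name is an arbitrary choice):

    import requests

    patch = requests.get("https://patches.dpdk.org/api/patches/48997/").json()

    # "series" is a list; this patch belongs to series 2824, "A new net PMD - ICE" (v5)
    for series in patch["series"]:
        print(f'series {series["id"]}: {series["name"]} (v{series["version"]})')

    # "mbox" points at the raw mail, suitable for feeding to `git am`
    with open("48997.mbox", "w") as f:  # output path is an assumption
        f.write(requests.get(patch["mbox"]).text)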