get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update; all writable fields must be supplied).
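
GET is open to anonymous clients; the update verbs require an authenticated
maintainer. As a minimal read-only sketch, assuming Python with the
third-party requests library installed and that the server returns JSON to
non-browser clients by default (the patch ID and field names are taken from
the sample response below):

import requests

# Fetch one patch as JSON; no token is needed for read access.
resp = requests.get(
    "http://patches.dpdk.org/api/patches/101964/",
    timeout=30,
)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])                # "[v9,2/7] net/iavf: rework tx path"
print(patch["state"])               # "superseded"
print(patch["submitter"]["email"])  # "radu.nicolau@intel.com"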

GET /api/patches/101964/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 101964,
    "url": "http://patches.dpdk.org/api/patches/101964/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211018101019.957804-3-radu.nicolau@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211018101019.957804-3-radu.nicolau@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211018101019.957804-3-radu.nicolau@intel.com",
    "date": "2021-10-18T10:10:14",
    "name": "[v9,2/7] net/iavf: rework tx path",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "72cbe69aad62fcd985b2349d3f76760d96e60cb1",
    "submitter": {
        "id": 743,
        "url": "http://patches.dpdk.org/api/people/743/?format=api",
        "name": "Radu Nicolau",
        "email": "radu.nicolau@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211018101019.957804-3-radu.nicolau@intel.com/mbox/",
    "series": [
        {
            "id": 19731,
            "url": "http://patches.dpdk.org/api/series/19731/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=19731",
            "date": "2021-10-18T10:10:12",
            "name": "iavf: add iAVF IPsec inline crypto support",
            "version": 9,
            "mbox": "http://patches.dpdk.org/series/19731/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/101964/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/101964/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 7433AA0C43;\n\tMon, 18 Oct 2021 12:23:09 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B6995410E8;\n\tMon, 18 Oct 2021 12:23:03 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n by mails.dpdk.org (Postfix) with ESMTP id 39BEB410E8\n for <dev@dpdk.org>; Mon, 18 Oct 2021 12:23:01 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n by fmsmga101.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Oct 2021 03:23:00 -0700",
            "from silpixa00400884.ir.intel.com ([10.243.22.82])\n by orsmga004.jf.intel.com with ESMTP; 18 Oct 2021 03:22:58 -0700"
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10140\"; a=\"251675761\"",
            "E=Sophos;i=\"5.85,381,1624345200\"; d=\"scan'208\";a=\"251675761\"",
            "E=Sophos;i=\"5.85,381,1624345200\"; d=\"scan'208\";a=\"593759752\""
        ],
        "X-ExtLoop1": "1",
        "From": "Radu Nicolau <radu.nicolau@intel.com>",
        "To": "Jingjing Wu <jingjing.wu@intel.com>, Beilei Xing <beilei.xing@intel.com>,\n Bruce Richardson <bruce.richardson@intel.com>,\n Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "Cc": "dev@dpdk.org, declan.doherty@intel.com, abhijit.sinha@intel.com,\n qi.z.zhang@intel.com, Radu Nicolau <radu.nicolau@intel.com>",
        "Date": "Mon, 18 Oct 2021 11:10:14 +0100",
        "Message-Id": "<20211018101019.957804-3-radu.nicolau@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20211018101019.957804-1-radu.nicolau@intel.com>",
        "References": "<20210909142428.750634-1-radu.nicolau@intel.com>\n <20211018101019.957804-1-radu.nicolau@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v9 2/7] net/iavf: rework tx path",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Rework the TX path and TX descriptor usage in order to\nallow for better use of oflload flags and to facilitate enabling of\ninline crypto offload feature.\n\nSigned-off-by: Declan Doherty <declan.doherty@intel.com>\nSigned-off-by: Abhijit Sinha <abhijit.sinha@intel.com>\nSigned-off-by: Radu Nicolau <radu.nicolau@intel.com>\nAcked-by: Jingjing Wu <jingjing.wu@intel.com>\n---\n drivers/net/iavf/iavf_rxtx.c         | 538 ++++++++++++++++-----------\n drivers/net/iavf/iavf_rxtx.h         | 117 +++++-\n drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-\n 3 files changed, 431 insertions(+), 234 deletions(-)",
    "diff": "diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c\nindex 88bbd40c10..11b7fea36f 100644\n--- a/drivers/net/iavf/iavf_rxtx.c\n+++ b/drivers/net/iavf/iavf_rxtx.c\n@@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)\n \n static inline void\n iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,\n-\t\t\t  volatile union iavf_rx_flex_desc *rxdp,\n-\t\t\t  uint8_t rx_flags)\n+\t\t\t  volatile union iavf_rx_flex_desc *rxdp)\n {\n-\tuint16_t vlan_tci = 0;\n-\n-\tif (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&\n-\t    rte_le_to_cpu_64(rxdp->wb.status_error0) &\n-\t    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))\n-\t\tvlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);\n+\tif (rte_le_to_cpu_64(rxdp->wb.status_error0) &\n+\t\t(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {\n+\t\tmb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;\n+\t\tmb->vlan_tci =\n+\t\t\trte_le_to_cpu_16(rxdp->wb.l2tag1);\n+\t} else {\n+\t\tmb->vlan_tci = 0;\n+\t}\n \n #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC\n-\tif (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&\n-\t    rte_le_to_cpu_16(rxdp->wb.status_error1) &\n-\t    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))\n-\t\tvlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);\n-#endif\n-\n-\tif (vlan_tci) {\n-\t\tmb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;\n-\t\tmb->vlan_tci = vlan_tci;\n+\tif (rte_le_to_cpu_16(rxdp->wb.status_error1) &\n+\t    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {\n+\t\tmb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |\n+\t\t\t\tPKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;\n+\t\tmb->vlan_tci_outer = mb->vlan_tci;\n+\t\tmb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);\n+\t\tPMD_RX_LOG(DEBUG, \"Descriptor l2tag2_1: %u, l2tag2_2: %u\",\n+\t\t\t   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),\n+\t\t\t   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));\n+\t} else {\n+\t\tmb->vlan_tci_outer = 0;\n \t}\n+#endif\n }\n \n /* Translate the rx descriptor status and error fields to pkt flags */\n@@ -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,\n \t\trxm->ol_flags = 0;\n \t\trxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &\n \t\t\trte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];\n-\t\tiavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);\n+\t\tiavf_flex_rxd_to_vlan_tci(rxm, &rxd);\n \t\trxq->rxd_to_pkt_fields(rxq, rxm, &rxd);\n \t\tpkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);\n \t\trxm->ol_flags |= pkt_flags;\n@@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tfirst_seg->ol_flags = 0;\n \t\tfirst_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &\n \t\t\trte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];\n-\t\tiavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);\n+\t\tiavf_flex_rxd_to_vlan_tci(first_seg, &rxd);\n \t\trxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);\n \t\tpkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);\n \n@@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)\n \n \t\t\tmb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &\n \t\t\t\trte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];\n-\t\t\tiavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);\n+\t\t\tiavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);\n \t\t\trxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);\n \t\t\tstat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);\n \t\t\tpkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);\n@@ -2068,190 +2072,302 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)\n \treturn 0;\n }\n \n-/* Check 
if the context descriptor is needed for TX offloading */\n+\n+\n+static inline void\n+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)\n+{\n+\tuint64_t cmd = 0;\n+\n+\t/* TSO enabled */\n+\tif (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))\n+\t\tcmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;\n+\n+\t/* Time Sync - Currently not supported */\n+\n+\t/* Outer L2 TAG 2 Insertion - Currently not supported */\n+\t/* Inner L2 TAG 2 Insertion - Currently not supported */\n+\n+\t*field |= cmd;\n+}\n+\n+static inline void\n+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,\n+\t\tconst struct rte_mbuf *m)\n+{\n+\tuint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;\n+\tuint64_t eip_len = 0;\n+\tuint64_t eip_noinc = 0;\n+\t/* Default - IP_ID is increment in each segment of LSO */\n+\n+\tswitch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |\n+\t\t\tPKT_TX_OUTER_IP_CKSUM)) {\n+\tcase PKT_TX_OUTER_IPV4:\n+\t\teip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;\n+\t\teip_len = m->outer_l3_len >> 2;\n+\tbreak;\n+\tcase PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:\n+\t\teip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;\n+\t\teip_len = m->outer_l3_len >> 2;\n+\tbreak;\n+\tcase PKT_TX_OUTER_IPV6:\n+\t\teip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;\n+\t\teip_len = m->outer_l3_len >> 2;\n+\tbreak;\n+\t}\n+\n+\t*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |\n+\t\teip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |\n+\t\teip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;\n+}\n+\n static inline uint16_t\n-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)\n+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,\n+\tstruct rte_mbuf *m)\n {\n-\tif (flags & PKT_TX_TCP_SEG)\n-\t\treturn 1;\n-\tif (flags & PKT_TX_VLAN_PKT &&\n-\t    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)\n-\t\treturn 1;\n-\treturn 0;\n+\tuint64_t segmentation_field = 0;\n+\tuint64_t total_length = 0;\n+\n+\ttotal_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);\n+\n+\tif (m->ol_flags & PKT_TX_TUNNEL_MASK)\n+\t\ttotal_length -= m->outer_l3_len;\n+\n+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX\n+\tif (!m->l4_len || !m->tso_segsz)\n+\t\tPMD_TX_LOG(DEBUG, \"L4 length %d, LSO Segment size %d\",\n+\t\t\t m->l4_len, m->tso_segsz);\n+\tif (m->tso_segsz < 88)\n+\t\tPMD_TX_LOG(DEBUG, \"LSO Segment size %d is less than minimum %d\",\n+\t\t\tm->tso_segsz, 88);\n+#endif\n+\tsegmentation_field =\n+\t\t(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &\n+\t\t\t\tIAVF_TXD_CTX_QW1_TSO_LEN_MASK) |\n+\t\t(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &\n+\t\t\t\tIAVF_TXD_CTX_QW1_MSS_MASK);\n+\n+\t*field |= segmentation_field;\n+\n+\treturn total_length;\n }\n \n+\n+struct iavf_tx_context_desc_qws {\n+\t__le64 qw0;\n+\t__le64 qw1;\n+};\n+\n static inline void\n-iavf_txd_enable_checksum(uint64_t ol_flags,\n-\t\t\tuint32_t *td_cmd,\n-\t\t\tuint32_t *td_offset,\n-\t\t\tunion iavf_tx_offload tx_offload)\n+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,\n+\tstruct rte_mbuf *m, uint16_t *tlen)\n {\n+\tvolatile struct iavf_tx_context_desc_qws *desc_qws =\n+\t\t\t(volatile struct iavf_tx_context_desc_qws *)desc;\n+\t/* fill descriptor type field */\n+\tdesc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;\n+\n+\t/* fill command field */\n+\tiavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);\n+\n+\t/* fill segmentation field */\n+\tif (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {\n+\t\t*tlen = 
iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,\n+\t\t\t\tm);\n+\t}\n+\n+\t/* fill tunnelling field */\n+\tif (m->ol_flags & PKT_TX_TUNNEL_MASK)\n+\t\tiavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);\n+\telse\n+\t\tdesc_qws->qw0 = 0;\n+\n+\tdesc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);\n+\tdesc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);\n+}\n+\n+\n+static inline void\n+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,\n+\t\tstruct rte_mbuf *m)\n+{\n+\tuint64_t command = 0;\n+\tuint64_t offset = 0;\n+\tuint64_t l2tag1 = 0;\n+\n+\t*qw1 = IAVF_TX_DESC_DTYPE_DATA;\n+\n+\tcommand = (uint64_t)IAVF_TX_DESC_CMD_ICRC;\n+\n+\t/* Descriptor based VLAN insertion */\n+\tif (m->ol_flags & PKT_TX_VLAN_PKT) {\n+\t\tcommand |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;\n+\t\tl2tag1 |= m->vlan_tci;\n+\t}\n+\n \t/* Set MACLEN */\n-\t*td_offset |= (tx_offload.l2_len >> 1) <<\n-\t\t      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;\n-\n-\t/* Enable L3 checksum offloads */\n-\tif (ol_flags & PKT_TX_IP_CKSUM) {\n-\t\t*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;\n-\t\t*td_offset |= (tx_offload.l3_len >> 2) <<\n-\t\t\t      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;\n-\t} else if (ol_flags & PKT_TX_IPV4) {\n-\t\t*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;\n-\t\t*td_offset |= (tx_offload.l3_len >> 2) <<\n-\t\t\t      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;\n-\t} else if (ol_flags & PKT_TX_IPV6) {\n-\t\t*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;\n-\t\t*td_offset |= (tx_offload.l3_len >> 2) <<\n-\t\t\t      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;\n-\t}\n-\n-\tif (ol_flags & PKT_TX_TCP_SEG) {\n-\t\t*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;\n-\t\t*td_offset |= (tx_offload.l4_len >> 2) <<\n+\toffset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;\n+\n+\t/* Enable L3 checksum offloading inner */\n+\tif (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {\n+\t\tcommand |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;\n+\t\toffset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;\n+\t} else if (m->ol_flags & PKT_TX_IPV4) {\n+\t\tcommand |= IAVF_TX_DESC_CMD_IIPT_IPV4;\n+\t\toffset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;\n+\t} else if (m->ol_flags & PKT_TX_IPV6) {\n+\t\tcommand |= IAVF_TX_DESC_CMD_IIPT_IPV6;\n+\t\toffset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;\n+\t}\n+\n+\tif (m->ol_flags & PKT_TX_TCP_SEG) {\n+\t\tcommand |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;\n+\t\toffset |= (m->l4_len >> 2) <<\n \t\t\t      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;\n-\t\treturn;\n \t}\n \n \t/* Enable L4 checksum offloads */\n-\tswitch (ol_flags & PKT_TX_L4_MASK) {\n+\tswitch (m->ol_flags & PKT_TX_L4_MASK) {\n \tcase PKT_TX_TCP_CKSUM:\n-\t\t*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;\n-\t\t*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<\n-\t\t\t      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;\n+\t\tcommand |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;\n+\t\toffset |= (sizeof(struct rte_tcp_hdr) >> 2) <<\n+\t\t\t\tIAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;\n \t\tbreak;\n \tcase PKT_TX_SCTP_CKSUM:\n-\t\t*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;\n-\t\t*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<\n-\t\t\t      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;\n+\t\tcommand |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;\n+\t\toffset |= (sizeof(struct rte_sctp_hdr) >> 2) <<\n+\t\t\t\tIAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;\n \t\tbreak;\n \tcase PKT_TX_UDP_CKSUM:\n-\t\t*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;\n-\t\t*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<\n-\t\t\t      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;\n-\t\tbreak;\n-\tdefault:\n+\t\tcommand |= 
IAVF_TX_DESC_CMD_L4T_EOFT_UDP;\n+\t\toffset |= (sizeof(struct rte_udp_hdr) >> 2) <<\n+\t\t\t\tIAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;\n \t\tbreak;\n \t}\n+\n+\t*qw1 = rte_cpu_to_le_64((((uint64_t)command <<\n+\t\tIAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |\n+\t\t(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &\n+\t\tIAVF_TXD_DATA_QW1_OFFSET_MASK) |\n+\t\t((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));\n }\n \n-/* set TSO context descriptor\n- * support IP -> L4 and IP -> IP -> L4\n- */\n-static inline uint64_t\n-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)\n+static inline void\n+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)\n {\n-\tuint64_t ctx_desc = 0;\n-\tuint32_t cd_cmd, hdr_len, cd_tso_len;\n-\n-\tif (!tx_offload.l4_len) {\n-\t\tPMD_TX_LOG(DEBUG, \"L4 length set to 0\");\n-\t\treturn ctx_desc;\n+\t*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &\n+\t\t\tIAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);\n \t}\n \n-\thdr_len = tx_offload.l2_len +\n-\t\t  tx_offload.l3_len +\n-\t\t  tx_offload.l4_len;\n+static inline void\n+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,\n+\tstruct rte_mbuf *m, uint64_t desc_template,\n+\tuint16_t tlen, uint16_t ipseclen)\n+{\n+\tuint32_t hdrlen = m->l2_len;\n+\tuint32_t bufsz = 0;\n \n-\tcd_cmd = IAVF_TX_CTX_DESC_TSO;\n-\tcd_tso_len = mbuf->pkt_len - hdr_len;\n-\tctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |\n-\t\t     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |\n-\t\t     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);\n+\t/* fill data descriptor qw1 from template */\n+\tdesc->cmd_type_offset_bsz = desc_template;\n \n-\treturn ctx_desc;\n-}\n+\t/* set data buffer address */\n+\tdesc->buffer_addr = rte_mbuf_data_iova(m);\n \n-/* Construct the tx flags */\n-static inline uint64_t\n-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,\n-\t       uint32_t td_tag)\n-{\n-\treturn rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |\n-\t\t\t\t((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |\n-\t\t\t\t((uint64_t)td_offset <<\n-\t\t\t\t IAVF_TXD_QW1_OFFSET_SHIFT) |\n-\t\t\t\t((uint64_t)size  <<\n-\t\t\t\t IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |\n-\t\t\t\t((uint64_t)td_tag  <<\n-\t\t\t\t IAVF_TXD_QW1_L2TAG1_SHIFT));\n+\t/* calculate data buffer size less set header lengths */\n+\tif ((m->ol_flags & PKT_TX_TUNNEL_MASK) &&\n+\t\t\t(m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))) {\n+\t\thdrlen += m->outer_l3_len;\n+\t\tif (m->ol_flags & PKT_TX_L4_MASK)\n+\t\t\thdrlen += m->l3_len + m->l4_len;\n+\t\telse\n+\t\t\thdrlen += m->l3_len;\n+\t\tif (m->ol_flags & PKT_TX_SEC_OFFLOAD)\n+\t\t\thdrlen += ipseclen;\n+\t\tbufsz = hdrlen + tlen;\n+\t} else {\n+\t\tbufsz = m->data_len;\n+\t}\n+\n+\t/* set data buffer size */\n+\tdesc->cmd_type_offset_bsz |=\n+\t\t(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &\n+\t\tIAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);\n+\n+\tdesc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);\n+\tdesc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);\n }\n \n+\n /* TX function */\n uint16_t\n iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n {\n-\tvolatile struct iavf_tx_desc *txd;\n-\tvolatile struct iavf_tx_desc *txr;\n-\tstruct iavf_tx_queue *txq;\n-\tstruct iavf_tx_entry *sw_ring;\n+\tstruct iavf_tx_queue *txq = tx_queue;\n+\tvolatile struct iavf_tx_desc *txr = txq->tx_ring;\n+\tstruct iavf_tx_entry *txe_ring = txq->sw_ring;\n \tstruct iavf_tx_entry *txe, 
*txn;\n-\tstruct rte_mbuf *tx_pkt;\n-\tstruct rte_mbuf *m_seg;\n-\tuint16_t tx_id;\n-\tuint16_t nb_tx;\n-\tuint32_t td_cmd;\n-\tuint32_t td_offset;\n-\tuint32_t td_tag;\n-\tuint64_t ol_flags;\n-\tuint16_t nb_used;\n-\tuint16_t nb_ctx;\n-\tuint16_t tx_last;\n-\tuint16_t slen;\n-\tuint64_t buf_dma_addr;\n-\tuint16_t cd_l2tag2 = 0;\n-\tunion iavf_tx_offload tx_offload = {0};\n-\n-\ttxq = tx_queue;\n-\tsw_ring = txq->sw_ring;\n-\ttxr = txq->tx_ring;\n-\ttx_id = txq->tx_tail;\n-\ttxe = &sw_ring[tx_id];\n+\tstruct rte_mbuf *mb, *mb_seg;\n+\tuint16_t desc_idx, desc_idx_last;\n+\tuint16_t idx;\n+\n \n \t/* Check if the descriptor ring needs to be cleaned. */\n \tif (txq->nb_free < txq->free_thresh)\n-\t\t(void)iavf_xmit_cleanup(txq);\n+\t\tiavf_xmit_cleanup(txq);\n+\n+\tdesc_idx = txq->tx_tail;\n+\ttxe = &txe_ring[desc_idx];\n+\n+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING\n+\t\tiavf_dump_tx_entry_ring(txq);\n+\t\tiavf_dump_tx_desc_ring(txq);\n+#endif\n+\n \n-\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n-\t\ttd_cmd = 0;\n-\t\ttd_tag = 0;\n-\t\ttd_offset = 0;\n+\tfor (idx = 0; idx < nb_pkts; idx++) {\n+\t\tvolatile struct iavf_tx_desc *ddesc;\n+\t\tuint16_t nb_desc_ctx;\n+\t\tuint16_t nb_desc_data, nb_desc_required;\n+\t\tuint16_t tlen = 0, ipseclen = 0;\n+\t\tuint64_t ddesc_template = 0;\n+\t\tuint64_t ddesc_cmd = 0;\n+\n+\t\tmb = tx_pkts[idx];\n \n-\t\ttx_pkt = *tx_pkts++;\n \t\tRTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);\n \n-\t\tol_flags = tx_pkt->ol_flags;\n-\t\ttx_offload.l2_len = tx_pkt->l2_len;\n-\t\ttx_offload.l3_len = tx_pkt->l3_len;\n-\t\ttx_offload.l4_len = tx_pkt->l4_len;\n-\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n-\t\t/* Calculate the number of context descriptors needed. */\n-\t\tnb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);\n+\t\tnb_desc_data = mb->nb_segs;\n+\t\tnb_desc_ctx = !!(mb->ol_flags &\n+\t\t\t(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));\n \n-\t\t/* The number of descriptors that must be allocated for\n+\t\t/**\n+\t\t * The number of descriptors that must be allocated for\n \t\t * a packet equals to the number of the segments of that\n-\t\t * packet plus 1 context descriptor if needed.\n+\t\t * packet plus the context and ipsec descriptors if needed.\n \t\t */\n-\t\tnb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);\n-\t\ttx_last = (uint16_t)(tx_id + nb_used - 1);\n+\t\tnb_desc_required = nb_desc_data + nb_desc_ctx;\n+\n+\t\tdesc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);\n \n-\t\t/* Circular ring */\n-\t\tif (tx_last >= txq->nb_tx_desc)\n-\t\t\ttx_last = (uint16_t)(tx_last - txq->nb_tx_desc);\n+\t\t/* wrap descriptor ring */\n+\t\tif (desc_idx_last >= txq->nb_tx_desc)\n+\t\t\tdesc_idx_last =\n+\t\t\t\t(uint16_t)(desc_idx_last - txq->nb_tx_desc);\n \n-\t\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u\"\n-\t\t\t   \" tx_first=%u tx_last=%u\",\n-\t\t\t   txq->port_id, txq->queue_id, tx_id, tx_last);\n+\t\tPMD_TX_LOG(DEBUG,\n+\t\t\t\"port_id=%u queue_id=%u tx_first=%u tx_last=%u\",\n+\t\t\ttxq->port_id, txq->queue_id, desc_idx, desc_idx_last);\n \n-\t\tif (nb_used > txq->nb_free) {\n+\t\tif (nb_desc_required > txq->nb_free) {\n \t\t\tif (iavf_xmit_cleanup(txq)) {\n-\t\t\t\tif (nb_tx == 0)\n+\t\t\t\tif (idx == 0)\n \t\t\t\t\treturn 0;\n \t\t\t\tgoto end_of_tx;\n \t\t\t}\n-\t\t\tif (unlikely(nb_used > txq->rs_thresh)) {\n-\t\t\t\twhile (nb_used > txq->nb_free) {\n+\t\t\tif (unlikely(nb_desc_required > txq->rs_thresh)) {\n+\t\t\t\twhile (nb_desc_required > txq->nb_free) {\n \t\t\t\t\tif (iavf_xmit_cleanup(txq)) {\n-\t\t\t\t\t\tif (nb_tx == 
0)\n+\t\t\t\t\t\tif (idx == 0)\n \t\t\t\t\t\t\treturn 0;\n \t\t\t\t\t\tgoto end_of_tx;\n \t\t\t\t\t}\n@@ -2259,122 +2375,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\t\t}\n \t\t}\n \n-\t\t/* Descriptor based VLAN insertion */\n-\t\tif (ol_flags & PKT_TX_VLAN_PKT &&\n-\t\t    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {\n-\t\t\ttd_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;\n-\t\t\ttd_tag = tx_pkt->vlan_tci;\n-\t\t}\n-\n-\t\t/* According to datasheet, the bit2 is reserved and must be\n-\t\t * set to 1.\n-\t\t */\n-\t\ttd_cmd |= 0x04;\n-\n-\t\t/* Enable checksum offloading */\n-\t\tif (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)\n-\t\t\tiavf_txd_enable_checksum(ol_flags, &td_cmd,\n-\t\t\t\t\t\t&td_offset, tx_offload);\n+\t\tiavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);\n \n-\t\tif (nb_ctx) {\n \t\t\t/* Setup TX context descriptor if required */\n-\t\t\tuint64_t cd_type_cmd_tso_mss =\n-\t\t\t\tIAVF_TX_DESC_DTYPE_CONTEXT;\n-\t\t\tvolatile struct iavf_tx_context_desc *ctx_txd =\n+\t\tif (nb_desc_ctx) {\n+\t\t\tvolatile struct iavf_tx_context_desc *ctx_desc =\n \t\t\t\t(volatile struct iavf_tx_context_desc *)\n-\t\t\t\t\t\t\t&txr[tx_id];\n+\t\t\t\t\t&txr[desc_idx];\n \n \t\t\t/* clear QW0 or the previous writeback value\n \t\t\t * may impact next write\n \t\t\t */\n-\t\t\t*(volatile uint64_t *)ctx_txd = 0;\n+\t\t\t*(volatile uint64_t *)ctx_desc = 0;\n \n-\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\ttxn = &txe_ring[txe->next_id];\n \t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n+\n \t\t\tif (txe->mbuf) {\n \t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n \t\t\t\ttxe->mbuf = NULL;\n \t\t\t}\n \n-\t\t\t/* TSO enabled */\n-\t\t\tif (ol_flags & PKT_TX_TCP_SEG)\n-\t\t\t\tcd_type_cmd_tso_mss |=\n-\t\t\t\t\tiavf_set_tso_ctx(tx_pkt, tx_offload);\n+\t\t\tiavf_fill_context_desc(ctx_desc, mb, &tlen);\n+\t\t\tIAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);\n \n-\t\t\tif (ol_flags & PKT_TX_VLAN_PKT &&\n-\t\t\t   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {\n-\t\t\t\tcd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2\n-\t\t\t\t\t<< IAVF_TXD_CTX_QW1_CMD_SHIFT;\n-\t\t\t\tcd_l2tag2 = tx_pkt->vlan_tci;\n+\t\t\ttxe->last_id = desc_idx_last;\n+\t\t\tdesc_idx = txe->next_id;\n+\t\t\ttxe = txn;\n \t\t\t}\n \n-\t\t\tctx_txd->type_cmd_tso_mss =\n-\t\t\t\trte_cpu_to_le_64(cd_type_cmd_tso_mss);\n-\t\t\tctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);\n \n-\t\t\tIAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);\n-\t\t\ttxe->last_id = tx_last;\n-\t\t\ttx_id = txe->next_id;\n-\t\t\ttxe = txn;\n-\t\t}\n \n-\t\tm_seg = tx_pkt;\n+\t\tmb_seg = mb;\n+\n \t\tdo {\n-\t\t\ttxd = &txr[tx_id];\n-\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\tddesc = (volatile struct iavf_tx_desc *)\n+\t\t\t\t\t&txr[desc_idx];\n+\n+\t\t\ttxn = &txe_ring[txe->next_id];\n+\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n \n \t\t\tif (txe->mbuf)\n \t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n-\t\t\ttxe->mbuf = m_seg;\n-\n-\t\t\t/* Setup TX Descriptor */\n-\t\t\tslen = m_seg->data_len;\n-\t\t\tbuf_dma_addr = rte_mbuf_data_iova(m_seg);\n-\t\t\ttxd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);\n-\t\t\ttxd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,\n-\t\t\t\t\t\t\t\t  td_offset,\n-\t\t\t\t\t\t\t\t  slen,\n-\t\t\t\t\t\t\t\t  td_tag);\n-\n-\t\t\tIAVF_DUMP_TX_DESC(txq, txd, tx_id);\n-\t\t\ttxe->last_id = tx_last;\n-\t\t\ttx_id = txe->next_id;\n+\n+\t\t\ttxe->mbuf = mb_seg;\n+\t\t\tiavf_fill_data_desc(ddesc, mb_seg,\n+\t\t\t\t\tddesc_template, tlen, ipseclen);\n+\n+\t\t\tIAVF_DUMP_TX_DESC(txq, ddesc, 
desc_idx);\n+\n+\t\t\ttxe->last_id = desc_idx_last;\n+\t\t\tdesc_idx = txe->next_id;\n \t\t\ttxe = txn;\n-\t\t\tm_seg = m_seg->next;\n-\t\t} while (m_seg);\n+\t\t\tmb_seg = mb_seg->next;\n+\t\t} while (mb_seg);\n \n \t\t/* The last packet data descriptor needs End Of Packet (EOP) */\n-\t\ttd_cmd |= IAVF_TX_DESC_CMD_EOP;\n-\t\ttxq->nb_used = (uint16_t)(txq->nb_used + nb_used);\n-\t\ttxq->nb_free = (uint16_t)(txq->nb_free - nb_used);\n+\t\tddesc_cmd = IAVF_TX_DESC_CMD_EOP;\n+\n+\t\ttxq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);\n+\t\ttxq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);\n \n \t\tif (txq->nb_used >= txq->rs_thresh) {\n \t\t\tPMD_TX_LOG(DEBUG, \"Setting RS bit on TXD id=\"\n \t\t\t\t   \"%4u (port=%d queue=%d)\",\n-\t\t\t\t   tx_last, txq->port_id, txq->queue_id);\n+\t\t\t\t   desc_idx_last, txq->port_id, txq->queue_id);\n \n-\t\t\ttd_cmd |= IAVF_TX_DESC_CMD_RS;\n+\t\t\tddesc_cmd |= IAVF_TX_DESC_CMD_RS;\n \n \t\t\t/* Update txq RS bit counters */\n \t\t\ttxq->nb_used = 0;\n \t\t}\n \n-\t\ttxd->cmd_type_offset_bsz |=\n-\t\t\trte_cpu_to_le_64(((uint64_t)td_cmd) <<\n-\t\t\t\t\t IAVF_TXD_QW1_CMD_SHIFT);\n-\t\tIAVF_DUMP_TX_DESC(txq, txd, tx_id);\n+\t\tddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<\n+\t\t\t\tIAVF_TXD_DATA_QW1_CMD_SHIFT);\n+\n+\t\tIAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);\n \t}\n \n end_of_tx:\n \trte_wmb();\n \n \tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u tx_tail=%u nb_tx=%u\",\n-\t\t   txq->port_id, txq->queue_id, tx_id, nb_tx);\n+\t\t   txq->port_id, txq->queue_id, desc_idx, idx);\n \n-\tIAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);\n-\ttxq->tx_tail = tx_id;\n+\tIAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);\n+\ttxq->tx_tail = desc_idx;\n \n-\treturn nb_tx;\n+\treturn idx;\n }\n \n /* Check if the packet with vlan user priority is transmitted in the\ndiff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h\nindex f4ae2fd6e1..d05a525ef9 100644\n--- a/drivers/net/iavf/iavf_rxtx.h\n+++ b/drivers/net/iavf/iavf_rxtx.h\n@@ -405,6 +405,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {\n \tIAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! 
*/\n };\n \n+\n+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT\t(0)\n+#define IAVF_TXD_DATA_QW1_DTYPE_MASK\t(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)\n+\n+#define IAVF_TXD_DATA_QW1_CMD_SHIFT\t(4)\n+#define IAVF_TXD_DATA_QW1_CMD_MASK\t(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)\n+\n+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT\t(16)\n+#define IAVF_TXD_DATA_QW1_OFFSET_MASK\t(0x3FFFFULL << \\\n+\t\t\t\t\tIAVF_TXD_DATA_QW1_OFFSET_SHIFT)\n+\n+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT\t(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)\n+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK\t\\\n+\t(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)\n+\n+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT\t\\\n+\t(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)\n+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK\t\\\n+\t(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)\n+\n+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT\t\\\n+\t(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)\n+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK\t\\\n+\t(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)\n+\n+#define IAVF_TXD_DATA_QW1_MACLEN_MASK\t\\\n+\t(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)\n+#define IAVF_TXD_DATA_QW1_IPLEN_MASK\t\\\n+\t(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)\n+#define IAVF_TXD_DATA_QW1_L4LEN_MASK\t\\\n+\t(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)\n+#define IAVF_TXD_DATA_QW1_FCLEN_MASK\t\\\n+\t(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)\n+\n+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT\t(34)\n+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK\t\\\n+\t(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)\n+\n+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT\t\t(48)\n+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK\t\t\\\n+\t(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT\t(11)\n+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK\t\\\n+\t(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT\t(14)\n+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK\t\\\n+\t(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT\t\t(30)\n+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK\t\t\\\n+\t(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT\t(30)\n+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK\t\t\\\n+\t(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT\t\t(50)\n+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK\t\t\\\n+\t(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)\n+\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT\t\t(0)\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK\t\t(0x3UL)\n+\n+enum iavf_tx_ctx_desc_tunnel_external_ip_type {\n+\tIAVF_TX_CTX_DESC_EIPT_NONE,\n+\tIAVF_TX_CTX_DESC_EIPT_IPV6,\n+\tIAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,\n+\tIAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD\n+};\n+\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT\t(2)\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK\t\t(0x7FUL)\n+\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT\t(9)\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK\t\t(0x3UL)\n+\n+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {\n+\tIAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,\n+\tIAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,\n+\tIAVF_TX_CTX_DESC_L4_TUN_TYP_GRE\n+};\n+\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT\t(11)\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK\t(0x1UL)\n+\n+#define 
IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT\t(12)\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK\t(0x7FUL)\n+\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT\t(19)\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK\t\t(0xFUL)\n+\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT\t(23)\n+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK\t\t(0x1UL)\n+\n+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM\t\t\t(32)\n+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK\t\t\t(0xFFFFUL)\n+\n+\n+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK\t(0xFFFFF)\n+\n+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */\n+#define IAVF_RX_FLEX_DESC_PTYPE_M\t(0x3FF) /* 10-bits */\n+\n+\n /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */\n #define IAVF_RX_FLEX_DESC_PTYPE_M\t(0x3FF) /* 10-bits */\n \n@@ -555,9 +661,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,\n \tconst volatile struct iavf_tx_desc *tx_desc = desc;\n \tenum iavf_tx_desc_dtype_value type;\n \n-\ttype = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(\n-\t\ttx_desc->cmd_type_offset_bsz &\n-\t\trte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));\n+\n+\ttype = (enum iavf_tx_desc_dtype_value)\n+\t\trte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &\n+\t\t\trte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));\n \tswitch (type) {\n \tcase IAVF_TX_DESC_DTYPE_DATA:\n \t\tname = \"Tx_data_desc\";\n@@ -571,8 +678,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,\n \t}\n \n \tprintf(\"Queue %d %s %d: QW0: 0x%016\"PRIx64\" QW1: 0x%016\"PRIx64\"\\n\",\n-\t       txq->queue_id, name, tx_id, tx_desc->buffer_addr,\n-\t       tx_desc->cmd_type_offset_bsz);\n+\t\ttxq->queue_id, name, tx_id, tx_desc->buffer_addr,\n+\t\ttx_desc->cmd_type_offset_bsz);\n }\n \n #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \\\ndiff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c\nindex edb54991e2..2c3bb0b05f 100644\n--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c\n+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c\n@@ -363,10 +363,12 @@ static inline void\n flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,\n \t\t     const uint32_t *type_table)\n {\n-\tconst __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,\n-\t\t\t\t\t\t 0, IAVF_RX_FLEX_DESC_PTYPE_M,\n-\t\t\t\t\t\t 0, IAVF_RX_FLEX_DESC_PTYPE_M,\n-\t\t\t\t\t\t 0, IAVF_RX_FLEX_DESC_PTYPE_M);\n+\tconst __m128i ptype_mask = _mm_set_epi16(\n+\t\t\t\t\tIAVF_RX_FLEX_DESC_PTYPE_M, 0x0,\n+\t\t\t\t\tIAVF_RX_FLEX_DESC_PTYPE_M, 0x0,\n+\t\t\t\t\tIAVF_RX_FLEX_DESC_PTYPE_M, 0x0,\n+\t\t\t\t\tIAVF_RX_FLEX_DESC_PTYPE_M, 0x0);\n+\n \t__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);\n \t__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);\n \t__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);\n",
    "prefixes": [
        "v9",
        "2/7"
    ]
}
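
A matching sketch for the update verbs listed in the Allow header, under the
same assumptions plus a Patchwork API token (the token value is a
placeholder; "state" is one of the writable patch fields in the Patchwork
REST API, and PATCH changes only the fields you send, while PUT replaces
them all):

import requests

API_TOKEN = "REPLACE-WITH-YOUR-TOKEN"  # placeholder; tokens are issued per user by Patchwork

# Partial update: move the patch to a new state, leaving other fields alone.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/101964/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])  # "accepted" on success

Updating requires maintainer rights on the project; anonymous requests or
non-maintainer tokens get 403 Forbidden, which raise_for_status() surfaces
as an exception.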