get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/91559/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 91559,
    "url": "https://patches.dpdk.org/api/patches/91559/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210415085811.56429-2-leyi.rong@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210415085811.56429-2-leyi.rong@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210415085811.56429-2-leyi.rong@intel.com",
    "date": "2021-04-15T08:58:10",
    "name": "[v5,1/2] net/ice: add Tx AVX512 offload path",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "f52d6697592c75074f99a87c4fa00899c3b7c3ce",
    "submitter": {
        "id": 1204,
        "url": "https://patches.dpdk.org/api/people/1204/?format=api",
        "name": "Leyi Rong",
        "email": "leyi.rong@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210415085811.56429-2-leyi.rong@intel.com/mbox/",
    "series": [
        {
            "id": 16405,
            "url": "https://patches.dpdk.org/api/series/16405/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=16405",
            "date": "2021-04-15T08:58:09",
            "name": "add alternative AVX512 offload path",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/16405/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/91559/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/91559/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E2D60A0A0E;\n\tThu, 15 Apr 2021 11:23:44 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 35D1A162185;\n\tThu, 15 Apr 2021 11:23:43 +0200 (CEST)",
            "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by mails.dpdk.org (Postfix) with ESMTP id 52A97162170\n for <dev@dpdk.org>; Thu, 15 Apr 2021 11:23:39 +0200 (CEST)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 15 Apr 2021 02:23:38 -0700",
            "from dpdk-lrong-srv-04.sh.intel.com ([10.67.119.221])\n by fmsmga002.fm.intel.com with ESMTP; 15 Apr 2021 02:23:37 -0700"
        ],
        "IronPort-SDR": [
            "\n +qCBhYVS0Fwo1uW/+eSAInBX7WvgIenyFiyCOJZTukMcx9R+Z7zaELTyRlKssUDp2yHZV5HjLf\n GX0K0xLjX5gQ==",
            "\n FeP/gOfnEAYLMG+0nCOL5+d3PiIU250bvwZZyc3TXNeZhmhvzVQvRs935dOHdBMB4eNog5hUgp\n 6UqFxFCOAooQ=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,9954\"; a=\"182320657\"",
            "E=Sophos;i=\"5.82,223,1613462400\"; d=\"scan'208\";a=\"182320657\"",
            "E=Sophos;i=\"5.82,223,1613462400\"; d=\"scan'208\";a=\"452841428\""
        ],
        "X-ExtLoop1": "1",
        "From": "Leyi Rong <leyi.rong@intel.com>",
        "To": "qi.z.zhang@intel.com,\n\twenzhuo.lu@intel.com",
        "Cc": "dev@dpdk.org,\n\tLeyi Rong <leyi.rong@intel.com>",
        "Date": "Thu, 15 Apr 2021 16:58:10 +0800",
        "Message-Id": "<20210415085811.56429-2-leyi.rong@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210415085811.56429-1-leyi.rong@intel.com>",
        "References": "<20210317091409.11725-1-leyi.rong@intel.com>\n <20210415085811.56429-1-leyi.rong@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v5 1/2] net/ice: add Tx AVX512 offload path",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add alternative Tx data path for AVX512 which can support partial\nTx offload features, including Tx checksum offload, vlan/QinQ\ninsertion offload.\n\nSigned-off-by: Leyi Rong <leyi.rong@intel.com>\nSigned-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>\n---\n drivers/net/ice/ice_rxtx.c            |  28 +++++--\n drivers/net/ice/ice_rxtx.h            |   3 +\n drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-\n drivers/net/ice/ice_rxtx_vec_avx512.c |  58 +++++++++++---\n drivers/net/ice/ice_rxtx_vec_common.h | 106 ++++++++++++++++++++++----\n drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-\n 6 files changed, 164 insertions(+), 35 deletions(-)",
    "diff": "diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c\nindex 0827db9c9e..75326c76ab 100644\n--- a/drivers/net/ice/ice_rxtx.c\n+++ b/drivers/net/ice/ice_rxtx.c\n@@ -8,6 +8,7 @@\n \n #include \"rte_pmd_ice.h\"\n #include \"ice_rxtx.h\"\n+#include \"ice_rxtx_vec_common.h\"\n \n #define ICE_TX_CKSUM_OFFLOAD_MASK (\t\t \\\n \t\tPKT_TX_IP_CKSUM |\t\t \\\n@@ -3267,12 +3268,14 @@ ice_set_tx_function(struct rte_eth_dev *dev)\n #ifdef RTE_ARCH_X86\n \tstruct ice_tx_queue *txq;\n \tint i;\n+\tint tx_check_ret;\n \tbool use_avx512 = false;\n \tbool use_avx2 = false;\n \n \tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n-\t\tif (!ice_tx_vec_dev_check(dev) &&\n-\t\t\t\trte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {\n+\t\ttx_check_ret = ice_tx_vec_dev_check(dev);\n+\t\tif (tx_check_ret >= 0 &&\n+\t\t    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {\n \t\t\tad->tx_vec_allowed = true;\n \t\t\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n \t\t\t\ttxq = dev->data->tx_queues[i];\n@@ -3291,12 +3294,15 @@ ice_set_tx_function(struct rte_eth_dev *dev)\n \t\t\tPMD_DRV_LOG(NOTICE,\n \t\t\t\t\"AVX512 is not supported in build env\");\n #endif\n-\t\t\tif (!use_avx512 &&\n+\t\t\tif (!use_avx512 && tx_check_ret == ICE_VECTOR_PATH &&\n \t\t\t(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||\n \t\t\trte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&\n \t\t\trte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)\n \t\t\t\tuse_avx2 = true;\n \n+\t\t\tif (!use_avx512 && tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)\n+\t\t\t\tad->tx_vec_allowed = false;\n+\n \t\t} else {\n \t\t\tad->tx_vec_allowed = false;\n \t\t}\n@@ -3305,9 +3311,18 @@ ice_set_tx_function(struct rte_eth_dev *dev)\n \tif (ad->tx_vec_allowed) {\n \t\tif (use_avx512) {\n #ifdef CC_AVX512_SUPPORT\n-\t\t\tPMD_DRV_LOG(NOTICE, \"Using AVX512 Vector Tx (port %d).\",\n-\t\t\t\t    dev->data->port_id);\n-\t\t\tdev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;\n+\t\t\tif (tx_check_ret == 
ICE_VECTOR_OFFLOAD_PATH) {\n+\t\t\t\tPMD_DRV_LOG(NOTICE,\n+\t\t\t\t\t    \"Using AVX512 OFFLOAD Vector Tx (port %d).\",\n+\t\t\t\t\t    dev->data->port_id);\n+\t\t\t\tdev->tx_pkt_burst =\n+\t\t\t\t\tice_xmit_pkts_vec_avx512_offload;\n+\t\t\t} else {\n+\t\t\t\tPMD_DRV_LOG(NOTICE,\n+\t\t\t\t\t    \"Using AVX512 Vector Tx (port %d).\",\n+\t\t\t\t\t    dev->data->port_id);\n+\t\t\t\tdev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;\n+\t\t\t}\n #endif\n \t\t} else {\n \t\t\tPMD_DRV_LOG(DEBUG, \"Using %sVector Tx (port %d).\",\n@@ -3343,6 +3358,7 @@ static const struct {\n #ifdef RTE_ARCH_X86\n #ifdef CC_AVX512_SUPPORT\n \t{ ice_xmit_pkts_vec_avx512, \"Vector AVX512\" },\n+\t{ ice_xmit_pkts_vec_avx512_offload, \"Offload Vector AVX512\" },\n #endif\n \t{ ice_xmit_pkts_vec_avx2, \"Vector AVX2\" },\n \t{ ice_xmit_pkts_vec,      \"Vector SSE\" },\ndiff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h\nindex 99096e4c21..f72fad0255 100644\n--- a/drivers/net/ice/ice_rxtx.h\n+++ b/drivers/net/ice/ice_rxtx.h\n@@ -261,6 +261,9 @@ uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,\n \t\t\t\t\t    uint16_t nb_pkts);\n uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t\t  uint16_t nb_pkts);\n+uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,\n+\t\t\t\t\t  struct rte_mbuf **tx_pkts,\n+\t\t\t\t\t  uint16_t nb_pkts);\n int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);\n int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);\n int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);\ndiff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c\nindex 83dcdf15d4..8d4bd6df1b 100644\n--- a/drivers/net/ice/ice_rxtx_vec_avx2.c\n+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c\n@@ -853,7 +853,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tnb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);\n \n \tif (txq->nb_tx_free < 
txq->tx_free_thresh)\n-\t\tice_tx_free_bufs(txq);\n+\t\tice_tx_free_bufs_vec(txq);\n \n \tnb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);\n \tif (unlikely(nb_pkts == 0))\ndiff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c\nindex a668b82232..1c4a59a170 100644\n--- a/drivers/net/ice/ice_rxtx_vec_avx512.c\n+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c\n@@ -982,23 +982,26 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)\n \treturn txq->tx_rs_thresh;\n }\n \n-static inline void\n+static __rte_always_inline void\n ice_vtx1(volatile struct ice_tx_desc *txdp,\n-\t struct rte_mbuf *pkt, uint64_t flags)\n+\t struct rte_mbuf *pkt, uint64_t flags, bool do_offload)\n {\n \tuint64_t high_qw =\n \t\t(ICE_TX_DESC_DTYPE_DATA |\n \t\t ((uint64_t)flags  << ICE_TXD_QW1_CMD_S) |\n \t\t ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));\n \n+\tif (do_offload)\n+\t\tice_txd_enable_offload(pkt, &high_qw);\n+\n \t__m128i descriptor = _mm_set_epi64x(high_qw,\n \t\t\t\tpkt->buf_iova + pkt->data_off);\n \t_mm_store_si128((__m128i *)txdp, descriptor);\n }\n \n-static inline void\n-ice_vtx(volatile struct ice_tx_desc *txdp,\n-\tstruct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)\n+static __rte_always_inline void\n+ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,\n+\tuint16_t nb_pkts,  uint64_t flags, bool do_offload)\n {\n \tconst uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |\n \t\t\t((uint64_t)flags  << ICE_TXD_QW1_CMD_S));\n@@ -1008,18 +1011,26 @@ ice_vtx(volatile struct ice_tx_desc *txdp,\n \t\t\thi_qw_tmpl |\n \t\t\t((uint64_t)pkt[3]->data_len <<\n \t\t\t ICE_TXD_QW1_TX_BUF_SZ_S);\n+\t\tif (do_offload)\n+\t\t\tice_txd_enable_offload(pkt[3], &hi_qw3);\n \t\tuint64_t hi_qw2 =\n \t\t\thi_qw_tmpl |\n \t\t\t((uint64_t)pkt[2]->data_len <<\n \t\t\t ICE_TXD_QW1_TX_BUF_SZ_S);\n+\t\tif (do_offload)\n+\t\t\tice_txd_enable_offload(pkt[2], &hi_qw2);\n \t\tuint64_t hi_qw1 =\n \t\t\thi_qw_tmpl |\n 
\t\t\t((uint64_t)pkt[1]->data_len <<\n \t\t\t ICE_TXD_QW1_TX_BUF_SZ_S);\n+\t\tif (do_offload)\n+\t\t\tice_txd_enable_offload(pkt[1], &hi_qw1);\n \t\tuint64_t hi_qw0 =\n \t\t\thi_qw_tmpl |\n \t\t\t((uint64_t)pkt[0]->data_len <<\n \t\t\t ICE_TXD_QW1_TX_BUF_SZ_S);\n+\t\tif (do_offload)\n+\t\t\tice_txd_enable_offload(pkt[0], &hi_qw0);\n \n \t\t__m512i desc0_3 =\n \t\t\t_mm512_set_epi64\n@@ -1036,7 +1047,7 @@ ice_vtx(volatile struct ice_tx_desc *txdp,\n \n \t/* do any last ones */\n \twhile (nb_pkts) {\n-\t\tice_vtx1(txdp, *pkt, flags);\n+\t\tice_vtx1(txdp, *pkt, flags, do_offload);\n \t\ttxdp++, pkt++, nb_pkts--;\n \t}\n }\n@@ -1051,9 +1062,9 @@ ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,\n \t\ttxep[i].mbuf = tx_pkts[i];\n }\n \n-static inline uint16_t\n+static __rte_always_inline uint16_t\n ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,\n-\t\t\t\tuint16_t nb_pkts)\n+\t\t\t\tuint16_t nb_pkts, bool do_offload)\n {\n \tstruct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;\n \tvolatile struct ice_tx_desc *txdp;\n@@ -1083,11 +1094,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tif (nb_commit >= n) {\n \t\tice_tx_backlog_entry_avx512(txep, tx_pkts, n);\n \n-\t\tice_vtx(txdp, tx_pkts, n - 1, flags);\n+\t\tice_vtx(txdp, tx_pkts, n - 1, flags, do_offload);\n \t\ttx_pkts += (n - 1);\n \t\ttxdp += (n - 1);\n \n-\t\tice_vtx1(txdp, *tx_pkts++, rs);\n+\t\tice_vtx1(txdp, *tx_pkts++, rs, do_offload);\n \n \t\tnb_commit = (uint16_t)(nb_commit - n);\n \n@@ -1101,7 +1112,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,\n \n \tice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);\n \n-\tice_vtx(txdp, tx_pkts, nb_commit, flags);\n+\tice_vtx(txdp, tx_pkts, nb_commit, flags, do_offload);\n \n \ttx_id = (uint16_t)(tx_id + nb_commit);\n \tif (tx_id > txq->tx_next_rs) {\n@@ -1131,7 +1142,30 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,\n \n \t\tnum = 
(uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);\n \t\tret = ice_xmit_fixed_burst_vec_avx512(tx_queue,\n-\t\t\t\t\t\t      &tx_pkts[nb_tx], num);\n+\t\t\t\t&tx_pkts[nb_tx], num, false);\n+\t\tnb_tx += ret;\n+\t\tnb_pkts -= ret;\n+\t\tif (ret < num)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn nb_tx;\n+}\n+\n+uint16_t\n+ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\t\t\t uint16_t nb_pkts)\n+{\n+\tuint16_t nb_tx = 0;\n+\tstruct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;\n+\n+\twhile (nb_pkts) {\n+\t\tuint16_t ret, num;\n+\n+\t\tnum = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);\n+\t\tret = ice_xmit_fixed_burst_vec_avx512(tx_queue,\n+\t\t\t\t&tx_pkts[nb_tx], num, true);\n+\n \t\tnb_tx += ret;\n \t\tnb_pkts -= ret;\n \t\tif (ret < num)\ndiff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h\nindex a5d76a2936..942647f4e9 100644\n--- a/drivers/net/ice/ice_rxtx_vec_common.h\n+++ b/drivers/net/ice/ice_rxtx_vec_common.h\n@@ -77,7 +77,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,\n }\n \n static __rte_always_inline int\n-ice_tx_free_bufs(struct ice_tx_queue *txq)\n+ice_tx_free_bufs_vec(struct ice_tx_queue *txq)\n {\n \tstruct ice_tx_entry *txep;\n \tuint32_t n;\n@@ -197,7 +197,8 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)\n #ifdef CC_AVX512_SUPPORT\n \tstruct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;\n \n-\tif (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) {\n+\tif (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||\n+\t    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {\n \t\tstruct ice_vec_tx_entry *swr = (void *)txq->sw_ring;\n \n \t\tif (txq->tx_tail < i) {\n@@ -267,29 +268,39 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)\n \treturn 0;\n }\n \n-#define ICE_NO_VECTOR_FLAGS (\t\t\t\t \\\n-\t\tDEV_TX_OFFLOAD_MULTI_SEGS |\t\t \\\n-\t\tDEV_TX_OFFLOAD_VLAN_INSERT |\t\t \\\n-\t\tDEV_TX_OFFLOAD_IPV4_CKSUM |\t\t 
\\\n-\t\tDEV_TX_OFFLOAD_SCTP_CKSUM |\t\t \\\n-\t\tDEV_TX_OFFLOAD_UDP_CKSUM |\t\t \\\n-\t\tDEV_TX_OFFLOAD_TCP_TSO |\t\t \\\n+#define ICE_TX_NO_VECTOR_FLAGS (\t\t\t\\\n+\t\tDEV_TX_OFFLOAD_MULTI_SEGS |\t\t\\\n+\t\tDEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |\t\\\n+\t\tDEV_TX_OFFLOAD_TCP_TSO)\n+\n+#define ICE_TX_VECTOR_OFFLOAD (\t\t\t\t\\\n+\t\tDEV_TX_OFFLOAD_VLAN_INSERT |\t\t\\\n+\t\tDEV_TX_OFFLOAD_QINQ_INSERT |\t\t\\\n+\t\tDEV_TX_OFFLOAD_IPV4_CKSUM |\t\t\\\n+\t\tDEV_TX_OFFLOAD_SCTP_CKSUM |\t\t\\\n+\t\tDEV_TX_OFFLOAD_UDP_CKSUM |\t\t\\\n \t\tDEV_TX_OFFLOAD_TCP_CKSUM)\n \n+#define ICE_VECTOR_PATH\t\t0\n+#define ICE_VECTOR_OFFLOAD_PATH\t1\n+\n static inline int\n ice_tx_vec_queue_default(struct ice_tx_queue *txq)\n {\n \tif (!txq)\n \t\treturn -1;\n \n-\tif (txq->offloads & ICE_NO_VECTOR_FLAGS)\n-\t\treturn -1;\n-\n \tif (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||\n \t    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)\n \t\treturn -1;\n \n-\treturn 0;\n+\tif (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)\n+\t\treturn -1;\n+\n+\tif (txq->offloads & ICE_TX_VECTOR_OFFLOAD)\n+\t\treturn ICE_VECTOR_OFFLOAD_PATH;\n+\n+\treturn ICE_VECTOR_PATH;\n }\n \n static inline int\n@@ -312,14 +323,19 @@ ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)\n {\n \tint i;\n \tstruct ice_tx_queue *txq;\n+\tint ret = 0;\n+\tint result = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n \t\ttxq = dev->data->tx_queues[i];\n-\t\tif (ice_tx_vec_queue_default(txq))\n+\t\tret = ice_tx_vec_queue_default(txq);\n+\t\tif (ret < 0)\n \t\t\treturn -1;\n+\t\tif (ret == ICE_VECTOR_OFFLOAD_PATH)\n+\t\t\tresult = ret;\n \t}\n \n-\treturn 0;\n+\treturn result;\n }\n \n #ifdef CC_AVX2_SUPPORT\n@@ -521,4 +537,64 @@ ice_rxq_rearm_common(struct ice_rx_queue *rxq, __rte_unused bool avx512)\n }\n #endif\n \n+static inline void\n+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,\n+\t\t       uint64_t *txd_hi)\n+{\n+\tuint64_t ol_flags = tx_pkt->ol_flags;\n+\tuint32_t td_cmd = 0;\n+\tuint32_t td_offset = 0;\n+\n+\t/* Tx Checksum 
Offload */\n+\t/* SET MACLEN */\n+\ttd_offset |= (tx_pkt->l2_len >> 1) <<\n+\t\t\tICE_TX_DESC_LEN_MACLEN_S;\n+\n+\t/* Enable L3 checksum offload */\n+\tif (ol_flags & PKT_TX_IP_CKSUM) {\n+\t\ttd_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;\n+\t\ttd_offset |= (tx_pkt->l3_len >> 2) <<\n+\t\t\tICE_TX_DESC_LEN_IPLEN_S;\n+\t} else if (ol_flags & PKT_TX_IPV4) {\n+\t\ttd_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;\n+\t\ttd_offset |= (tx_pkt->l3_len >> 2) <<\n+\t\t\tICE_TX_DESC_LEN_IPLEN_S;\n+\t} else if (ol_flags & PKT_TX_IPV6) {\n+\t\ttd_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;\n+\t\ttd_offset |= (tx_pkt->l3_len >> 2) <<\n+\t\t\tICE_TX_DESC_LEN_IPLEN_S;\n+\t}\n+\n+\t/* Enable L4 checksum offloads */\n+\tswitch (ol_flags & PKT_TX_L4_MASK) {\n+\tcase PKT_TX_TCP_CKSUM:\n+\t\ttd_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;\n+\t\ttd_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<\n+\t\t\tICE_TX_DESC_LEN_L4_LEN_S;\n+\t\tbreak;\n+\tcase PKT_TX_SCTP_CKSUM:\n+\t\ttd_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;\n+\t\ttd_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<\n+\t\t\tICE_TX_DESC_LEN_L4_LEN_S;\n+\t\tbreak;\n+\tcase PKT_TX_UDP_CKSUM:\n+\t\ttd_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;\n+\t\ttd_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<\n+\t\t\tICE_TX_DESC_LEN_L4_LEN_S;\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\t*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;\n+\n+\t/* Tx VLAN/QINQ insertion Offload */\n+\tif (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {\n+\t\ttd_cmd |= ICE_TX_DESC_CMD_IL2TAG1;\n+\t\t*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<\n+\t\t\t\tICE_TXD_QW1_L2TAG1_S);\n+\t}\n+\n+\t*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;\n+}\n #endif\ndiff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c\nindex 3e467c48f1..6029cc2d99 100644\n--- a/drivers/net/ice/ice_rxtx_vec_sse.c\n+++ b/drivers/net/ice/ice_rxtx_vec_sse.c\n@@ -702,7 +702,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tnb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);\n \n \tif 
(txq->nb_tx_free < txq->tx_free_thresh)\n-\t\tice_tx_free_bufs(txq);\n+\t\tice_tx_free_bufs_vec(txq);\n \n \tnb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);\n \tnb_commit = nb_pkts;\n",
    "prefixes": [
        "v5",
        "1/2"
    ]
}