get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/7763/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 7763,
    "url": "https://patches.dpdk.org/api/patches/7763/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1445340132-2630-2-git-send-email-xiao.w.wang@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1445340132-2630-2-git-send-email-xiao.w.wang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1445340132-2630-2-git-send-email-xiao.w.wang@intel.com",
    "date": "2015-10-20T11:22:11",
    "name": "[dpdk-dev,v3,1/2] e1000: enable igb TSO support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "1f69b5f61ccece411d16db9cb46cfa076f14c741",
    "submitter": {
        "id": 281,
        "url": "https://patches.dpdk.org/api/people/281/?format=api",
        "name": "Xiao Wang",
        "email": "xiao.w.wang@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1445340132-2630-2-git-send-email-xiao.w.wang@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/7763/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/7763/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 840B28E88;\n\tTue, 20 Oct 2015 13:22:27 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id 1B1EA8E88\n\tfor <dev@dpdk.org>; Tue, 20 Oct 2015 13:22:23 +0200 (CEST)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n\tby fmsmga101.fm.intel.com with ESMTP; 20 Oct 2015 04:22:22 -0700",
            "from shvmail01.sh.intel.com ([10.239.29.42])\n\tby orsmga001.jf.intel.com with ESMTP; 20 Oct 2015 04:22:22 -0700",
            "from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com\n\t[10.239.29.89])\n\tby shvmail01.sh.intel.com with ESMTP id t9KBMKhK003371;\n\tTue, 20 Oct 2015 19:22:20 +0800",
            "from shecgisg004.sh.intel.com (localhost [127.0.0.1])\n\tby shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP\n\tid t9KBMHAU002671; Tue, 20 Oct 2015 19:22:19 +0800",
            "(from xiaowan1@localhost)\n\tby shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id t9KBMHQF002667; \n\tTue, 20 Oct 2015 19:22:17 +0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.17,707,1437462000\"; d=\"scan'208\";a=\"797902969\"",
        "From": "Wang Xiao W <xiao.w.wang@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Tue, 20 Oct 2015 19:22:11 +0800",
        "Message-Id": "<1445340132-2630-2-git-send-email-xiao.w.wang@intel.com>",
        "X-Mailer": "git-send-email 1.7.4.1",
        "In-Reply-To": "<1445340132-2630-1-git-send-email-xiao.w.wang@intel.com>",
        "References": "<1444472872-26866-2-git-send-email-xiao.w.wang@intel.com>\n\t<1445340132-2630-1-git-send-email-xiao.w.wang@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 1/2] e1000: enable igb TSO support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch enables igb TSO feature, the feature works on both PF and VF.\nThe TCP segmentation offload needs to write the offload related information\ninto the advanced context descriptors, which is similar to checksum offload.\n\nSigned-off-by: Wang Xiao W <xiao.w.wang@intel.com>\n---\n drivers/net/e1000/igb_ethdev.c |   6 +-\n drivers/net/e1000/igb_rxtx.c   | 200 +++++++++++++++++++++++++----------------\n 2 files changed, 127 insertions(+), 79 deletions(-)",
    "diff": "diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c\nindex 848ef6e..2e69394 100644\n--- a/drivers/net/e1000/igb_ethdev.c\n+++ b/drivers/net/e1000/igb_ethdev.c\n@@ -1497,7 +1497,8 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\tDEV_TX_OFFLOAD_IPV4_CKSUM  |\n \t\tDEV_TX_OFFLOAD_UDP_CKSUM   |\n \t\tDEV_TX_OFFLOAD_TCP_CKSUM   |\n-\t\tDEV_TX_OFFLOAD_SCTP_CKSUM;\n+\t\tDEV_TX_OFFLOAD_SCTP_CKSUM  |\n+\t\tDEV_TX_OFFLOAD_TCP_TSO;\n \n \tswitch (hw->mac.type) {\n \tcase e1000_82575:\n@@ -1588,7 +1589,8 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\t\t\tDEV_TX_OFFLOAD_IPV4_CKSUM  |\n \t\t\t\tDEV_TX_OFFLOAD_UDP_CKSUM   |\n \t\t\t\tDEV_TX_OFFLOAD_TCP_CKSUM   |\n-\t\t\t\tDEV_TX_OFFLOAD_SCTP_CKSUM;\n+\t\t\t\tDEV_TX_OFFLOAD_SCTP_CKSUM  |\n+\t\t\t\tDEV_TX_OFFLOAD_TCP_TSO;\n \tswitch (hw->mac.type) {\n \tcase e1000_vfadapt:\n \t\tdev_info->max_rx_queues = 2;\ndiff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c\nindex 19905fd..afb8478 100644\n--- a/drivers/net/e1000/igb_rxtx.c\n+++ b/drivers/net/e1000/igb_rxtx.c\n@@ -76,7 +76,8 @@\n #define IGB_TX_OFFLOAD_MASK (\t\t\t \\\n \t\tPKT_TX_VLAN_PKT |\t\t \\\n \t\tPKT_TX_IP_CKSUM |\t\t \\\n-\t\tPKT_TX_L4_MASK)\n+\t\tPKT_TX_L4_MASK |\t\t \\\n+\t\tPKT_TX_TCP_SEG)\n \n static inline struct rte_mbuf *\n rte_rxmbuf_alloc(struct rte_mempool *mp)\n@@ -146,32 +147,40 @@ enum igb_advctx_num {\n };\n \n /** Offload features */\n-union igb_vlan_macip {\n-\tuint32_t data;\n+union igb_tx_offload {\n+\tuint64_t data;\n \tstruct {\n-\t\tuint16_t l2_l3_len; /**< 7bit L2 and 9b L3 lengths combined */\n-\t\tuint16_t vlan_tci;\n-\t\t/**< VLAN Tag Control Identifier (CPU order). */\n-\t} f;\n+\t\tuint64_t l3_len:9; /**< L3 (IP) Header Length. */\n+\t\tuint64_t l2_len:7; /**< L2 (MAC) Header Length. */\n+\t\tuint64_t vlan_tci:16;  /**< VLAN Tag Control Identifier(CPU order). 
*/\n+\t\tuint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */\n+\t\tuint64_t tso_segsz:16; /**< TCP TSO segment size. */\n+\n+\t\t/* uint64_t unused:8; */\n+\t};\n };\n \n /*\n- * Compare mask for vlan_macip_len.data,\n- * should be in sync with igb_vlan_macip.f layout.\n+ * Compare mask for igb_tx_offload.data,\n+ * should be in sync with igb_tx_offload layout.\n  * */\n-#define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */\n-#define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */\n-#define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */\n-/** MAC+IP  length. */\n-#define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)\n+#define TX_MACIP_LEN_CMP_MASK\t0x000000000000FFFF /**< L2L3 header mask. */\n+#define TX_VLAN_CMP_MASK\t\t0x00000000FFFF0000 /**< Vlan mask. */\n+#define TX_TCP_LEN_CMP_MASK\t\t0x000000FF00000000 /**< TCP header mask. */\n+#define TX_TSO_MSS_CMP_MASK\t\t0x00FFFF0000000000 /**< TSO segsz mask. */\n+/** Mac + IP + TCP + Mss mask. */\n+#define TX_TSO_CMP_MASK\t\\\n+\t(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)\n \n /**\n  * Strucutre to check if new context need be built\n  */\n struct igb_advctx_info {\n \tuint64_t flags;           /**< ol_flags related to context build. */\n-\tuint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */\n-\tunion igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */\n+\t/** tx offload: vlan, tso, l2-l3-l4 lengths. */\n+\tunion igb_tx_offload tx_offload;\n+\t/** compare mask for tx offload. 
*/\n+\tunion igb_tx_offload tx_offload_mask;\n };\n \n /**\n@@ -221,6 +230,8 @@ struct igb_tx_queue {\n  * Macro for VMDq feature for 1 GbE NIC.\n  */\n #define E1000_VMOLR_SIZE\t\t\t(8)\n+#define IGB_TSO_MAX_HDRLEN\t\t\t(512)\n+#define IGB_TSO_MAX_MSS\t\t\t\t(9216)\n \n /*********************************************************************\n  *\n@@ -229,6 +240,23 @@ struct igb_tx_queue {\n  **********************************************************************/\n \n /*\n+ *There're some limitations in hardware for TCP segmentation offload. We\n+ *should check whether the parameters are valid.\n+ */\n+static inline uint64_t\n+check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)\n+{\n+\tif (!(ol_req & PKT_TX_TCP_SEG))\n+\t\treturn ol_req;\n+\tif ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +\n+\t\t\tol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {\n+\t\tol_req &= ~PKT_TX_TCP_SEG;\n+\t\tol_req |= PKT_TX_TCP_CKSUM;\n+\t}\n+\treturn ol_req;\n+}\n+\n+/*\n  * Advanced context descriptor are almost same between igb/ixgbe\n  * This is a separate function, looking for optimization opportunity here\n  * Rework required to go with the pre-defined values.\n@@ -237,64 +265,81 @@ struct igb_tx_queue {\n static inline void\n igbe_set_xmit_ctx(struct igb_tx_queue* txq,\n \t\tvolatile struct e1000_adv_tx_context_desc *ctx_txd,\n-\t\tuint64_t ol_flags, uint32_t vlan_macip_lens)\n+\t\tuint64_t ol_flags, union igb_tx_offload tx_offload)\n {\n \tuint32_t type_tucmd_mlhl;\n \tuint32_t mss_l4len_idx;\n \tuint32_t ctx_idx, ctx_curr;\n-\tuint32_t cmp_mask;\n+\tuint32_t vlan_macip_lens;\n+\tunion igb_tx_offload tx_offload_mask;\n \n \tctx_curr = txq->ctx_curr;\n \tctx_idx = ctx_curr + txq->ctx_start;\n \n-\tcmp_mask = 0;\n+\ttx_offload_mask.data = 0;\n \ttype_tucmd_mlhl = 0;\n \n-\tif (ol_flags & PKT_TX_VLAN_PKT) {\n-\t\tcmp_mask |= TX_VLAN_CMP_MASK;\n-\t}\n-\n-\tif (ol_flags & PKT_TX_IP_CKSUM) {\n-\t\ttype_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;\n-\t\tcmp_mask 
|= TX_MACIP_LEN_CMP_MASK;\n-\t}\n-\n \t/* Specify which HW CTX to upload. */\n \tmss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);\n-\tswitch (ol_flags & PKT_TX_L4_MASK) {\n-\tcase PKT_TX_UDP_CKSUM:\n-\t\ttype_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |\n+\n+\tif (ol_flags & PKT_TX_VLAN_PKT)\n+\t\ttx_offload_mask.data |= TX_VLAN_CMP_MASK;\n+\n+\t/* check if TCP segmentation required for this packet */\n+\tif (ol_flags & PKT_TX_TCP_SEG) {\n+\t\t/* implies IP cksum in IPv4 */\n+\t\tif (ol_flags & PKT_TX_IP_CKSUM)\n+\t\t\ttype_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |\n+\t\t\t\tE1000_ADVTXD_TUCMD_L4T_TCP |\n \t\t\t\tE1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;\n-\t\tmss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;\n-\t\tcmp_mask |= TX_MACIP_LEN_CMP_MASK;\n-\t\tbreak;\n-\tcase PKT_TX_TCP_CKSUM:\n-\t\ttype_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |\n+\t\telse\n+\t\t\ttype_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |\n+\t\t\t\tE1000_ADVTXD_TUCMD_L4T_TCP |\n \t\t\t\tE1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;\n-\t\tmss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;\n-\t\tcmp_mask |= TX_MACIP_LEN_CMP_MASK;\n-\t\tbreak;\n-\tcase PKT_TX_SCTP_CKSUM:\n-\t\ttype_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |\n+\n+\t\ttx_offload_mask.data |= TX_TSO_CMP_MASK;\n+\t\tmss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;\n+\t\tmss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;\n+\t} else { /* no TSO, check if hardware checksum is needed */\n+\t\tif (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))\n+\t\t\ttx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;\n+\n+\t\tif (ol_flags & PKT_TX_IP_CKSUM)\n+\t\t\ttype_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;\n+\n+\t\tswitch (ol_flags & PKT_TX_L4_MASK) {\n+\t\tcase PKT_TX_UDP_CKSUM:\n+\t\t\ttype_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |\n \t\t\t\tE1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;\n-\t\tmss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;\n-\t\tcmp_mask |= 
TX_MACIP_LEN_CMP_MASK;\n-\t\tbreak;\n-\tdefault:\n-\t\ttype_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |\n+\t\t\tmss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;\n+\t\t\tbreak;\n+\t\tcase PKT_TX_TCP_CKSUM:\n+\t\t\ttype_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |\n \t\t\t\tE1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;\n-\t\tbreak;\n+\t\t\tmss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;\n+\t\t\tbreak;\n+\t\tcase PKT_TX_SCTP_CKSUM:\n+\t\t\ttype_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |\n+\t\t\t\tE1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;\n+\t\t\tmss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\ttype_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |\n+\t\t\t\tE1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;\n+\t\t\tbreak;\n+\t\t}\n \t}\n \n-\ttxq->ctx_cache[ctx_curr].flags           = ol_flags;\n-\ttxq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;\n-\ttxq->ctx_cache[ctx_curr].vlan_macip_lens.data =\n-\t\tvlan_macip_lens & cmp_mask;\n+\ttxq->ctx_cache[ctx_curr].flags = ol_flags;\n+\ttxq->ctx_cache[ctx_idx].tx_offload.data =\n+\t\ttx_offload_mask.data & tx_offload.data;\n+\ttxq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;\n \n \tctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);\n+\tvlan_macip_lens = (uint32_t)tx_offload.data;\n \tctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);\n-\tctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);\n-\tctx_txd->seqnum_seed     = 0;\n+\tctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);\n+\tctx_txd->seqnum_seed = 0;\n }\n \n /*\n@@ -303,20 +348,20 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,\n  */\n static inline uint32_t\n what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,\n-\t\tuint32_t vlan_macip_lens)\n+\t\tunion igb_tx_offload tx_offload)\n {\n \t/* If match with the current context */\n \tif (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) 
&&\n-\t\t(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==\n-\t\t(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {\n+\t\t(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==\n+\t\t(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {\n \t\t\treturn txq->ctx_curr;\n \t}\n \n \t/* If match with the second context */\n \ttxq->ctx_curr ^= 1;\n \tif (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&\n-\t\t(txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==\n-\t\t(txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {\n+\t\t(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==\n+\t\t(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {\n \t\t\treturn txq->ctx_curr;\n \t}\n \n@@ -333,14 +378,19 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)\n \n \ttmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];\n \ttmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];\n+\ttmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];\n \treturn tmp;\n }\n \n static inline uint32_t\n tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)\n {\n+\tuint32_t cmdtype;\n \tstatic uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};\n-\treturn vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];\n+\tstatic uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};\n+\tcmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];\n+\tcmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];\n+\treturn cmdtype;\n }\n \n uint16_t\n@@ -354,14 +404,6 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \tvolatile union e1000_adv_tx_desc *txd;\n \tstruct rte_mbuf     *tx_pkt;\n \tstruct rte_mbuf     *m_seg;\n-\tunion igb_vlan_macip vlan_macip_lens;\n-\tunion {\n-\t\tuint16_t u16;\n-\t\tstruct {\n-\t\t\tuint16_t l3_len:9;\n-\t\t\tuint16_t l2_len:7;\n-\t\t};\n-\t} l2_l3_len;\n \tuint64_t buf_dma_addr;\n \tuint32_t olinfo_status;\n \tuint32_t cmd_type_len;\n@@ -375,6 +417,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n 
\tuint64_t tx_ol_req;\n \tuint32_t new_ctx = 0;\n \tuint32_t ctx = 0;\n+\tunion igb_tx_offload tx_offload = {0};\n \n \ttxq = tx_queue;\n \tsw_ring = txq->sw_ring;\n@@ -399,16 +442,18 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\ttx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);\n \n \t\tol_flags = tx_pkt->ol_flags;\n-\t\tl2_l3_len.l2_len = tx_pkt->l2_len;\n-\t\tl2_l3_len.l3_len = tx_pkt->l3_len;\n-\t\tvlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;\n-\t\tvlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;\n \t\ttx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;\n \n \t\t/* If a Context Descriptor need be built . */\n \t\tif (tx_ol_req) {\n-\t\t\tctx = what_advctx_update(txq, tx_ol_req,\n-\t\t\t\tvlan_macip_lens.data);\n+\t\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\t\ttx_offload.vlan_tci = tx_pkt->vlan_tci;\n+\t\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n+\t\t\ttx_ol_req = check_tso_para(tx_ol_req, tx_offload);\n+\n+\t\t\tctx = what_advctx_update(txq, tx_ol_req, tx_offload);\n \t\t\t/* Only allocate context descriptor if required*/\n \t\t\tnew_ctx = (ctx == IGB_CTX_NUM);\n \t\t\tctx = txq->ctx_curr;\n@@ -500,6 +545,8 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t */\n \t\tcmd_type_len = txq->txd_type |\n \t\t\tE1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;\n+\t\tif (tx_ol_req & PKT_TX_TCP_SEG)\n+\t\t\tpkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);\n \t\tolinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);\n #if defined(RTE_LIBRTE_IEEE1588)\n \t\tif (ol_flags & PKT_TX_IEEE1588_TMST)\n@@ -523,8 +570,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t\t\ttxe->mbuf = NULL;\n \t\t\t\t}\n \n-\t\t\t\tigbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,\n-\t\t\t\t    vlan_macip_lens.data);\n+\t\t\t\tigbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);\n \n \t\t\t\ttxe->last_id = tx_last;\n \t\t\t\ttx_id = 
txe->next_id;\n@@ -532,8 +578,8 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t}\n \n \t\t\t/* Setup the TX Advanced Data Descriptor */\n-\t\t\tcmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);\n-\t\t\tolinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);\n+\t\t\tcmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);\n+\t\t\tolinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);\n \t\t\tolinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);\n \t\t}\n \n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "1/2"
    ]
}