From patchwork Thu Jan 14 01:38:35 2016
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Wenzhuo Lu <wenzhuo.lu@intel.com>
X-Patchwork-Id: 9841
X-Patchwork-Delegate: bruce.richardson@intel.com
From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Date: Thu, 14 Jan 2016 09:38:35 +0800
Message-Id: <1452735516-4527-6-git-send-email-wenzhuo.lu@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1452735516-4527-1-git-send-email-wenzhuo.lu@intel.com>
References: <1452496044-17524-1-git-send-email-wenzhuo.lu@intel.com>
 <1452735516-4527-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH v2 5/6] ixgbe: support VxLAN & NVGRE TX checksum off-load

This patch adds VxLAN & NVGRE TX checksum off-load. When the outer IP
header checksum off-load flag is set, the context descriptor is set up
to enable this off-load.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_rxtx.c | 52 ++++++++++++++++++++++++++++++++++--------
 drivers/net/ixgbe/ixgbe_rxtx.h |  6 ++++-
 lib/librte_mbuf/rte_mbuf.h     |  2 ++
 3 files changed, 49 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 512ac3a..fea2495 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -85,7 +85,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_OUTER_IP_CKSUM)
 
 static inline struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
@@ -364,9 +365,11 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 	uint32_t ctx_idx;
 	uint32_t vlan_macip_lens;
 	union ixgbe_tx_offload tx_offload_mask;
+	uint32_t seqnum_seed = 0;
 
 	ctx_idx = txq->ctx_curr;
-	tx_offload_mask.data = 0;
+	tx_offload_mask.data[0] = 0;
+	tx_offload_mask.data[1] = 0;
 	type_tucmd_mlhl = 0;
 
 	/* Specify which HW CTX to upload. */
@@ -430,9 +433,20 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		}
 	}
 
+	if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+		tx_offload_mask.outer_l3_len |= ~0;
+		tx_offload_mask.outer_l2_len |= ~0;
+		seqnum_seed |= tx_offload.outer_l3_len
+			       << IXGBE_ADVTXD_OUTER_IPLEN;
+		seqnum_seed |= tx_offload.outer_l2_len
+			       << IXGBE_ADVTXD_TUNNEL_LEN;
+	}
+
 	txq->ctx_cache[ctx_idx].flags = ol_flags;
-	txq->ctx_cache[ctx_idx].tx_offload.data =
-		tx_offload_mask.data & tx_offload.data;
+	txq->ctx_cache[ctx_idx].tx_offload.data[0] =
+		tx_offload_mask.data[0] & tx_offload.data[0];
+	txq->ctx_cache[ctx_idx].tx_offload.data[1] =
+		tx_offload_mask.data[1] & tx_offload.data[1];
 	txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
 
 	ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
@@ -441,7 +455,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 	vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
 	ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
 	ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
-	ctx_txd->seqnum_seed = 0;
+	ctx_txd->seqnum_seed = seqnum_seed;
 }
 
 /*
@@ -454,16 +468,24 @@ what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
 {
 	/* If match with the current used context */
 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
-		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+		(txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+		 & tx_offload.data[0])) &&
+		(txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+		 & tx_offload.data[1])))) {
 			return txq->ctx_curr;
 	}
 
 	/* What if match with the next context */
 	txq->ctx_curr ^= 1;
 	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
-		(txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
-		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
+		(txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+		 & tx_offload.data[0])) &&
+		(txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+		(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+		 & tx_offload.data[1])))) {
 			return txq->ctx_curr;
 	}
 
@@ -492,6 +514,12 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 		cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
 	if (ol_flags & PKT_TX_TCP_SEG)
 		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
+	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+		cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+	if (ol_flags & PKT_TX_VXLAN_PKT)
+		cmdtype &= ~(1 << IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE);
+	else
+		cmdtype |= (1 << IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE);
 	return cmdtype;
 }
 
@@ -588,8 +616,10 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t tx_ol_req;
 	uint32_t ctx = 0;
 	uint32_t new_ctx;
-	union ixgbe_tx_offload tx_offload = {0};
+	union ixgbe_tx_offload tx_offload;
 
+	tx_offload.data[0] = 0;
+	tx_offload.data[1] = 0;
 	txq = tx_queue;
 	sw_ring = txq->sw_ring;
 	txr = txq->tx_ring;
@@ -623,6 +653,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.l4_len = tx_pkt->l4_len;
 			tx_offload.vlan_tci = tx_pkt->vlan_tci;
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
+			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
 
 			/* If new context need be built or reuse the exist ctx. */
 			ctx = what_advctx_update(txq, tx_ol_req,
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 475a800..c15f9fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -163,7 +163,7 @@ enum ixgbe_advctx_num {
 
 /** Offload features */
 union ixgbe_tx_offload {
-	uint64_t data;
+	uint64_t data[2];
 	struct {
 		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
 		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
@@ -171,6 +171,10 @@ union ixgbe_tx_offload {
 		uint64_t tso_segsz:16; /**< TCP TSO segment size */
 		uint64_t vlan_tci:16;
 		/**< VLAN Tag Control Identifier (CPU order). */
+
+		/* fields for TX offloading of tunnels */
+		uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
+		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
 	};
 };
 
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 5ad5e59..1bda00e 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -103,6 +103,8 @@ extern "C" {
 
 /* add new TX flags here */
 
+#define PKT_TX_VXLAN_PKT     (1ULL << 48) /**< TX packet is a VxLAN packet. */
+
 /**
  * Second VLAN insertion (QinQ) flag.
  */
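
For reference, below is a minimal application-side sketch (not part of the patch) of how a VxLAN-encapsulated TCP/IPv4 packet could be flagged to use this off-load, assuming the packet headers are already written into the mbuf, the frame uses untagged Ethernet, and the outer UDP + VxLAN headers are counted as part of the inner l2_len. The helper name request_vxlan_tx_cksum() and the header sizes are illustrative assumptions.

/*
 * Illustrative only -- not part of this patch.
 */
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

#define VXLAN_HDR_LEN 8 /* VxLAN header size in bytes */

static void
request_vxlan_tx_cksum(struct rte_mbuf *m)
{
	/* Outer header lengths copied into the TX offload context by this patch. */
	m->outer_l2_len = sizeof(struct ether_hdr);
	m->outer_l3_len = sizeof(struct ipv4_hdr);

	/* For tunnel packets the outer UDP + VxLAN headers are accounted
	 * in the inner l2_len together with the inner Ethernet header. */
	m->l2_len = sizeof(struct udp_hdr) + VXLAN_HDR_LEN +
		    sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->l4_len = sizeof(struct tcp_hdr);

	/* Request outer IPv4 checksum plus inner IPv4/TCP checksums.
	 * PKT_TX_VXLAN_PKT (added by this patch) marks the tunnel as
	 * VxLAN; without it the driver treats the packet as NVGRE. */
	m->ol_flags |= PKT_TX_OUTER_IP_CKSUM | PKT_TX_VXLAN_PKT |
		       PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}

The same flags without PKT_TX_VXLAN_PKT would mark an NVGRE packet, matching the tx_desc_ol_flags_to_cmdtype() change above.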