From: Rahul Lakkireddy
To: dev@dpdk.org
Date: Thu, 8 Oct 2015 19:16:07 +0530
Message-Id: <764c5a14e2d748b940037afa70aa98b63adf23d9.1444303884.git.rahul.lakkireddy@chelsio.com>
Cc: Felix Marti, Kumar Sanghvi, Nirranjan Kirubaharan
Subject: [dpdk-dev] [PATCH v2 3/6] cxgbe: Update tx path to transmit jumbo frames

Add a non-coalesced path. Skip coalescing for jumbo frames and send
such packets through the non-coalesced path if there are enough
credits. Also, free these non-coalesced packets while reclaiming
credits.

Signed-off-by: Rahul Lakkireddy
Signed-off-by: Kumar Sanghvi
---
v2:
- No change

 drivers/net/cxgbe/sge.c | 96 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 64 insertions(+), 32 deletions(-)

diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 69ab487..b1d0ea6 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -199,11 +199,20 @@ static void free_tx_desc(struct sge_txq *q, unsigned int n)
 
 static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
 {
+	struct tx_sw_desc *d;
 	unsigned int cidx = q->cidx;
 
+	d = &q->sdesc[cidx];
 	while (n--) {
-		if (++cidx == q->size)
+		if (d->mbuf) {                       /* an SGL is present */
+			rte_pktmbuf_free(d->mbuf);
+			d->mbuf = NULL;
+		}
+		++d;
+		if (++cidx == q->size) {
 			cidx = 0;
+			d = q->sdesc;
+		}
 	}
 	q->cidx = cidx;
 }
@@ -1038,6 +1047,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)
 	u32 wr_mid;
 	u64 cntrl, *end;
 	bool v6;
+	u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
 	/* Reject xmit if queue is stopped */
 	if (unlikely(txq->flags & EQ_STOPPED))
@@ -1053,6 +1063,10 @@ out_free:
 		return 0;
 	}
 
+	if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
+	    (unlikely(m->pkt_len > max_pkt_len)))
+		goto out_free;
+
 	pi = (struct port_info *)txq->eth_dev->data->dev_private;
 	adap = pi->adapter;
 
@@ -1060,7 +1074,7 @@ out_free:
 	/* align the end of coalesce WR to a 512 byte boundary */
 	txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
 
-	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+	if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
 		if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
 			if (unlikely(map_mbuf(mbuf, addr) < 0)) {
 				dev_warn(adap, "%s: mapping err for coalesce\n",
@@ -1107,33 +1121,46 @@ out_free:
 
 	len = 0;
 	len += sizeof(*cpl);
-	lso = (void *)(wr + 1);
-	v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
-	l3hdr_len = m->l3_len;
-	l4hdr_len = m->l4_len;
-	eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
-	len += sizeof(*lso);
-	wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
-			       V_FW_WR_IMMDLEN(len));
-	lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
-			      F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
-			      V_LSO_IPV6(v6) |
-			      V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
-			      V_LSO_IPHDR_LEN(l3hdr_len / 4) |
-			      V_LSO_TCPHDR_LEN(l4hdr_len / 4));
-	lso->ipid_ofst = htons(0);
-	lso->mss = htons(m->tso_segsz);
-	lso->seqno_offset = htonl(0);
-	if (is_t4(adap->params.chip))
-		lso->len = htonl(m->pkt_len);
-	else
-		lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
-	cpl = (void *)(lso + 1);
-	cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-		V_TXPKT_IPHDR_LEN(l3hdr_len) |
-		V_TXPKT_ETHHDR_LEN(eth_xtra_len);
-	txq->stats.tso++;
-	txq->stats.tx_cso += m->tso_segsz;
+
+	/* Coalescing skipped and we send through normal path */
+	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+		wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+				       V_FW_WR_IMMDLEN(len));
+		cpl = (void *)(wr + 1);
+		if (m->ol_flags & PKT_TX_IP_CKSUM) {
+			cntrl = hwcsum(adap->params.chip, m) |
+				F_TXPKT_IPCSUM_DIS;
+			txq->stats.tx_cso++;
+		}
+	} else {
+		lso = (void *)(wr + 1);
+		v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
+		l3hdr_len = m->l3_len;
+		l4hdr_len = m->l4_len;
+		eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
+		len += sizeof(*lso);
+		wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+				       V_FW_WR_IMMDLEN(len));
+		lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+				      F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+				      V_LSO_IPV6(v6) |
+				      V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+				      V_LSO_IPHDR_LEN(l3hdr_len / 4) |
+				      V_LSO_TCPHDR_LEN(l4hdr_len / 4));
+		lso->ipid_ofst = htons(0);
+		lso->mss = htons(m->tso_segsz);
+		lso->seqno_offset = htonl(0);
+		if (is_t4(adap->params.chip))
+			lso->len = htonl(m->pkt_len);
+		else
+			lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
+		cpl = (void *)(lso + 1);
+		cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+			V_TXPKT_IPHDR_LEN(l3hdr_len) |
+			V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+		txq->stats.tso++;
+		txq->stats.tx_cso += m->tso_segsz;
+	}
 
 	if (m->ol_flags & PKT_TX_VLAN_PKT) {
 		txq->stats.vlan_ins++;
@@ -1154,9 +1181,14 @@ out_free:
 		last_desc -= txq->q.size;
 
 	d = &txq->q.sdesc[last_desc];
-	if (d->mbuf) {
-		rte_pktmbuf_free(d->mbuf);
-		d->mbuf = NULL;
+	if (d->coalesce.idx) {
+		int i;
+
+		for (i = 0; i < d->coalesce.idx; i++) {
+			rte_pktmbuf_free(d->coalesce.mbuf[i]);
+			d->coalesce.mbuf[i] = NULL;
+		}
+		d->coalesce.idx = 0;
 	}
 
 	write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
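
The new non-coalesced path is only exercised when the application actually
admits jumbo frames: t4_eth_xmit() drops any non-TSO packet whose pkt_len
exceeds the configured max_rx_pkt_len, and skips coalescing once pkt_len
crosses ETHER_MAX_LEN (1518 bytes). Below is a minimal sketch, not part of
the patch, of how an application on the DPDK 2.x API of this period might
configure a port so that 9000-byte frames reach this path. The port id,
pool sizing, ring sizes, and the JUMBO_FRAME_LEN/setup_jumbo_port names are
illustrative assumptions, not anything taken from the driver.

#include <string.h>

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define JUMBO_FRAME_LEN 9000	/* illustrative target frame size */

/* Configure one RX and one TX queue with jumbo frames enabled, using the
 * DPDK 2.x rxmode fields that t4_eth_xmit() reads in the patch above.
 * Error handling is trimmed to keep the sketch short.
 */
static int setup_jumbo_port(uint8_t port_id, struct rte_mempool *pool)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.jumbo_frame = 1;	/* allow frames > ETHER_MAX_LEN */
	conf.rxmode.max_rx_pkt_len = JUMBO_FRAME_LEN;

	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
		return -1;
	if (rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				   NULL, pool) < 0)
		return -1;
	if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL) < 0)
		return -1;
	return rte_eth_dev_start(port_id);
}

int main(int argc, char **argv)
{
	struct rte_mempool *pool;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* mbuf data rooms must hold a full jumbo frame plus headroom so a
	 * 9000-byte packet fits in a single buffer. */
	pool = rte_pktmbuf_pool_create("jumbo_pool", 8192, 256, 0,
				       JUMBO_FRAME_LEN + RTE_PKTMBUF_HEADROOM,
				       rte_socket_id());
	if (pool == NULL || setup_jumbo_port(0, pool) < 0)
		return -1;

	/* Packets longer than ETHER_MAX_LEN handed to rte_eth_tx_burst()
	 * now take the non-coalesced FW_ETH_TX_PKT_WR path. */
	return 0;
}

With a configuration along these lines, an mbuf longer than ETHER_MAX_LEN
bypasses should_tx_packet_coalesce() and is sent as a regular work request,
and its buffers are freed later by the updated reclaim_tx_desc().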