From patchwork Thu Jun 28 03:19:39 2018
X-Patchwork-Id: 41783
From: John Daley <johndale@cisco.com>
To: ferruh.yigit@intel.com
Cc: dev@dpdk.org, John Daley <johndale@cisco.com>, Hyong Youb Kim
Date: Wed, 27 Jun 2018 20:19:39 -0700
Message-Id: <20180628031940.17397-13-johndale@cisco.com>
In-Reply-To: <20180628031940.17397-1-johndale@cisco.com>
References: <20180628031940.17397-1-johndale@cisco.com>
Subject: [dpdk-dev] [PATCH 13/14] net/enic: add simple Rx handler

Add an optimized Rx handler for non-scattered Rx.
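The handler trades generality for speed: it requires a single start-of-packet
queue (no scatter data queue), and it replaces per-packet mbuf allocation with
a reserve of ENIC_RX_BURST_MAX mbufs pulled from the mempool in bulk once per
burst. A minimal sketch of that bulk-refill pattern follows; the names
rxq_state, BURST_MAX, and refill_reserve() are illustrative stand-ins, not the
driver's own:

/* Illustrative only: the bulk-refill reserve pattern the handler uses. */
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define BURST_MAX 64 /* stands in for ENIC_RX_BURST_MAX */

struct rxq_state { /* hypothetical per-queue state */
	struct rte_mempool *mp;
	struct rte_mbuf *free_mbufs[BURST_MAX];
	int num_free_mbufs;
};

/*
 * Top up the reserve once per burst. On mempool shortage, receive nothing
 * rather than leaving receive-ring slots without buffers.
 */
static inline int
refill_reserve(struct rxq_state *q)
{
	if (q->num_free_mbufs != 0)
		return 0;
	/* rte_mempool_get_bulk() returns 0 on success, < 0 on shortage */
	if (rte_mempool_get_bulk(q->mp, (void **)q->free_mbufs, BURST_MAX))
		return -1;
	q->num_free_mbufs = BURST_MAX;
	return 0;
}

Capping the burst at the reserve size means a successful refill guarantees
every received descriptor can be restocked without touching the mempool again.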
Signed-off-by: Hyong Youb Kim
Signed-off-by: John Daley <johndale@cisco.com>
---
 drivers/net/enic/base/cq_desc.h |   1 +
 drivers/net/enic/base/vnic_rq.h |   2 +
 drivers/net/enic/enic.h         |   2 +
 drivers/net/enic/enic_ethdev.c  |   3 +-
 drivers/net/enic/enic_main.c    |  36 ++++++++++++-
 drivers/net/enic/enic_res.h     |   1 +
 drivers/net/enic/enic_rxtx.c    | 114 ++++++++++++++++++++++++++++++++++++++++
 7 files changed, 156 insertions(+), 3 deletions(-)

diff --git a/drivers/net/enic/base/cq_desc.h b/drivers/net/enic/base/cq_desc.h
index 7e1380270..ae8847c6d 100644
--- a/drivers/net/enic/base/cq_desc.h
+++ b/drivers/net/enic/base/cq_desc.h
@@ -38,6 +38,7 @@ struct cq_desc {
 #define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
 #define CQ_DESC_COLOR_MASK 1
 #define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
 #define CQ_DESC_Q_NUM_BITS 10
 #define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
 #define CQ_DESC_COMP_NDX_BITS 12
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 9619290de..d8e67f747 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -52,6 +52,8 @@ struct vnic_rq {
 	struct vnic_dev *vdev;
 	struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
 	struct vnic_dev_ring ring;
+	struct rte_mbuf **free_mbufs; /* reserve of free mbufs */
+	int num_free_mbufs;
 	struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
 	unsigned int mbuf_next_idx; /* next mb to consume */
 	void *os_buf_head;
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e1d0ea552..d0ffc7783 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -314,6 +314,8 @@
 int enic_clsf_init(struct enic *enic);
 void enic_clsf_destroy(struct enic *enic);
 uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
+uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+				  uint16_t nb_pkts);
 uint16_t enic_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			      uint16_t nb_pkts);
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index d013333f9..0d63cb466 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -525,7 +525,8 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 		RTE_PTYPE_UNKNOWN
 	};
 
-	if (dev->rx_pkt_burst == enic_recv_pkts)
+	if (dev->rx_pkt_burst == enic_recv_pkts ||
+	    dev->rx_pkt_burst == enic_noscatter_recv_pkts)
 		return ptypes;
 	return NULL;
 }
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 66706448c..0651ed95f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -572,6 +572,14 @@ int enic_enable(struct enic *enic)
 		eth_dev->tx_pkt_burst = &enic_xmit_pkts;
 	}
 
+	/* Use the non-scatter, simplified RX handler if possible. */
+	if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
+		PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler");
+		eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
+	} else {
+		PMD_INIT_LOG(DEBUG, " use the normal Rx handler");
+	}
+
 	for (index = 0; index < enic->wq_count; index++)
 		enic_start_wq(enic, index);
 	for (index = 0; index < enic->rq_count; index++)
@@ -624,6 +632,19 @@ void enic_free_rq(void *rxq)
 	enic = vnic_dev_priv(rq_sop->vdev);
 	rq_data = &enic->rq[rq_sop->data_queue_idx];
 
+	if (rq_sop->free_mbufs) {
+		struct rte_mbuf **mb;
+		int i;
+
+		mb = rq_sop->free_mbufs;
+		for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
+		     i < ENIC_RX_BURST_MAX; i++)
+			rte_pktmbuf_free(mb[i]);
+		rte_free(rq_sop->free_mbufs);
+		rq_sop->free_mbufs = NULL;
+		rq_sop->num_free_mbufs = 0;
+	}
+
 	enic_rxmbuf_queue_release(enic, rq_sop);
 	if (rq_data->in_use)
 		enic_rxmbuf_queue_release(enic, rq_data);
@@ -787,13 +808,13 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
 
 	if (mbufs_per_pkt > 1) {
-		min_sop = 64;
+		min_sop = ENIC_RX_BURST_MAX;
 		max_sop = ((enic->config.rq_desc_count /
 			    (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
 		min_data = min_sop * (mbufs_per_pkt - 1);
 		max_data = enic->config.rq_desc_count;
 	} else {
-		min_sop = 64;
+		min_sop = ENIC_RX_BURST_MAX;
 		max_sop = enic->config.rq_desc_count;
 		min_data = 0;
 		max_data = 0;
@@ -864,10 +885,21 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 		goto err_free_sop_mbuf;
 	}
 
+	rq_sop->free_mbufs = (struct rte_mbuf **)
+		rte_zmalloc_socket("rq->free_mbufs",
+				   sizeof(struct rte_mbuf *) *
+				   ENIC_RX_BURST_MAX,
+				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+	if (rq_sop->free_mbufs == NULL)
+		goto err_free_data_mbuf;
+	rq_sop->num_free_mbufs = 0;
+
 	rq_sop->tot_nb_desc = nb_desc; /* squirl away for MTU update function */
 
 	return 0;
 
+err_free_data_mbuf:
+	rte_free(rq_data->mbuf_ring);
 err_free_sop_mbuf:
 	rte_free(rq_sop->mbuf_ring);
 err_free_cq:
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 6b1f6acad..3786bc0e2 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -37,6 +37,7 @@
 #define ENIC_NON_TSO_MAX_DESC 16
 #define ENIC_DEFAULT_RX_FREE_THRESH 32
 #define ENIC_TX_XMIT_MAX 64
+#define ENIC_RX_BURST_MAX 64
 
 /* Defaults for dev_info.default_{rx,tx}portconf */
 #define ENIC_DEFAULT_RX_BURST 32
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 04a77fcb4..e0f93dd5e 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -471,6 +471,120 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
+uint16_t
+enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+			 uint16_t nb_pkts)
+{
+	struct rte_mbuf *mb, **rx, **rxmb;
+	uint16_t cq_idx, nb_rx, max_rx;
+	struct cq_enet_rq_desc *cqd;
+	struct rq_enet_desc *rqd;
+	unsigned int port_id;
+	struct vnic_cq *cq;
+	struct vnic_rq *rq;
+	struct enic *enic;
+	uint8_t color;
+	bool overlay;
+	bool tnl;
+
+	rq = rx_queue;
+	enic = vnic_dev_priv(rq->vdev);
+	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+	cq_idx = cq->to_clean;
+
+	/*
+	 * Fill up the reserve of free mbufs. Below, we restock the receive
+	 * ring with these mbufs to avoid allocation failures.
+	 */
+	if (rq->num_free_mbufs == 0) {
+		if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
+					 ENIC_RX_BURST_MAX))
+			return 0;
+		rq->num_free_mbufs = ENIC_RX_BURST_MAX;
+	}
+
+	/* Receive until the end of the ring, at most. */
+	max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
+	max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
+
+	cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
+	color = cq->last_color;
+	rxmb = rq->mbuf_ring + cq_idx;
+	port_id = enic->port_id;
+	overlay = enic->overlay_offload;
+
+	rx = rx_pkts;
+	while (max_rx) {
+		max_rx--;
+		if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+			break;
+		if (unlikely(cqd->bytes_written_flags &
+			     CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
+			rte_pktmbuf_free(*rxmb++);
+			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+			cqd++;
+			continue;
+		}
+
+		mb = *rxmb++;
+		/* prefetch mbuf data for caller */
+		rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
+				    RTE_PKTMBUF_HEADROOM));
+		mb->data_len = cqd->bytes_written_flags &
+			CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+		mb->pkt_len = mb->data_len;
+		mb->port = port_id;
+		tnl = overlay && (cqd->completed_index_flags &
+				  CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
+		mb->packet_type =
+			enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
+						     tnl);
+		enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
+		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+		if (tnl) {
+			mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
+					     RTE_PTYPE_L4_MASK);
+		}
+		cqd++;
+		*rx++ = mb;
+	}
+	/* Number of descriptors visited */
+	nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
+	if (nb_rx == 0)
+		return 0;
+	rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
+	rxmb = rq->mbuf_ring + cq_idx;
+	cq_idx += nb_rx;
+	rq->rx_nb_hold += nb_rx;
+	if (unlikely(cq_idx == cq->ring.desc_count)) {
+		cq_idx = 0;
+		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+	}
+	cq->to_clean = cq_idx;
+
+	memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
+	       sizeof(struct rte_mbuf *) * nb_rx);
+	rq->num_free_mbufs -= nb_rx;
+	while (nb_rx) {
+		nb_rx--;
+		mb = *rxmb++;
+		mb->data_off = RTE_PKTMBUF_HEADROOM;
+		rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rqd++;
+	}
+	if (rq->rx_nb_hold > rq->rx_free_thresh) {
+		rq->posted_index = enic_ring_add(rq->ring.desc_count,
+						 rq->posted_index,
+						 rq->rx_nb_hold);
+		rq->rx_nb_hold = 0;
+		rte_wmb();
+		iowrite32_relaxed(rq->posted_index,
+				  &rq->ctrl->posted_index);
+	}
+
+	return rx - rx_pkts;
+}
+
 static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 {
 	unsigned int desc_count, n, nb_to_free, tail_idx;
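
For context: the completion test in enic_noscatter_recv_pkts() relies on the
CQ color-bit convention rather than reading a hardware index register. The
fragment below is a simplified restatement of that check, not the driver's
code; cq_desc_hdr and cq_desc_is_new() are illustrative names.

/* Simplified restatement of the CQ color check (illustrative names). */
#include <stdbool.h>
#include <stdint.h>

#define COLOR_MASK_NOSHIFT 0x80 /* mirrors CQ_DESC_COLOR_MASK_NOSHIFT */

struct cq_desc_hdr { /* hypothetical: only the byte the check reads */
	uint8_t type_color; /* bit 7 = color, low bits = descriptor type */
};

/*
 * The NIC flips the color bit it writes each time it wraps the completion
 * ring, and the driver toggles last_color when to_clean wraps. A slot holds
 * new work only when its color differs from last_color.
 */
static inline bool
cq_desc_is_new(const struct cq_desc_hdr *cqd, uint8_t last_color)
{
	return (cqd->type_color & COLOR_MASK_NOSHIFT) != last_color;
}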