[13/14] net/enic: add simple Rx handler

Message ID: 20180628031940.17397-13-johndale@cisco.com (mailing list archive)
State: Superseded, archived
Delegated to: Ferruh Yigit
Series: [01/14] net/enic: fix receive packet types

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

John Daley (johndale) June 28, 2018, 3:19 a.m. UTC
Add an optimized Rx handler for non-scattered Rx.

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Signed-off-by: John Daley <johndale@cisco.com>
---
 drivers/net/enic/base/cq_desc.h |   1 +
 drivers/net/enic/base/vnic_rq.h |   2 +
 drivers/net/enic/enic.h         |   2 +
 drivers/net/enic/enic_ethdev.c  |   3 +-
 drivers/net/enic/enic_main.c    |  36 ++++++++++++-
 drivers/net/enic/enic_res.h     |   1 +
 drivers/net/enic/enic_rxtx.c    | 114 ++++++++++++++++++++++++++++++++++++++++
 7 files changed, 156 insertions(+), 3 deletions(-)
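
Note (not part of the patch): the new handler is installed in enic_enable() only when the scatter ("data") RQ is unused, i.e. when every received frame fits into a single mbuf. Below is a minimal, hedged sketch of application-side setup that keeps the driver on that path; the pool size, MTU, port id and descriptor counts are illustrative assumptions, not values taken from the patch.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

#define EX_PORT 0      /* illustrative port id */
#define EX_MTU  1500   /* illustrative MTU */

static struct rte_mempool *rx_pool;

/* Size the mbuf data room to hold a whole frame and leave the scatter
 * offload disabled, so enic never needs the data RQ and selects
 * enic_noscatter_recv_pkts at enic_enable() time. */
static int setup_nonscatter_rx(void)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	rx_pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
					  RTE_PKTMBUF_HEADROOM + EX_MTU + 128,
					  rte_socket_id());
	if (rx_pool == NULL)
		return -1;
	ret = rte_eth_dev_configure(EX_PORT, 1, 1, &conf);
	if (ret < 0)
		return ret;
	ret = rte_eth_rx_queue_setup(EX_PORT, 0, 512,
				     rte_eth_dev_socket_id(EX_PORT),
				     NULL, rx_pool);
	if (ret < 0)
		return ret;
	/* Tx queue setup and rte_eth_dev_start() omitted. */
	return 0;
}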
  

Patch

diff --git a/drivers/net/enic/base/cq_desc.h b/drivers/net/enic/base/cq_desc.h
index 7e1380270..ae8847c6d 100644
--- a/drivers/net/enic/base/cq_desc.h
+++ b/drivers/net/enic/base/cq_desc.h
@@ -38,6 +38,7 @@  struct cq_desc {
 #define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
 #define CQ_DESC_COLOR_MASK       1
 #define CQ_DESC_COLOR_SHIFT      7
+#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
 #define CQ_DESC_Q_NUM_BITS       10
 #define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
 #define CQ_DESC_COMP_NDX_BITS    12
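
Note (not part of the patch): CQ_DESC_COLOR_MASK_NOSHIFT lets the Rx handler test the color bit (bit 7 of type_color) in place, without shifting it down first. The color scheme, sketched below with illustrative helper names, is how the driver tells new completions from stale ones without clearing descriptors.

#include <stdbool.h>
#include <stdint.h>

/* The NIC flips the color bit on every pass over the completion ring, so an
 * entry is new as long as its color differs from the color software saw on
 * its previous pass through that slot. */
static inline bool cq_entry_is_new(uint8_t type_color, uint8_t last_color)
{
	return (type_color & CQ_DESC_COLOR_MASK_NOSHIFT) != last_color;
}

/* When the software index wraps, the expected color toggles as well:
 *
 *	if (++cq_idx == cq->ring.desc_count) {
 *		cq_idx = 0;
 *		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
 *	}
 */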
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 9619290de..d8e67f747 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -52,6 +52,8 @@  struct vnic_rq {
 	struct vnic_dev *vdev;
 	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
 	struct vnic_dev_ring ring;
+	struct rte_mbuf **free_mbufs;		/* reserve of free mbufs */
+	int num_free_mbufs;
 	struct rte_mbuf **mbuf_ring;		/* array of allocated mbufs */
 	unsigned int mbuf_next_idx;		/* next mb to consume */
 	void *os_buf_head;
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e1d0ea552..d0ffc7783 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -314,6 +314,8 @@  int enic_clsf_init(struct enic *enic);
 void enic_clsf_destroy(struct enic *enic);
 uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
+uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+				  uint16_t nb_pkts);
 uint16_t enic_dummy_recv_pkts(void *rx_queue,
 			      struct rte_mbuf **rx_pkts,
 			      uint16_t nb_pkts);
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index d013333f9..0d63cb466 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -525,7 +525,8 @@  static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 		RTE_PTYPE_UNKNOWN
 	};
 
-	if (dev->rx_pkt_burst == enic_recv_pkts)
+	if (dev->rx_pkt_burst == enic_recv_pkts ||
+	    dev->rx_pkt_burst == enic_noscatter_recv_pkts)
 		return ptypes;
 	return NULL;
 }
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 66706448c..0651ed95f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -572,6 +572,14 @@  int enic_enable(struct enic *enic)
 		eth_dev->tx_pkt_burst = &enic_xmit_pkts;
 	}
 
+	/* Use the non-scatter, simplified RX handler if possible. */
+	if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
+		PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler");
+		eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
+	} else {
+		PMD_INIT_LOG(DEBUG, " use the normal Rx handler");
+	}
+
 	for (index = 0; index < enic->wq_count; index++)
 		enic_start_wq(enic, index);
 	for (index = 0; index < enic->rq_count; index++)
@@ -624,6 +632,19 @@  void enic_free_rq(void *rxq)
 	enic = vnic_dev_priv(rq_sop->vdev);
 	rq_data = &enic->rq[rq_sop->data_queue_idx];
 
+	if (rq_sop->free_mbufs) {
+		struct rte_mbuf **mb;
+		int i;
+
+		mb = rq_sop->free_mbufs;
+		for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
+		     i < ENIC_RX_BURST_MAX; i++)
+			rte_pktmbuf_free(mb[i]);
+		rte_free(rq_sop->free_mbufs);
+		rq_sop->free_mbufs = NULL;
+		rq_sop->num_free_mbufs = 0;
+	}
+
 	enic_rxmbuf_queue_release(enic, rq_sop);
 	if (rq_data->in_use)
 		enic_rxmbuf_queue_release(enic, rq_data);
@@ -787,13 +808,13 @@  int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
 
 	if (mbufs_per_pkt > 1) {
-		min_sop = 64;
+		min_sop = ENIC_RX_BURST_MAX;
 		max_sop = ((enic->config.rq_desc_count /
 			    (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
 		min_data = min_sop * (mbufs_per_pkt - 1);
 		max_data = enic->config.rq_desc_count;
 	} else {
-		min_sop = 64;
+		min_sop = ENIC_RX_BURST_MAX;
 		max_sop = enic->config.rq_desc_count;
 		min_data = 0;
 		max_data = 0;
@@ -864,10 +885,21 @@  int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 			goto err_free_sop_mbuf;
 	}
 
+	rq_sop->free_mbufs = (struct rte_mbuf **)
+		rte_zmalloc_socket("rq->free_mbufs",
+				   sizeof(struct rte_mbuf *) *
+				   ENIC_RX_BURST_MAX,
+				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+	if (rq_sop->free_mbufs == NULL)
+		goto err_free_data_mbuf;
+	rq_sop->num_free_mbufs = 0;
+
 	rq_sop->tot_nb_desc = nb_desc; /* squirl away for MTU update function */
 
 	return 0;
 
+err_free_data_mbuf:
+	rte_free(rq_data->mbuf_ring);
 err_free_sop_mbuf:
 	rte_free(rq_sop->mbuf_ring);
 err_free_cq:
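
Note (not part of the patch): the enic_main.c changes above only allocate and tear down the new per-RQ reserve of ENIC_RX_BURST_MAX mbuf pointers; the Rx handler further down fills and drains it. A hedged sketch of that lifecycle follows; the helper names are illustrative, while rte_mempool_get_bulk() and rte_pktmbuf_free() are the real DPDK calls used by the patch.

/* Refill the reserve only when it is empty. rte_mempool_get_bulk() is
 * all-or-nothing, so on failure nothing is taken and the burst simply
 * returns 0 packets instead of leaving the ring short of buffers. */
static int reserve_refill(struct vnic_rq *rq)
{
	if (rq->num_free_mbufs != 0)
		return 0;
	if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
				 ENIC_RX_BURST_MAX))
		return -1;
	rq->num_free_mbufs = ENIC_RX_BURST_MAX;
	return 0;
}

/* The handler consumes the reserve from the front, so whatever is left
 * unused sits at the tail of the array; enic_free_rq() above frees exactly
 * that tail on queue release. */
static void reserve_drain(struct vnic_rq *rq)
{
	int i;

	for (i = ENIC_RX_BURST_MAX - rq->num_free_mbufs;
	     i < ENIC_RX_BURST_MAX; i++)
		rte_pktmbuf_free(rq->free_mbufs[i]);
	rq->num_free_mbufs = 0;
}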
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 6b1f6acad..3786bc0e2 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -37,6 +37,7 @@ 
 #define ENIC_NON_TSO_MAX_DESC		16
 #define ENIC_DEFAULT_RX_FREE_THRESH	32
 #define ENIC_TX_XMIT_MAX		64
+#define ENIC_RX_BURST_MAX		64
 
 /* Defaults for dev_info.default_{rx,tx}portconf */
 #define ENIC_DEFAULT_RX_BURST		32
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 04a77fcb4..e0f93dd5e 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -471,6 +471,120 @@  enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
+uint16_t
+enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+			 uint16_t nb_pkts)
+{
+	struct rte_mbuf *mb, **rx, **rxmb;
+	uint16_t cq_idx, nb_rx, max_rx;
+	struct cq_enet_rq_desc *cqd;
+	struct rq_enet_desc *rqd;
+	unsigned int port_id;
+	struct vnic_cq *cq;
+	struct vnic_rq *rq;
+	struct enic *enic;
+	uint8_t color;
+	bool overlay;
+	bool tnl;
+
+	rq = rx_queue;
+	enic = vnic_dev_priv(rq->vdev);
+	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+	cq_idx = cq->to_clean;
+
+	/*
+	 * Fill up the reserve of free mbufs. Below, we restock the receive
+	 * ring with these mbufs to avoid allocation failures.
+	 */
+	if (rq->num_free_mbufs == 0) {
+		if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
+					 ENIC_RX_BURST_MAX))
+			return 0;
+		rq->num_free_mbufs = ENIC_RX_BURST_MAX;
+	}
+
+	/* Receive until the end of the ring, at most. */
+	max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
+	max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
+
+	cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
+	color = cq->last_color;
+	rxmb = rq->mbuf_ring + cq_idx;
+	port_id = enic->port_id;
+	overlay = enic->overlay_offload;
+
+	rx = rx_pkts;
+	while (max_rx) {
+		max_rx--;
+		if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+			break;
+		if (unlikely(cqd->bytes_written_flags &
+			     CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
+			rte_pktmbuf_free(*rxmb++);
+			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+			cqd++;
+			continue;
+		}
+
+		mb = *rxmb++;
+		/* prefetch mbuf data for caller */
+		rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
+				    RTE_PKTMBUF_HEADROOM));
+		mb->data_len = cqd->bytes_written_flags &
+			CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+		mb->pkt_len = mb->data_len;
+		mb->port = port_id;
+		tnl = overlay && (cqd->completed_index_flags &
+				  CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
+		mb->packet_type =
+			enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
+						     tnl);
+		enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
+		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+		if (tnl) {
+			mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
+					     RTE_PTYPE_L4_MASK);
+		}
+		cqd++;
+		*rx++ = mb;
+	}
+	/* Number of descriptors visited */
+	nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
+	if (nb_rx == 0)
+		return 0;
+	rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
+	rxmb = rq->mbuf_ring + cq_idx;
+	cq_idx += nb_rx;
+	rq->rx_nb_hold += nb_rx;
+	if (unlikely(cq_idx == cq->ring.desc_count)) {
+		cq_idx = 0;
+		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+	}
+	cq->to_clean = cq_idx;
+
+	memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
+	       sizeof(struct rte_mbuf *) * nb_rx);
+	rq->num_free_mbufs -= nb_rx;
+	while (nb_rx) {
+		nb_rx--;
+		mb = *rxmb++;
+		mb->data_off = RTE_PKTMBUF_HEADROOM;
+		rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rqd++;
+	}
+	if (rq->rx_nb_hold > rq->rx_free_thresh) {
+		rq->posted_index = enic_ring_add(rq->ring.desc_count,
+						 rq->posted_index,
+						 rq->rx_nb_hold);
+		rq->rx_nb_hold = 0;
+		rte_wmb();
+		iowrite32_relaxed(rq->posted_index,
+				  &rq->ctrl->posted_index);
+	}
+
+	return rx - rx_pkts;
+}
+
 static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 {
 	unsigned int desc_count, n, nb_to_free, tail_idx;
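
Note (not part of the patch): a single call into enic_noscatter_recv_pkts returns at most min(nb_pkts, ENIC_RX_BURST_MAX, descriptors left before the end of the completion ring), so callers that want larger bursts should simply call again. A hedged usage sketch, with illustrative port and queue ids:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Keep calling rte_eth_rx_burst() until it comes back empty or the caller's
 * array is full; each call dispatches to the installed rx_pkt_burst handler,
 * here enic_noscatter_recv_pkts. */
static uint16_t rx_burst_all(uint16_t port, uint16_t queue,
			     struct rte_mbuf **pkts, uint16_t want)
{
	uint16_t got = 0, n;

	do {
		n = rte_eth_rx_burst(port, queue, pkts + got, want - got);
		got += n;
	} while (n != 0 && got < want);

	return got;
}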