[dpdk-dev] net/enic: support scatter Rx in the MTU update function

Message ID 20160922170246.15061-2-johndale@cisco.com (mailing list archive)
State Accepted, archived
Delegated to: Bruce Richardson

Commit Message

John Daley (johndale) Sept. 22, 2016, 5:02 p.m. UTC
  Re-initialize RQs when the MTU is changed. This allows more
efficient use of mbufs when moving from an MTU greater than the
mbuf size to one that is less, and switches to Rx scatter mode
when moving from an MTU less than the mbuf size to one that is
greater.

Signed-off-by: Nelson Escobar <neescoba@cisco.com>
Signed-off-by: John Daley <johndale@cisco.com>
---
 drivers/net/enic/base/vnic_rq.h |   1 +
 drivers/net/enic/enic.h         |   9 +++
 drivers/net/enic/enic_main.c    | 145 +++++++++++++++++++++++++++++++++++++---
 drivers/net/enic/enic_rxtx.c    |  11 +++
 4 files changed, 156 insertions(+), 10 deletions(-)
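
The scatter decision described above reduces to a ceiling division of the
worst-case frame size by the mbuf data size. A minimal sketch of that
computation, mirroring the formula used in enic_alloc_rq() in the patch
below (illustrative, not driver code):

    #include <rte_ether.h>  /* for ETHER_HDR_LEN (14) */

    /* mbufs needed per received frame: MTU plus Ethernet header
     * plus 4 bytes of CRC, divided by the mbuf data size, rounded
     * up. A result > 1 means Rx scatter is required.
     */
    static unsigned int mbufs_per_pkt(unsigned int mtu, unsigned int mbuf_size)
    {
        return (mtu + ETHER_HDR_LEN + 4 + (mbuf_size - 1)) / mbuf_size;
    }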
  

Comments

Bruce Richardson Sept. 27, 2016, 2:38 p.m. UTC | #1
On Thu, Sep 22, 2016 at 10:02:46AM -0700, John Daley wrote:
> Re-initialize Rq's when MTU is changed. This allows for more
> efficient use of mbufs when moving from an MTU that is greater
> than the mbuf size to one that is less. And moves to using Rx
> scatter mode when moving from an MTU less than the mbuf size
> to one that is greater.
> 
> Signed-off-by: Nelson Escobar <neescoba@cisco.com>
> Signed-off-by: John Daley <johndale@cisco.com>

Applied to dpdk-next-net/rel_16_11

/Bruce
  

Patch

diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index fd9e170..7d96b0f 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -96,6 +96,7 @@  struct vnic_rq {
 	struct rte_mbuf *pkt_first_seg;
 	struct rte_mbuf *pkt_last_seg;
 	unsigned int max_mbufs_per_pkt;
+	uint16_t tot_nb_desc;
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 5c9345f..17d6c05 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -160,9 +160,15 @@  struct enic {
 	/* linked list storing memory allocations */
 	LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
 	rte_spinlock_t memzone_list_lock;
+	rte_spinlock_t mtu_lock;
 
 };
 
+static inline unsigned int enic_rq_sop(unsigned int sop_rq)
+{
+	return sop_rq / 2;
+}
+
 static inline unsigned int enic_sop_rq(unsigned int rq)
 {
 	return rq * 2;
@@ -270,6 +276,9 @@  extern int enic_clsf_init(struct enic *enic);
 extern void enic_clsf_destroy(struct enic *enic);
 uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
+uint16_t enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+			      __rte_unused struct rte_mbuf **rx_pkts,
+			      __rte_unused uint16_t nb_pkts);
 uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			       uint16_t nb_pkts);
 int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 5d9bf4c..15a05b4 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -542,6 +542,9 @@  void enic_free_rq(void *rxq)
 		vnic_rq_free(rq_data);
 
 	vnic_cq_free(&enic->cq[rq_sop->index]);
+
+	rq_sop->in_use = 0;
+	rq_data->in_use = 0;
 }
 
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
@@ -610,6 +613,7 @@  int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	unsigned int mbuf_size, mbufs_per_pkt;
 	unsigned int nb_sop_desc, nb_data_desc;
 	uint16_t min_sop, max_sop, min_data, max_data;
+	uint16_t mtu = enic->rte_dev->data->mtu;
 
 	rq_sop->is_sop = 1;
 	rq_sop->data_queue_idx = data_queue_idx;
@@ -625,9 +629,9 @@  int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 			       RTE_PKTMBUF_HEADROOM);
 
 	if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
-		dev_info(enic, "Scatter rx mode enabled\n");
+		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
 		/* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
-		mbufs_per_pkt = ((enic->config.mtu + ETHER_HDR_LEN + 4) +
+		mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
 				 (mbuf_size - 1)) / mbuf_size;
 	} else {
 		dev_info(enic, "Scatter rx mode disabled\n");
@@ -635,10 +639,11 @@  int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	}
 
 	if (mbufs_per_pkt > 1) {
-		dev_info(enic, "Scatter rx mode in use\n");
+		dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
 		rq_data->in_use = 1;
 	} else {
-		dev_info(enic, "Scatter rx mode not being used\n");
+		dev_info(enic, "Rq %u Scatter rx mode not being used\n",
+			 queue_idx);
 		rq_data->in_use = 0;
 	}
 
@@ -675,7 +680,7 @@  int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	}
 	if (mbufs_per_pkt > 1) {
 		dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
-			 enic->config.mtu, mbuf_size, min_sop + min_data,
+			 mtu, mbuf_size, min_sop + min_data,
 			 max_sop + max_data);
 	}
 	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
@@ -726,6 +731,8 @@  int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 			goto err_free_sop_mbuf;
 	}
 
+	rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
+
 	return 0;
 
 err_free_sop_mbuf:
@@ -1100,6 +1107,54 @@  int enic_set_vnic_res(struct enic *enic)
 	return rc;
 }
 
+/* Reinitialize the completion queue and RQs for a given queue index */
+static int
+enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
+{
+	struct vnic_rq *sop_rq, *data_rq;
+	unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+	int rc = 0;
+
+	sop_rq = &enic->rq[enic_sop_rq(rq_idx)];
+	data_rq = &enic->rq[enic_data_rq(rq_idx)];
+
+	vnic_cq_clean(&enic->cq[cq_idx]);
+	vnic_cq_init(&enic->cq[cq_idx],
+		     0 /* flow_control_enable */,
+		     1 /* color_enable */,
+		     0 /* cq_head */,
+		     0 /* cq_tail */,
+		     1 /* cq_tail_color */,
+		     0 /* interrupt_enable */,
+		     1 /* cq_entry_enable */,
+		     0 /* cq_message_enable */,
+		     0 /* interrupt offset */,
+		     0 /* cq_message_addr */);
+
+
+	vnic_rq_init_start(sop_rq, enic_cq_rq(enic, enic_sop_rq(rq_idx)),
+			   0, sop_rq->ring.desc_count - 1, 1, 0);
+	if (data_rq->in_use) {
+		vnic_rq_init_start(data_rq,
+				   enic_cq_rq(enic, enic_data_rq(rq_idx)),
+				   0, data_rq->ring.desc_count - 1, 1, 0);
+	}
+
+	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
+	if (rc)
+		return rc;
+
+	if (data_rq->in_use) {
+		rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
+		if (rc) {
+			enic_rxmbuf_queue_release(enic, sop_rq);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
 /* The Cisco NIC can send and receive packets up to a max packet size
  * determined by the NIC type and firmware. There is also an MTU
  * configured into the NIC via the CIMC/UCSM management interface
@@ -1109,6 +1164,9 @@  int enic_set_vnic_res(struct enic *enic)
  */
 int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
 {
+	unsigned int rq_idx;
+	struct vnic_rq *rq;
+	int rc = 0;
 	uint16_t old_mtu;	/* previous setting */
 	uint16_t config_mtu;	/* Value configured into NIC via CIMC/UCSM */
 	struct rte_eth_dev *eth_dev = enic->rte_dev;
@@ -1116,10 +1174,6 @@  int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
 	old_mtu = eth_dev->data->mtu;
 	config_mtu = enic->config.mtu;
 
-	/* only works with Rx scatter disabled */
-	if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter)
-		return -ENOTSUP;
-
 	if (new_mtu > enic->max_mtu) {
 		dev_err(enic,
 			"MTU not updated: requested (%u) greater than max (%u)\n",
@@ -1137,11 +1191,82 @@  int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
 			"MTU (%u) is greater than value configured in NIC (%u)\n",
 			new_mtu, config_mtu);
 
+	/* The easy case is when scatter is disabled. However, if the MTU
+	 * becomes greater than the mbuf data size, packet drops will ensue.
+	 */
+	if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+		eth_dev->data->mtu = new_mtu;
+		goto set_mtu_done;
+	}
+
+	/* Rx scatter is enabled, so reconfigure the RQs on the fly. The point
+	 * is to change Rx scatter mode if necessary for better performance,
+	 * i.e. if the MTU was greater than the mbuf size and now it's less,
+	 * scatter Rx doesn't have to be used and vice versa.
+	 */
+	rte_spinlock_lock(&enic->mtu_lock);
+
+	/* Stop traffic on all RQs */
+	for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
+		rq = &enic->rq[rq_idx];
+		if (rq->is_sop && rq->in_use) {
+			rc = enic_stop_rq(enic, enic_rq_sop(rq_idx));
+			if (rc) {
+				dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
+				goto set_mtu_done;
+			}
+		}
+	}
+
+	/* replace Rx function with a no-op to avoid getting stale pkts */
+	eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
+	rte_mb();
+
+	/* Allow time for threads to exit the real Rx function. */
+	usleep(100000);
+
+	/* now it is safe to reconfigure the RQs */
+
 	/* update the mtu */
 	eth_dev->data->mtu = new_mtu;
 
+	/* free and reallocate RQs with the new MTU */
+	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+		rq = &enic->rq[enic_sop_rq(rq_idx)];
+
+		enic_free_rq(rq);
+		rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
+				   rq->tot_nb_desc);
+		if (rc) {
+			dev_err(enic,
+				"Fatal MTU alloc error - no traffic will pass\n");
+			goto set_mtu_done;
+		}
+
+		rc = enic_reinit_rq(enic, rq_idx);
+		if (rc) {
+			dev_err(enic,
+				"Fatal MTU RQ reinit error - no traffic will pass\n");
+			goto set_mtu_done;
+		}
+	}
+
+	/* put back the real receive function */
+	rte_mb();
+	eth_dev->rx_pkt_burst = enic_recv_pkts;
+	rte_mb();
+
+	/* restart Rx traffic */
+	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+		rq = &enic->rq[enic_sop_rq(rq_idx)];
+		if (rq->is_sop && rq->in_use)
+			enic_start_rq(enic, rq_idx);
+	}
+
+set_mtu_done:
 	dev_info(enic, "MTU changed from %u to %u\n",  old_mtu, new_mtu);
-	return 0;
+	rte_spinlock_unlock(&enic->mtu_lock);
+	return rc;
 }
 
 static int enic_dev_init(struct enic *enic)
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index c138160..a80ccb6 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -227,6 +227,17 @@  enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
 	mbuf->ol_flags = pkt_flags;
 }
 
+/* dummy receive function to replace actual function in
+ * order to do safe reconfiguration operations.
+ */
+uint16_t
+enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+		     __rte_unused struct rte_mbuf **rx_pkts,
+		     __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
 uint16_t
 enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	       uint16_t nb_pkts)
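
This dummy burst function is one half of the quiesce pattern enic_set_mtu()
uses around RQ reconfiguration. A condensed sketch of that sequence, where
reconfigure_rqs() is a hypothetical stand-in for the free/realloc/reinit
loop in the patch:

    /* Park Rx on a no-op burst function, wait for in-flight callers
     * of enic_recv_pkts() to drain, reconfigure, then restore.
     */
    eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
    rte_mb();               /* publish the function pointer swap */
    usleep(100000);         /* let polling threads leave the real Rx path */
    reconfigure_rqs(enic);  /* hypothetical: the free/alloc/reinit loop */
    rte_mb();
    eth_dev->rx_pkt_burst = enic_recv_pkts;

From an application, this path is driven by the standard
rte_eth_dev_set_mtu() call; with Rx scatter enabled, it no longer
returns -ENOTSUP.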