[4/4] net/failsafe: add Tx queue start and stop functions

Message ID 1535526966-32456-5-git-send-email-arybchenko@solarflare.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series net/failsafe: support deferred queue start

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Andrew Rybchenko Aug. 29, 2018, 7:16 a.m. UTC
  From: Ian Dolzhansky <Ian.Dolzhansky@oktetlabs.ru>

Support Tx queue deferred start by implementing the Tx queue start and stop
operations and by synchronizing subdevice Tx queue states when a subdevice
is plugged in.
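
For context, the intended application-side flow is roughly the sketch below.
It is illustrative only and not part of this patch: the port/queue ids, the
descriptor count and the helper name are made up. A queue configured with
tx_deferred_start set is left stopped by rte_eth_dev_start() and is started
explicitly through the standard ethdev call that this patch wires up for
failsafe.

    #include <rte_ethdev.h>

    /* Illustrative sketch: set up a deferred-start Tx queue and start it
     * explicitly once the device is running.
     */
    static int
    setup_deferred_txq(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_txconf txconf;
            int ret;

            rte_eth_dev_info_get(port_id, &dev_info);
            txconf = dev_info.default_txconf;
            txconf.tx_deferred_start = 1;  /* skip this queue in rte_eth_dev_start() */

            ret = rte_eth_tx_queue_setup(port_id, queue_id, 512,
                                         rte_eth_dev_socket_id(port_id), &txconf);
            if (ret != 0)  /* e.g. -EINVAL if a subdevice lacks tx_queue_start */
                    return ret;

            ret = rte_eth_dev_start(port_id);
            if (ret != 0)
                    return ret;

            /* The deferred queue stays stopped until started explicitly. */
            return rte_eth_dev_tx_queue_start(port_id, queue_id);
    }

rte_eth_dev_tx_queue_stop() later stops the queue on all active subdevices;
as noted in the code below, the stop reports success if at least one
subdevice stopped the queue.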

Signed-off-by: Ian Dolzhansky <Ian.Dolzhansky@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
---
 doc/guides/nics/features/failsafe.ini  |  2 +-
 doc/guides/rel_notes/release_18_11.rst |  4 +-
 drivers/net/failsafe/failsafe_ether.c  | 44 +++++++++++++++
 drivers/net/failsafe/failsafe_ops.c    | 77 ++++++++++++++++++++++++--
 4 files changed, 120 insertions(+), 7 deletions(-)
  

Patch

diff --git a/doc/guides/nics/features/failsafe.ini b/doc/guides/nics/features/failsafe.ini
index 712c0b7f7..74eae4a62 100644
--- a/doc/guides/nics/features/failsafe.ini
+++ b/doc/guides/nics/features/failsafe.ini
@@ -7,7 +7,7 @@ 
 Link status          = Y
 Link status event    = Y
 Rx interrupt         = Y
-Queue start/stop     = P
+Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Promiscuous mode     = Y
diff --git a/doc/guides/rel_notes/release_18_11.rst b/doc/guides/rel_notes/release_18_11.rst
index 882ef8ac6..ad08a204f 100644
--- a/doc/guides/rel_notes/release_18_11.rst
+++ b/doc/guides/rel_notes/release_18_11.rst
@@ -58,8 +58,8 @@  New Features
 
   Updated the failsafe driver including the following changes:
 
-  * Support for Rx queues start and stop.
-  * Support for Rx queues deferred start.
+  * Support for Rx and Tx queues start and stop.
+  * Support for Rx and Tx queues deferred start.
 
 * **Added ability to switch queue deferred start flag on testpmd app.**
 
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 305deed63..191f95f14 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -407,6 +407,47 @@  failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
+{
+	struct txq *txq;
+	int ret;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+
+		if (txq->info.conf.tx_deferred_start &&
+		    dev->data->tx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STARTED) {
+			/*
+			 * The subdevice Tx queue does not launch on device
+			 * start if deferred start flag is set. It needs to be
+			 * started manually in case an appropriate failsafe Tx
+			 * queue has been started earlier.
+			 */
+			ret = dev->dev_ops->tx_queue_start(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Tx queue %d", i);
+				return ret;
+			}
+		} else if (dev->data->tx_queue_state[i] ==
+						RTE_ETH_QUEUE_STATE_STOPPED) {
+			/*
+			 * The subdevice Tx queue needs to be stopped manually
+			 * in case an appropriate failsafe Tx queue has been
+			 * stopped earlier.
+			 */
+			ret = dev->dev_ops->tx_queue_stop(dev, i);
+			if (ret) {
+				ERROR("Could not synchronize Tx queue %d", i);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
 int
 failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
 {
@@ -466,6 +507,9 @@  failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
 	if (ret)
 		goto err_remove;
 	ret = failsafe_eth_dev_rx_queues_sync(dev);
+	if (ret)
+		goto err_remove;
+	ret = failsafe_eth_dev_tx_queues_sync(dev);
 	if (ret)
 		goto err_remove;
 	return 0;
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 412d522cf..4d30eb22d 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -174,6 +174,7 @@  static void
 fs_set_queues_state_start(struct rte_eth_dev *dev)
 {
 	struct rxq *rxq;
+	struct txq *txq;
 	uint16_t i;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -182,6 +183,12 @@  fs_set_queues_state_start(struct rte_eth_dev *dev)
 			dev->data->rx_queue_state[i] =
 						RTE_ETH_QUEUE_STATE_STARTED;
 	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq->info.conf.tx_deferred_start)
+			dev->data->tx_queue_state[i] =
+						RTE_ETH_QUEUE_STATE_STARTED;
+	}
 }
 
 static int
@@ -234,6 +241,8 @@  fs_set_queues_state_stop(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 }
 
 static void
@@ -586,12 +595,17 @@  fs_tx_queue_setup(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
+	fs_lock(dev, 0);
 	if (tx_conf->tx_deferred_start) {
-		ERROR("Tx queue deferred start is not supported");
-		return -EINVAL;
+		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+			if (SUBOPS(sdev, tx_queue_start) == NULL) {
+				ERROR("Tx queue deferred start is not "
+					"supported for subdevice %d", i);
+				fs_unlock(dev, 0);
+				return -EINVAL;
+			}
+		}
 	}
-
-	fs_lock(dev, 0);
 	txq = dev->data->tx_queues[tx_queue_id];
 	if (txq != NULL) {
 		fs_tx_queue_release(txq);
@@ -631,6 +645,59 @@  fs_tx_queue_setup(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static int
+fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct sub_device *sdev;
+	uint8_t i;
+	int ret;
+	int err = 0;
+	bool failure = true;
+
+	fs_lock(dev, 0);
+	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+		uint16_t port_id = ETH(sdev)->data->port_id;
+
+		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
+		ret = fs_err(sdev, ret);
+		if (ret) {
+			ERROR("Tx queue stop failed for subdevice %d", i);
+			err = ret;
+		} else {
+			failure = false;
+		}
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	fs_unlock(dev, 0);
+	/* Return 0 in case of at least one successful queue stop */
+	return (failure) ? err : 0;
+}
+
+static int
+fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct sub_device *sdev;
+	uint8_t i;
+	int ret;
+
+	fs_lock(dev, 0);
+	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+		uint16_t port_id = ETH(sdev)->data->port_id;
+
+		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
+		ret = fs_err(sdev, ret);
+		if (ret) {
+			ERROR("Tx queue start failed for subdevice %d", i);
+			fs_tx_queue_stop(dev, tx_queue_id);
+			fs_unlock(dev, 0);
+			return ret;
+		}
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	fs_unlock(dev, 0);
+	return 0;
+}
+
 static void
 fs_dev_free_queues(struct rte_eth_dev *dev)
 {
@@ -1122,7 +1189,9 @@  const struct eth_dev_ops failsafe_ops = {
 	.rx_queue_setup = fs_rx_queue_setup,
 	.tx_queue_setup = fs_tx_queue_setup,
 	.rx_queue_start = fs_rx_queue_start,
+	.tx_queue_start = fs_tx_queue_start,
 	.rx_queue_stop = fs_rx_queue_stop,
+	.tx_queue_stop = fs_tx_queue_stop,
 	.rx_queue_release = fs_rx_queue_release,
 	.tx_queue_release = fs_tx_queue_release,
 	.rx_queue_intr_enable = fs_rx_intr_enable,