[v3,08/17] net/mlx5: allocate packet pacing context

Message ID 1594887800-6563-9-git-send-email-viacheslavo@mellanox.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5: introduce accurate packet Tx scheduling

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation success Compilation OK

Commit Message

Slava Ovsiienko July 16, 2020, 8:23 a.m. UTC
  This patch allocates the Packet Pacing context from the kernel,
configures it according to the requested send scheduling
granularity and assigns it to the Clock Queue.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
 drivers/net/mlx5/mlx5.h      |  2 ++
 drivers/net/mlx5/mlx5_txpp.c | 71 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+)
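
For readers skimming the diff below: the pacing rate programmed into the
set_pp_rate_limit_context is derived from the requested tick granularity
(NS_PER_S / tick, the constant coming from rte_cycles.h, hence the new
include), and in test mode the packet rate is further converted into a
kilobit-per-second data rate. A minimal standalone sketch of that arithmetic
follows; the tick and packet-size values are illustrative assumptions, not
values taken from the patch.

/*
 * Sketch of the rate computation done in mlx5_txpp_alloc_pp_index().
 * The tick and packet-size values below are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <limits.h>

#define NS_PER_S 1000000000ul	/* nanoseconds per second */

int main(void)
{
	uint64_t tick = 500;	/* assumed scheduling granularity, ns */
	uint64_t len = 1024;	/* assumed test-mode packet size, bytes */
	uint64_t rate = NS_PER_S / tick;	/* WQEs (packets) per second */

	if (rate * tick != NS_PER_S)
		printf("warning: packet pacing frequency is not precise\n");
	/*
	 * Test mode converts packets/s into kbit/s: 1000 / CHAR_BIT == 125,
	 * so bytes per second divided by 125 gives kilobits per second.
	 */
	uint64_t kbits = (rate * len) / (1000ul / CHAR_BIT);

	printf("WQE rate mode:  %" PRIu64 " WQE/s\n", rate);
	printf("data rate mode: %" PRIu64 " kbit/s\n", kbits);
	return 0;
}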
  

Patch

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 61a93f9..e8a7b10 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -568,6 +568,8 @@  struct mlx5_dev_txpp {
 	struct mlx5dv_devx_event_channel *echan; /* Event Channel. */
 	struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
 	struct mlx5_txpp_wq rearm_queue; /* Rearm Queue. */
+	struct mlx5dv_pp *pp; /* Packet pacing context. */
+	uint16_t pp_id; /* Packet pacing context index. */
 };
 
 /*
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index f600fc5..a0ee872 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -6,6 +6,7 @@ 
 #include <rte_interrupts.h>
 #include <rte_alarm.h>
 #include <rte_malloc.h>
+#include <rte_cycles.h>
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
@@ -49,6 +50,69 @@ 
 }
 
 static void
+mlx5_txpp_free_pp_index(struct mlx5_dev_ctx_shared *sh)
+{
+	if (sh->txpp.pp) {
+		mlx5_glue->dv_free_pp(sh->txpp.pp);
+		sh->txpp.pp = NULL;
+		sh->txpp.pp_id = 0;
+	}
+}
+
+/* Allocate Packet Pacing index from kernel via mlx5dv call. */
+static int
+mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
+{
+#ifdef HAVE_MLX5DV_PP_ALLOC
+	uint32_t pp[MLX5_ST_SZ_DW(set_pp_rate_limit_context)];
+	uint64_t rate;
+
+	MLX5_ASSERT(!sh->txpp.pp);
+	memset(&pp, 0, sizeof(pp));
+	rate = NS_PER_S / sh->txpp.tick;
+	if (rate * sh->txpp.tick != NS_PER_S)
+		DRV_LOG(WARNING, "Packet pacing frequency is not precise.");
+	if (sh->txpp.test) {
+		uint32_t len;
+
+		len = RTE_MAX(MLX5_TXPP_TEST_PKT_SIZE,
+			      (size_t)RTE_ETHER_MIN_LEN);
+		MLX5_SET(set_pp_rate_limit_context, &pp,
+			 burst_upper_bound, len);
+		MLX5_SET(set_pp_rate_limit_context, &pp,
+			 typical_packet_size, len);
+		/* Convert packets per second into kilobits. */
+		rate = (rate * len) / (1000ul / CHAR_BIT);
+		DRV_LOG(INFO, "Packet pacing rate set to %" PRIu64, rate);
+	}
+	MLX5_SET(set_pp_rate_limit_context, &pp, rate_limit, rate);
+	MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
+		 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
+	sh->txpp.pp = mlx5_glue->dv_alloc_pp
+				(sh->ctx, sizeof(pp), &pp,
+				 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
+	if (sh->txpp.pp == NULL) {
+		DRV_LOG(ERR, "Failed to allocate packet pacing index.");
+		rte_errno = errno;
+		return -errno;
+	}
+	if (!sh->txpp.pp->index) {
+		DRV_LOG(ERR, "Zero packet pacing index allocated.");
+		mlx5_txpp_free_pp_index(sh);
+		rte_errno = ENOTSUP;
+		return -ENOTSUP;
+	}
+	sh->txpp.pp_id = sh->txpp.pp->index;
+	return 0;
+#else
+	RTE_SET_USED(sh);
+	DRV_LOG(ERR, "Allocating pacing index is not supported.");
+	rte_errno = ENOTSUP;
+	return -ENOTSUP;
+#endif
+}
+
+static void
 mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
 {
 	if (wq->sq)
@@ -457,6 +521,7 @@ 
 	}
 	sq_attr.state = MLX5_SQC_STATE_RST;
 	sq_attr.cqn = wq->cq->id;
+	sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
 	sq_attr.wq_attr.cd_slave = 1;
 	sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
 	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
@@ -503,6 +568,7 @@ 
  * - Clock CQ/SQ
  * - Rearm CQ/SQ
  * - attaches rearm interrupt handler
+ * - starts Clock Queue
  *
  * Returns 0 on success, negative otherwise
  */
@@ -520,6 +586,9 @@ 
 	ret = mlx5_txpp_create_eqn(sh);
 	if (ret)
 		goto exit;
+	ret = mlx5_txpp_alloc_pp_index(sh);
+	if (ret)
+		goto exit;
 	ret = mlx5_txpp_create_clock_queue(sh);
 	if (ret)
 		goto exit;
@@ -530,6 +599,7 @@ 
 	if (ret) {
 		mlx5_txpp_destroy_rearm_queue(sh);
 		mlx5_txpp_destroy_clock_queue(sh);
+		mlx5_txpp_free_pp_index(sh);
 		mlx5_txpp_destroy_eqn(sh);
 		sh->txpp.tick = 0;
 		sh->txpp.test = 0;
@@ -550,6 +620,7 @@ 
 {
 	mlx5_txpp_destroy_rearm_queue(sh);
 	mlx5_txpp_destroy_clock_queue(sh);
+	mlx5_txpp_free_pp_index(sh);
 	mlx5_txpp_destroy_eqn(sh);
 	sh->txpp.tick = 0;
 	sh->txpp.test = 0;
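
For reference, the mlx5_glue->dv_alloc_pp() / dv_free_pp() wrappers used in
the patch resolve, when HAVE_MLX5DV_PP_ALLOC is defined, to the rdma-core
mlx5dv calls. A hedged sketch of the direct API usage is below; the helper
name and the assumption that 'ctx' is an already opened ibv_context are
illustrative, and the driver itself keeps the mlx5dv_pp object alive until
the queues are torn down rather than freeing it right away.

/*
 * Illustrative only: allocate a dedicated packet pacing index with the
 * rdma-core mlx5dv API, mirroring mlx5_txpp_alloc_pp_index() above.
 * 'pp_ctx' is a set_pp_rate_limit_context blob filled as in the patch.
 */
#include <errno.h>
#include <stddef.h>
#include <infiniband/mlx5dv.h>

static struct mlx5dv_pp *
alloc_dedicated_pp_index(struct ibv_context *ctx,
			 const void *pp_ctx, size_t pp_ctx_sz,
			 uint16_t *pp_id)
{
	struct mlx5dv_pp *pp;

	pp = mlx5dv_pp_alloc(ctx, pp_ctx_sz, pp_ctx,
			     MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (pp == NULL)
		return NULL;	/* errno is set by rdma-core */
	if (pp->index == 0) {
		/* The driver treats a zero index as unusable for scheduling. */
		mlx5dv_pp_free(pp);
		errno = ENOTSUP;
		return NULL;
	}
	/*
	 * The index is later written into
	 * sq_attr.packet_pacing_rate_limit_index of the Clock Queue SQ;
	 * the pp object must stay allocated until mlx5dv_pp_free() at teardown.
	 */
	*pp_id = pp->index;
	return pp;
}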