[v3,5/5] event/cnxk: support to set runtime queue attributes

Message ID 2818c8c220f5c8ae37f738bc864e60e2ee6a7d06.1652607951.git.sthotton@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series Extend and set event queue attributes at runtime

Checks

Context                Check     Description
ci/checkpatch          success   coding style OK
ci/Intel-compilation   success   Compilation OK
ci/intel-Testing       success   Testing PASS

Commit Message

Shijith Thotton May 15, 2022, 9:53 a.m. UTC
  Added an API to set queue attributes at runtime and an API to get the
queue weight and affinity.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
 doc/guides/eventdevs/features/cnxk.ini |  1 +
 drivers/event/cnxk/cn10k_eventdev.c    |  4 ++
 drivers/event/cnxk/cn9k_eventdev.c     |  4 ++
 drivers/event/cnxk/cnxk_eventdev.c     | 91 ++++++++++++++++++++++++--
 drivers/event/cnxk/cnxk_eventdev.h     | 19 ++++++
 5 files changed, 113 insertions(+), 6 deletions(-)
  
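For reviewers, a minimal usage sketch (not part of the patch) of how an
application could exercise these attributes once the device is started. It
assumes the rte_event_queue_attr_set()/rte_event_queue_attr_get() API and the
weight/affinity attribute IDs introduced earlier in this series; the helper
name bump_queue_weight and the dev_id/queue_id values are placeholders:

	#include <stdio.h>
	#include <rte_eventdev.h>

	static int
	bump_queue_weight(uint8_t dev_id, uint8_t queue_id)
	{
		uint32_t weight = 0, affinity = 0;
		int rc;

		/* Raise the scheduling weight of the queue while the device is
		 * running; only valid if the PMD advertises
		 * RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR.
		 */
		rc = rte_event_queue_attr_set(dev_id, queue_id,
					      RTE_EVENT_QUEUE_ATTR_WEIGHT,
					      RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
		if (rc < 0)
			return rc;

		/* Read back the weight and affinity the PMD actually stored. */
		rc = rte_event_queue_attr_get(dev_id, queue_id,
					      RTE_EVENT_QUEUE_ATTR_WEIGHT, &weight);
		if (rc < 0)
			return rc;
		rc = rte_event_queue_attr_get(dev_id, queue_id,
					      RTE_EVENT_QUEUE_ATTR_AFFINITY, &affinity);
		if (rc < 0)
			return rc;

		printf("queue %u: weight=%u affinity=%u\n", queue_id, weight, affinity);
		return 0;
	}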

Patch

diff --git a/doc/guides/eventdevs/features/cnxk.ini b/doc/guides/eventdevs/features/cnxk.ini
index 7633c6e3a2..bee69bf8f4 100644
--- a/doc/guides/eventdevs/features/cnxk.ini
+++ b/doc/guides/eventdevs/features/cnxk.ini
@@ -12,6 +12,7 @@  runtime_port_link          = Y
 multiple_queue_port        = Y
 carry_flow_id              = Y
 maintenance_free           = Y
+runtime_queue_attr         = Y
 
 [Eth Rx adapter Features]
 internal_port              = Y
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 9b4d2895ec..f6973bb691 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -845,9 +845,13 @@  cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
+
 	.queue_def_conf = cnxk_sso_queue_def_conf,
 	.queue_setup = cnxk_sso_queue_setup,
 	.queue_release = cnxk_sso_queue_release,
+	.queue_attr_get = cnxk_sso_queue_attribute_get,
+	.queue_attr_set = cnxk_sso_queue_attribute_set,
+
 	.port_def_conf = cnxk_sso_port_def_conf,
 	.port_setup = cn10k_sso_port_setup,
 	.port_release = cn10k_sso_port_release,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 4bba477dd1..7cb59bbbfa 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1079,9 +1079,13 @@  cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
+
 	.queue_def_conf = cnxk_sso_queue_def_conf,
 	.queue_setup = cnxk_sso_queue_setup,
 	.queue_release = cnxk_sso_queue_release,
+	.queue_attr_get = cnxk_sso_queue_attribute_get,
+	.queue_attr_set = cnxk_sso_queue_attribute_set,
+
 	.port_def_conf = cnxk_sso_port_def_conf,
 	.port_setup = cn9k_sso_port_setup,
 	.port_release = cn9k_sso_port_release,
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index be021d86c9..a2829b817e 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -120,7 +120,8 @@  cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 				  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
 				  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
 				  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
-				  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+				  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE |
+				  RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR;
 }
 
 int
@@ -300,11 +301,27 @@  cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
 		     const struct rte_event_queue_conf *queue_conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-
-	plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
-	/* Normalize <0-255> to <0-7> */
-	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
-					  queue_conf->priority / 32);
+	uint8_t priority, weight, affinity;
+
+	/* Default weight and affinity */
+	dev->mlt_prio[queue_id].weight = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
+	dev->mlt_prio[queue_id].affinity = RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
+
+	priority = CNXK_QOS_NORMALIZE(queue_conf->priority, 0,
+				      RTE_EVENT_DEV_PRIORITY_LOWEST,
+				      CNXK_SSO_PRIORITY_CNT);
+	weight = CNXK_QOS_NORMALIZE(
+		dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
+		RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
+	affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
+				      RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
+				      CNXK_SSO_AFFINITY_CNT);
+
+	plt_sso_dbg("Queue=%u prio=%u weight=%u affinity=%u", queue_id,
+		    priority, weight, affinity);
+
+	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
+					  priority);
 }
 
 void
@@ -314,6 +331,68 @@  cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
 	RTE_SET_USED(queue_id);
 }
 
+int
+cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev, uint8_t queue_id,
+			     uint32_t attr_id, uint32_t *attr_value)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+	if (attr_id == RTE_EVENT_QUEUE_ATTR_WEIGHT)
+		*attr_value = dev->mlt_prio[queue_id].weight;
+	else if (attr_id == RTE_EVENT_QUEUE_ATTR_AFFINITY)
+		*attr_value = dev->mlt_prio[queue_id].affinity;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+int
+cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
+			     uint32_t attr_id, uint64_t attr_value)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint8_t priority, weight, affinity;
+	struct rte_event_queue_conf *conf;
+
+	conf = &event_dev->data->queues_cfg[queue_id];
+
+	switch (attr_id) {
+	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
+		conf->priority = attr_value;
+		break;
+	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
+		dev->mlt_prio[queue_id].weight = attr_value;
+		break;
+	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
+		dev->mlt_prio[queue_id].affinity = attr_value;
+		break;
+	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
+	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
+	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
+	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
+		/* FALLTHROUGH */
+		plt_sso_dbg("Unsupported attribute id %u", attr_id);
+		return -ENOTSUP;
+	default:
+		plt_err("Invalid attribute id %u", attr_id);
+		return -EINVAL;
+	}
+
+	priority = CNXK_QOS_NORMALIZE(conf->priority, 0,
+				      RTE_EVENT_DEV_PRIORITY_LOWEST,
+				      CNXK_SSO_PRIORITY_CNT);
+	weight = CNXK_QOS_NORMALIZE(
+		dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
+		RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
+	affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
+				      RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
+				      CNXK_SSO_AFFINITY_CNT);
+
+	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
+					  priority);
+}
+
 void
 cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
 		       struct rte_event_port_conf *port_conf)
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 5564746e6d..531f6d1a84 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -38,6 +38,11 @@ 
 #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
 #define CNXK_SSO_XAQ_SLACK     (8)
 #define CNXK_SSO_WQE_SG_PTR    (9)
+#define CNXK_SSO_PRIORITY_CNT  (0x8)
+#define CNXK_SSO_WEIGHT_MAX    (0x3f)
+#define CNXK_SSO_WEIGHT_MIN    (0x3)
+#define CNXK_SSO_WEIGHT_CNT    (CNXK_SSO_WEIGHT_MAX - CNXK_SSO_WEIGHT_MIN + 1)
+#define CNXK_SSO_AFFINITY_CNT  (0x10)
 
 #define CNXK_TT_FROM_TAG(x)	    (((x) >> 32) & SSO_TT_EMPTY)
 #define CNXK_TT_FROM_EVENT(x)	    (((x) >> 38) & SSO_TT_EMPTY)
@@ -54,6 +59,8 @@ 
 #define CN10K_GW_MODE_PREF     1
 #define CN10K_GW_MODE_PREF_WFE 2
 
+#define CNXK_QOS_NORMALIZE(val, min, max, cnt)                                 \
+	((min) + (val) / (((max) + (cnt) - 1) / (cnt)))
 #define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name)                               \
 	do {                                                                   \
 		if (strncmp(dev->driver->name, drv_name, strlen(drv_name)))    \
@@ -79,6 +86,11 @@  struct cnxk_sso_qos {
 	uint16_t iaq_prcnt;
 };
 
+struct cnxk_sso_mlt_prio {
+	uint8_t weight;
+	uint8_t affinity;
+};
+
 struct cnxk_sso_evdev {
 	struct roc_sso sso;
 	uint8_t max_event_queues;
@@ -108,6 +120,7 @@  struct cnxk_sso_evdev {
 	uint64_t *timer_adptr_sz;
 	uint16_t vec_pool_cnt;
 	uint64_t *vec_pools;
+	struct cnxk_sso_mlt_prio mlt_prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/* Dev args */
 	uint32_t xae_cnt;
 	uint8_t qos_queue_cnt;
@@ -234,6 +247,12 @@  void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
 int cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
 			 const struct rte_event_queue_conf *queue_conf);
 void cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id);
+int cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev,
+				 uint8_t queue_id, uint32_t attr_id,
+				 uint32_t *attr_value);
+int cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev,
+				 uint8_t queue_id, uint32_t attr_id,
+				 uint64_t attr_value);
 void cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
 			    struct rte_event_port_conf *port_conf);
 int cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
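For context, a standalone sketch (not part of the patch) of what the
CNXK_QOS_NORMALIZE mapping above produces with the constants defined in
cnxk_eventdev.h, assuming RTE_EVENT_DEV_PRIORITY_LOWEST,
RTE_EVENT_QUEUE_WEIGHT_HIGHEST and RTE_EVENT_QUEUE_AFFINITY_HIGHEST are all
255 as in the eventdev headers:

	#include <stdio.h>

	/* Same normalization formula as the driver macro above. */
	#define QOS_NORMALIZE(val, min, max, cnt) \
		((min) + (val) / (((max) + (cnt) - 1) / (cnt)))

	int main(void)
	{
		/* Priority: eventdev 0..255 folds into the 8 SSO priority levels. */
		printf("prio 128     -> %d\n", QOS_NORMALIZE(128, 0, 255, 8));    /* 4 */
		printf("prio 255     -> %d\n", QOS_NORMALIZE(255, 0, 255, 8));    /* 7 */
		/* Weight: eventdev 0..255 is scaled into the SSO 3..63 range. */
		printf("weight 255   -> %d\n", QOS_NORMALIZE(255, 3, 255, 61));   /* 54 */
		/* Affinity: eventdev 0..255 maps onto the 16 SSO affinity steps. */
		printf("affinity 255 -> %d\n", QOS_NORMALIZE(255, 0, 255, 16));   /* 15 */
		return 0;
	}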