[v3,11/33] event/cnxk: add event port link and unlink

Message ID 20210430135336.2749-12-pbhagavatula@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series Marvell CNXK Event device Driver

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Pavan Nikhilesh Bhagavatula April 30, 2021, 1:53 p.m. UTC
  From: Shijith Thotton <sthotton@marvell.com>

Add platform-specific APIs to link and unlink event queues to/from event ports.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c |  64 +++++++++++++++++-
 drivers/event/cnxk/cn9k_eventdev.c  | 101 ++++++++++++++++++++++++++++
 drivers/event/cnxk/cnxk_eventdev.c  |  36 ++++++++++
 drivers/event/cnxk/cnxk_eventdev.h  |  12 +++-
 4 files changed, 210 insertions(+), 3 deletions(-)
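
Not part of the patch, but for context: a minimal sketch of how an application reaches these new ops through the public eventdev API. The helper name link_port_to_queues and the assumption of an already-configured device/port (via rte_event_dev_configure(), rte_event_queue_setup() and rte_event_port_setup()) are illustrative, not from the patch.

	#include <errno.h>
	#include <rte_eventdev.h>

	static int
	link_port_to_queues(uint8_t dev_id, uint8_t port_id, uint8_t nb_queues)
	{
		uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
		uint8_t q;
		int ret;

		for (q = 0; q < nb_queues; q++)
			queues[q] = q;

		/* Priorities are left NULL; the cnxk PMD ignores them
		 * (see RTE_SET_USED(priorities) in the patch), as SSO has
		 * no per-link priority.
		 */
		ret = rte_event_port_link(dev_id, port_id, queues, NULL,
					  nb_queues);
		if (ret != nb_queues)
			return -EIO; /* partial link or error */

		/* Later, detach the port from all of its queues in one call
		 * (NULL queue list means "all linked queues").
		 */
		ret = rte_event_port_unlink(dev_id, port_id, NULL, 0);

		return ret < 0 ? ret : 0;
	}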
  

Patch

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 11eaef380..b149b7831 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -63,6 +63,24 @@  cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
 	return ws;
 }
 
+static int
+cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
+{
+	struct cnxk_sso_evdev *dev = arg;
+	struct cn10k_sso_hws *ws = port;
+
+	return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
+}
+
+static int
+cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
+{
+	struct cnxk_sso_evdev *dev = arg;
+	struct cn10k_sso_hws *ws = port;
+
+	return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
+}
+
 static void
 cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 {
@@ -83,9 +101,12 @@  cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 static void
 cn10k_sso_hws_release(void *arg, void *hws)
 {
+	struct cnxk_sso_evdev *dev = arg;
 	struct cn10k_sso_hws *ws = hws;
+	int i;
 
-	RTE_SET_USED(arg);
+	for (i = 0; i < dev->nb_event_queues; i++)
+		roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
 	memset(ws, 0, sizeof(*ws));
 }
 
@@ -149,6 +170,12 @@  cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
 	if (rc < 0)
 		goto cnxk_rsrc_fini;
 
+	/* Restore any prior port-queue mapping. */
+	cnxk_sso_restore_links(event_dev, cn10k_sso_hws_link);
+
+	dev->configured = 1;
+	rte_mb();
+
 	return 0;
 cnxk_rsrc_fini:
 	roc_sso_rsrc_fini(&dev->sso);
@@ -184,6 +211,38 @@  cn10k_sso_port_release(void *port)
 	rte_free(gws_cookie);
 }
 
+static int
+cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
+		    const uint8_t queues[], const uint8_t priorities[],
+		    uint16_t nb_links)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint16_t hwgrp_ids[nb_links];
+	uint16_t link;
+
+	RTE_SET_USED(priorities);
+	for (link = 0; link < nb_links; link++)
+		hwgrp_ids[link] = queues[link];
+	nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
+
+	return (int)nb_links;
+}
+
+static int
+cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
+		      uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint16_t hwgrp_ids[nb_unlinks];
+	uint16_t unlink;
+
+	for (unlink = 0; unlink < nb_unlinks; unlink++)
+		hwgrp_ids[unlink] = queues[unlink];
+	nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
+
+	return (int)nb_unlinks;
+}
+
 static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -193,6 +252,9 @@  static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.port_def_conf = cnxk_sso_port_def_conf,
 	.port_setup = cn10k_sso_port_setup,
 	.port_release = cn10k_sso_port_release,
+	.port_link = cn10k_sso_port_link,
+	.port_unlink = cn10k_sso_port_unlink,
+	.timeout_ticks = cnxk_sso_timeout_ticks,
 };
 
 static int
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 2daa14b50..b26fc0eae 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -18,6 +18,54 @@  cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
 	ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
 }
 
+static int
+cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
+{
+	struct cnxk_sso_evdev *dev = arg;
+	struct cn9k_sso_hws_dual *dws;
+	struct cn9k_sso_hws *ws;
+	int rc;
+
+	if (dev->dual_ws) {
+		dws = port;
+		rc = roc_sso_hws_link(&dev->sso,
+				      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
+				      nb_link);
+		rc |= roc_sso_hws_link(&dev->sso,
+				       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
+				       map, nb_link);
+	} else {
+		ws = port;
+		rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
+	}
+
+	return rc;
+}
+
+static int
+cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
+{
+	struct cnxk_sso_evdev *dev = arg;
+	struct cn9k_sso_hws_dual *dws;
+	struct cn9k_sso_hws *ws;
+	int rc;
+
+	if (dev->dual_ws) {
+		dws = port;
+		rc = roc_sso_hws_unlink(&dev->sso,
+					CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
+					map, nb_link);
+		rc |= roc_sso_hws_unlink(&dev->sso,
+					 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
+					 map, nb_link);
+	} else {
+		ws = port;
+		rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
+	}
+
+	return rc;
+}
+
 static void
 cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 {
@@ -54,12 +102,24 @@  cn9k_sso_hws_release(void *arg, void *hws)
 	struct cnxk_sso_evdev *dev = arg;
 	struct cn9k_sso_hws_dual *dws;
 	struct cn9k_sso_hws *ws;
+	int i;
 
 	if (dev->dual_ws) {
 		dws = hws;
+		for (i = 0; i < dev->nb_event_queues; i++) {
+			roc_sso_hws_unlink(&dev->sso,
+					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
+					   (uint16_t *)&i, 1);
+			roc_sso_hws_unlink(&dev->sso,
+					   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
+					   (uint16_t *)&i, 1);
+		}
 		memset(dws, 0, sizeof(*dws));
 	} else {
 		ws = hws;
+		for (i = 0; i < dev->nb_event_queues; i++)
+			roc_sso_hws_unlink(&dev->sso, ws->hws_id,
+					   (uint16_t *)&i, 1);
 		memset(ws, 0, sizeof(*ws));
 	}
 }
@@ -183,6 +243,12 @@  cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
 	if (rc < 0)
 		goto cnxk_rsrc_fini;
 
+	/* Restore any prior port-queue mapping. */
+	cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);
+
+	dev->configured = 1;
+	rte_mb();
+
 	return 0;
 cnxk_rsrc_fini:
 	roc_sso_rsrc_fini(&dev->sso);
@@ -218,6 +284,38 @@  cn9k_sso_port_release(void *port)
 	rte_free(gws_cookie);
 }
 
+static int
+cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
+		   const uint8_t queues[], const uint8_t priorities[],
+		   uint16_t nb_links)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint16_t hwgrp_ids[nb_links];
+	uint16_t link;
+
+	RTE_SET_USED(priorities);
+	for (link = 0; link < nb_links; link++)
+		hwgrp_ids[link] = queues[link];
+	nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
+
+	return (int)nb_links;
+}
+
+static int
+cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
+		     uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint16_t hwgrp_ids[nb_unlinks];
+	uint16_t unlink;
+
+	for (unlink = 0; unlink < nb_unlinks; unlink++)
+		hwgrp_ids[unlink] = queues[unlink];
+	nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
+
+	return (int)nb_unlinks;
+}
+
 static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -227,6 +325,9 @@  static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.port_def_conf = cnxk_sso_port_def_conf,
 	.port_setup = cn9k_sso_port_setup,
 	.port_release = cn9k_sso_port_release,
+	.port_link = cn9k_sso_port_link,
+	.port_unlink = cn9k_sso_port_unlink,
+	.timeout_ticks = cnxk_sso_timeout_ticks,
 };
 
 static int
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index daf24d84a..e68079997 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -161,6 +161,32 @@  cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
 	return -ENOMEM;
 }
 
+void
+cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
+		       cnxk_sso_link_t link_fn)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP];
+	int i, j;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		uint16_t nb_hwgrp = 0;
+
+		links_map = event_dev->data->links_map;
+		/* Point links_map to this port specific area */
+		links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);
+
+		for (j = 0; j < dev->nb_event_queues; j++) {
+			if (links_map[j] == 0xdead)
+				continue;
+			hwgrp[nb_hwgrp] = j;
+			nb_hwgrp++;
+		}
+
+		link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp);
+	}
+}
+
 int
 cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
 {
@@ -290,6 +316,16 @@  cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
 	return 0;
 }
 
+int
+cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
+		       uint64_t *tmo_ticks)
+{
+	RTE_SET_USED(event_dev);
+	*tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());
+
+	return 0;
+}
+
 static void
 parse_queue_param(char *value, void *opaque)
 {
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 79eab1829..97a944d88 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -17,8 +17,9 @@ 
 #define CNXK_SSO_XAE_CNT  "xae_cnt"
 #define CNXK_SSO_GGRP_QOS "qos"
 
-#define NSEC2USEC(__ns) ((__ns) / 1E3)
-#define USEC2NSEC(__us) ((__us)*1E3)
+#define NSEC2USEC(__ns)		((__ns) / 1E3)
+#define USEC2NSEC(__us)		((__us)*1E3)
+#define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
 
 #define CNXK_SSO_MAX_HWGRP     (RTE_EVENT_MAX_QUEUES_PER_DEV + 1)
 #define CNXK_SSO_FC_NAME       "cnxk_evdev_xaq_fc"
@@ -33,6 +34,8 @@ 
 typedef void *(*cnxk_sso_init_hws_mem_t)(void *dev, uint8_t port_id);
 typedef void (*cnxk_sso_hws_setup_t)(void *dev, void *ws, uintptr_t *grp_base);
 typedef void (*cnxk_sso_hws_release_t)(void *dev, void *ws);
+typedef int (*cnxk_sso_link_t)(void *dev, void *ws, uint16_t *map,
+			       uint16_t nb_link);
 
 struct cnxk_sso_qos {
 	uint16_t queue;
@@ -48,6 +51,7 @@  struct cnxk_sso_evdev {
 	uint8_t is_timeout_deq;
 	uint8_t nb_event_queues;
 	uint8_t nb_event_ports;
+	uint8_t configured;
 	uint32_t deq_tmo_ns;
 	uint32_t min_dequeue_timeout_ns;
 	uint32_t max_dequeue_timeout_ns;
@@ -169,6 +173,8 @@  int cnxk_sso_dev_validate(const struct rte_eventdev *event_dev);
 int cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
 			   cnxk_sso_init_hws_mem_t init_hws_mem,
 			   cnxk_sso_hws_setup_t hws_setup);
+void cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
+			    cnxk_sso_link_t link_fn);
 void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
 			     struct rte_event_queue_conf *queue_conf);
 int cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
@@ -178,5 +184,7 @@  void cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
 			    struct rte_event_port_conf *port_conf);
 int cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
 			cnxk_sso_hws_setup_t hws_setup_fn);
+int cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
+			   uint64_t *tmo_ticks);
 
 #endif /* __CNXK_EVENTDEV_H__ */
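
For reference, the timeout conversion wired up by cnxk_sso_timeout_ticks() is a plain ns-to-ticks scaling against the timer frequency, as the NSEC2TICK macro above shows. A hypothetical standalone rendering of the same arithmetic (the function name ns_to_ticks and the 1 GHz example frequency are illustrative, not from the patch):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors NSEC2TICK(__ns, __freq) = ((__ns) * (__freq)) / 1E9. */
	static uint64_t
	ns_to_ticks(uint64_t ns, uint64_t timer_hz)
	{
		return (ns * timer_hz) / 1000000000ULL;
	}

	int
	main(void)
	{
		/* e.g. a 10 us dequeue timeout on a 1 GHz timer -> 10000 ticks */
		printf("%" PRIu64 "\n", ns_to_ticks(10 * 1000, 1000000000ULL));
		return 0;
	}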