[v6,08/15] graph: add struct for stream moving between cores

Message ID: 20230509060347.1237884-9-zhirun.yan@intel.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Thomas Monjalon
Series: graph enhancement for multi-core dispatch

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Yan, Zhirun May 9, 2023, 6:03 a.m. UTC
Add struct graph_sched_wq_node to hold a graph scheduling workqueue
node stream, used by the mcore dispatch model to move streams between
cores.

Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
Signed-off-by: Cunming Liang <cunming.liang@intel.com>
Signed-off-by: Zhirun Yan <zhirun.yan@intel.com>
---
 lib/graph/graph.c                   |  1 +
 lib/graph/graph_populate.c          |  1 +
 lib/graph/graph_private.h           | 12 ++++++++++++
 lib/graph/rte_graph_worker_common.h | 21 +++++++++++++++++++++
 4 files changed, 35 insertions(+)
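
For readers of the series: the struct added in graph_private.h below is a
fixed-size carrier for one node's pending stream. As a minimal, illustrative
sketch (not code from this series), a producer core could fill such a carrier
and hand it to the destination graph's work-queue ring roughly like this; the
helper name and error handling are assumptions, while the fields used
(node_off, nb_objs, objs[], graph->wq, graph->mp) are the ones this patch
introduces.

#include <errno.h>
#include <rte_mempool.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include "graph_private.h"            /* struct graph_sched_wq_node (internal) */
#include "rte_graph_worker_common.h"  /* struct rte_graph with the new wq/mp fields */

/* Illustrative helper: pack one node's pending objects into a work-queue
 * node and enqueue it to the graph bound to another lcore.
 * Assumes nb_objs <= RTE_GRAPH_BURST_SIZE. */
static int
sched_wq_node_enqueue(struct rte_graph *dst_graph, rte_graph_off_t node_off,
		      void **objs, uint16_t nb_objs)
{
	struct graph_sched_wq_node *wq_node;

	/* Work-queue nodes are allocated from the per-graph mempool. */
	if (rte_mempool_get(dst_graph->mp, (void **)&wq_node) != 0)
		return -ENOMEM;

	wq_node->node_off = node_off;  /* node to run on the peer core */
	wq_node->nb_objs = nb_objs;    /* number of objects in the stream */
	rte_memcpy(wq_node->objs, objs, nb_objs * sizeof(void *));

	/* Hand the stream over via the destination graph's ring. */
	if (rte_ring_enqueue(dst_graph->wq, wq_node) != 0) {
		rte_mempool_put(dst_graph->mp, wq_node);
		return -ENOBUFS;
	}

	return 0;
}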
  

Comments

Jerin Jacob May 24, 2023, 7:24 a.m. UTC | #1
On Tue, May 9, 2023 at 11:35 AM Zhirun Yan <zhirun.yan@intel.com> wrote:
>
> Add graph_sched_wq_node to hold graph scheduling workqueue
> node.
>
> Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
> Signed-off-by: Cunming Liang <cunming.liang@intel.com>
> Signed-off-by: Zhirun Yan <zhirun.yan@intel.com>
> + * @internal
> + *
> + * Structure that holds the graph scheduling workqueue node stream.
> + * Used for mcore dispatch model.
> + */
> +struct graph_sched_wq_node {

Could we change it to graph_mcore_dispatch_wq_node? Just to make it clear
this is for the mcore dispatch model.
  
Yan, Zhirun May 26, 2023, 10:02 a.m. UTC | #2
> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Wednesday, May 24, 2023 3:25 PM
> To: Yan, Zhirun <zhirun.yan@intel.com>
> Cc: dev@dpdk.org; jerinj@marvell.com; kirankumark@marvell.com;
> ndabilpuram@marvell.com; stephen@networkplumber.org;
> pbhagavatula@marvell.com; Liang, Cunming <cunming.liang@intel.com>; Wang,
> Haiyue <haiyue.wang@intel.com>
> Subject: Re: [PATCH v6 08/15] graph: add struct for stream moving between
> cores
> 
> On Tue, May 9, 2023 at 11:35 AM Zhirun Yan <zhirun.yan@intel.com> wrote:
> >
> > Add graph_sched_wq_node to hold graph scheduling workqueue node.
> >
> > Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
> > Signed-off-by: Cunming Liang <cunming.liang@intel.com>
> > Signed-off-by: Zhirun Yan <zhirun.yan@intel.com>
> > + * @internal
> > + *
> > + * Structure that holds the graph scheduling workqueue node stream.
> > + * Used for mcore dispatch model.
> > + */
> > +struct graph_sched_wq_node {
> 
> Could we change it to graph_mcore_dispatch_wq_node? Just to make it clear
> this is for the mcore dispatch model.

Yes.
  

Patch

diff --git a/lib/graph/graph.c b/lib/graph/graph.c
index 2629c79103..e809aa55b0 100644
--- a/lib/graph/graph.c
+++ b/lib/graph/graph.c
@@ -290,6 +290,7 @@  rte_graph_model_dispatch_core_bind(rte_graph_t id, int lcore)
 			break;
 
 	graph->lcore_id = lcore;
+	graph->graph->lcore_id = graph->lcore_id;
 	graph->socket = rte_lcore_to_socket_id(lcore);
 
 	/* check the availability of source node */
diff --git a/lib/graph/graph_populate.c b/lib/graph/graph_populate.c
index 2c0844ce92..7dcf1420c1 100644
--- a/lib/graph/graph_populate.c
+++ b/lib/graph/graph_populate.c
@@ -89,6 +89,7 @@  graph_nodes_populate(struct graph *_graph)
 		}
 		node->id = graph_node->node->id;
 		node->parent_id = pid;
+		node->lcore_id = graph_node->node->lcore_id;
 		nb_edges = graph_node->node->nb_edges;
 		node->nb_edges = nb_edges;
 		off += sizeof(struct rte_node);
diff --git a/lib/graph/graph_private.h b/lib/graph/graph_private.h
index 52ca30ed56..02b10ea2b6 100644
--- a/lib/graph/graph_private.h
+++ b/lib/graph/graph_private.h
@@ -61,6 +61,18 @@  struct node {
 	char next_nodes[][RTE_NODE_NAMESIZE]; /**< Names of next nodes. */
 };
 
+/**
+ * @internal
+ *
+ * Structure that holds the graph scheduling workqueue node stream.
+ * Used for mcore dispatch model.
+ */
+struct graph_sched_wq_node {
+	rte_graph_off_t node_off;
+	uint16_t nb_objs;
+	void *objs[RTE_GRAPH_BURST_SIZE];
+} __rte_cache_aligned;
+
 /**
  * @internal
  *
diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
index 9bde8856ae..8e968e2022 100644
--- a/lib/graph/rte_graph_worker_common.h
+++ b/lib/graph/rte_graph_worker_common.h
@@ -30,6 +30,13 @@ 
 extern "C" {
 #endif
 
+/**
+ * @internal
+ *
+ * Singly-linked list head for graph schedule run-queue.
+ */
+SLIST_HEAD(rte_graph_rq_head, rte_graph);
+
 /**
  * @internal
  *
@@ -41,6 +48,15 @@  struct rte_graph {
 	uint32_t cir_mask;	     /**< Circular buffer wrap around mask. */
 	rte_node_t nb_nodes;	     /**< Number of nodes in the graph. */
 	rte_graph_off_t *cir_start;  /**< Pointer to circular buffer. */
+	/* Graph schedule */
+	struct rte_graph_rq_head *rq __rte_cache_aligned; /* The run-queue */
+	struct rte_graph_rq_head rq_head; /* The head for run-queue list */
+
+	SLIST_ENTRY(rte_graph) rq_next;   /* The next for run-queue list */
+	unsigned int lcore_id;  /**< The graph running Lcore. */
+	struct rte_ring *wq;    /**< The work-queue for pending streams. */
+	struct rte_mempool *mp; /**< The mempool for scheduling streams. */
+	/* Graph schedule area */
 	rte_graph_off_t nodes_start; /**< Offset at which node memory starts. */
 	rte_graph_t id;	/**< Graph identifier. */
 	int socket;	/**< Socket ID where memory is allocated. */
@@ -74,6 +90,11 @@  struct rte_node {
 	/** Original process function when pcap is enabled. */
 	rte_node_process_t original_process;
 
+	RTE_STD_C11
+		union {
+		/* Fast schedule area for mcore dispatch model */
+		unsigned int lcore_id;  /**< Node running lcore. */
+		};
 	/* Fast path area  */
 #define RTE_NODE_CTX_SZ 16
 	uint8_t ctx[RTE_NODE_CTX_SZ] __rte_cache_aligned; /**< Node Context. */
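
Taken together, the new rte_graph fields (rq/rq_head/rq_next for the per-core
run-queue, wq for pending streams, mp for stream allocation) and the per-node
lcore_id give the dispatch model what it needs to move streams between cores.
As an illustrative counterpart to the producer sketch above (again an
assumption, not code from this series; the actual dispatch loop arrives in
later patches), the destination lcore could drain its work-queue roughly as
follows.

#include <rte_common.h>               /* RTE_PTR_ADD */
#include <rte_mempool.h>
#include <rte_ring.h>
#include "graph_private.h"            /* struct graph_sched_wq_node (internal) */
#include "rte_graph_worker_common.h"  /* struct rte_graph, struct rte_node */

/* Illustrative drain loop: pop carried streams from the graph's work-queue
 * ring and run each one on the node identified by node_off, which is an
 * offset from the graph base as used throughout lib/graph. */
static inline void
sched_wq_drain(struct rte_graph *graph)
{
	struct graph_sched_wq_node *wq_node;
	struct rte_node *node;

	while (rte_ring_dequeue(graph->wq, (void **)&wq_node) == 0) {
		node = RTE_PTR_ADD(graph, wq_node->node_off);
		node->process(graph, node, wq_node->objs, wq_node->nb_objs);
		rte_mempool_put(graph->mp, wq_node);
	}
}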