diff mbox series

[v4,15/23] net/softnic: add cli to enable and disable pipeline

Message ID 20180705154754.147420-16-jasvinder.singh@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Cristian Dumitrescu
Headers show
Series [v4,01/23] net/softnic: restructuring | expand

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation success Compilation OK

Commit Message

Jasvinder Singh July 5, 2018, 3:47 p.m. UTC
Add cli commands to enable and disable pipelines on specific threads in
softnic.

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
---
 drivers/net/softnic/rte_eth_softnic_cli.c       | 103 ++++++++
 drivers/net/softnic/rte_eth_softnic_internals.h |  10 +
 drivers/net/softnic/rte_eth_softnic_thread.c    | 325 ++++++++++++++++++++++++
 3 files changed, 438 insertions(+)
diff mbox series

Patch

diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c
index 9fbd680..8b65a54 100644
--- a/drivers/net/softnic/rte_eth_softnic_cli.c
+++ b/drivers/net/softnic/rte_eth_softnic_cli.c
@@ -1592,6 +1592,93 @@  cmd_softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
 	}
 }
 
+/**
+ * CLI syntax:
+ *   thread <thread_id> pipeline <pipeline_name> enable
+ *
+ * Parse the tokenized command line and enable pipeline <pipeline_name> on
+ * data plane thread <thread_id>. On any parse failure or when the enable
+ * operation fails, an error message is written to out (at most out_size
+ * bytes); on success out is left unchanged.
+ */
+static void
+cmd_softnic_thread_pipeline_enable(struct pmd_internals *softnic,
+	char **tokens,
+	uint32_t n_tokens,
+	char *out,
+	size_t out_size)
+{
+	char *pipeline_name;
+	uint32_t thread_id;
+	int status;
+
+	/* Command has a fixed shape: exactly 5 tokens. */
+	if (n_tokens != 5) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+		return;
+	}
+
+	if (strcmp(tokens[2], "pipeline") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+		return;
+	}
+
+	pipeline_name = tokens[3];
+
+	if (strcmp(tokens[4], "enable") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+		return;
+	}
+
+	/* Hand over to the thread management layer. */
+	status = softnic_thread_pipeline_enable(softnic, thread_id, pipeline_name);
+	if (status) {
+		snprintf(out, out_size, MSG_CMD_FAIL, "thread pipeline enable");
+		return;
+	}
+}
+
+/**
+ * CLI syntax:
+ *   thread <thread_id> pipeline <pipeline_name> disable
+ *
+ * Parse the tokenized command line and disable pipeline <pipeline_name> on
+ * data plane thread <thread_id>. On any parse failure or when the disable
+ * operation fails, an error message is written to out (at most out_size
+ * bytes); on success out is left unchanged.
+ */
+static void
+cmd_softnic_thread_pipeline_disable(struct pmd_internals *softnic,
+	char **tokens,
+	uint32_t n_tokens,
+	char *out,
+	size_t out_size)
+{
+	char *pipeline_name;
+	uint32_t thread_id;
+	int status;
+
+	/* Command has a fixed shape: exactly 5 tokens. */
+	if (n_tokens != 5) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+		return;
+	}
+
+	if (strcmp(tokens[2], "pipeline") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+		return;
+	}
+
+	pipeline_name = tokens[3];
+
+	if (strcmp(tokens[4], "disable") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+		return;
+	}
+
+	/* Hand over to the thread management layer. */
+	status = softnic_thread_pipeline_disable(softnic, thread_id, pipeline_name);
+	if (status) {
+		snprintf(out, out_size, MSG_CMD_FAIL,
+			"thread pipeline disable");
+		return;
+	}
+}
+
 void
 softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
 {
@@ -1700,6 +1787,22 @@  softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
 		}
 	}
 
+	if (strcmp(tokens[0], "thread") == 0) {
+		if ((n_tokens >= 5) &&
+			(strcmp(tokens[4], "enable") == 0)) {
+			cmd_softnic_thread_pipeline_enable(softnic, tokens, n_tokens,
+				out, out_size);
+			return;
+		}
+
+		if ((n_tokens >= 5) &&
+			(strcmp(tokens[4], "disable") == 0)) {
+			cmd_softnic_thread_pipeline_disable(softnic, tokens, n_tokens,
+				out, out_size);
+			return;
+		}
+	}
+
 	snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]);
 }
 
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index 2aba9a0..8163487 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -783,6 +783,16 @@  softnic_thread_init(struct pmd_internals *p);
 void
 softnic_thread_free(struct pmd_internals *p);
 
+/**
+ * Run pipeline <pipeline_name> on data plane thread <thread_id>.
+ *
+ * @return 0 on success, non-zero on failure.
+ */
+int
+softnic_thread_pipeline_enable(struct pmd_internals *p,
+	uint32_t thread_id,
+	const char *pipeline_name);
+
+/**
+ * Stop running pipeline <pipeline_name> on data plane thread <thread_id>.
+ *
+ * @return 0 on success, non-zero on failure.
+ */
+int
+softnic_thread_pipeline_disable(struct pmd_internals *p,
+	uint32_t thread_id,
+	const char *pipeline_name);
+
 /**
  * CLI
  */
diff --git a/drivers/net/softnic/rte_eth_softnic_thread.c b/drivers/net/softnic/rte_eth_softnic_thread.c
index 74abf81..5bfe704 100644
--- a/drivers/net/softnic/rte_eth_softnic_thread.c
+++ b/drivers/net/softnic/rte_eth_softnic_thread.c
@@ -93,11 +93,30 @@  softnic_thread_init(struct pmd_internals *softnic)
  * Master thread & data plane threads: message passing
  */
 enum thread_req_type {
+	THREAD_REQ_PIPELINE_ENABLE = 0,
+	THREAD_REQ_PIPELINE_DISABLE,
 	THREAD_REQ_MAX
 };
 
 struct softnic_thread_msg_req {
 	enum thread_req_type type;
+
+	union {
+		/* THREAD_REQ_PIPELINE_ENABLE payload: snapshot of everything
+		 * the data plane thread needs to run the pipeline.
+		 */
+		struct {
+			struct rte_pipeline *p;
+			struct {
+				struct rte_table_action *a;
+			} table[RTE_PIPELINE_TABLE_MAX];
+			struct rte_ring *msgq_req;
+			struct rte_ring *msgq_rsp;
+			uint32_t timer_period_ms;
+			uint32_t n_tables;
+		} pipeline_enable;
+
+		/* THREAD_REQ_PIPELINE_DISABLE payload: pipeline to stop. */
+		struct {
+			struct rte_pipeline *p;
+		} pipeline_disable;
+	};
 };
 
 struct softnic_thread_msg_rsp {
@@ -105,6 +124,231 @@  struct softnic_thread_msg_rsp {
 };
 
 /**
+ * Master thread
+ */
+/* Allocate a zeroed message buffer big enough for either a request or a
+ * response: the data plane thread rewrites the request buffer in place as
+ * the response, so one allocation serves both directions.
+ */
+static struct softnic_thread_msg_req *
+thread_msg_alloc(void)
+{
+	size_t size = RTE_MAX(sizeof(struct softnic_thread_msg_req),
+		sizeof(struct softnic_thread_msg_rsp));
+
+	return calloc(1, size);
+}
+
+/* Release a message buffer (same allocation as the original request). */
+static void
+thread_msg_free(struct softnic_thread_msg_rsp *rsp)
+{
+	free(rsp);
+}
+
+/* Post req to data plane thread <thread_id> and busy-wait for its response.
+ * Both loops spin: the enqueue retries while the request ring is full, the
+ * dequeue retries until a response appears. Ownership of req transfers to
+ * the peer; the pointer returned is the same buffer rewritten as a
+ * response, to be released with thread_msg_free().
+ */
+static struct softnic_thread_msg_rsp *
+thread_msg_send_recv(struct pmd_internals *softnic,
+	uint32_t thread_id,
+	struct softnic_thread_msg_req *req)
+{
+	struct softnic_thread *t = &softnic->thread[thread_id];
+	struct rte_ring *msgq_req = t->msgq_req;
+	struct rte_ring *msgq_rsp = t->msgq_rsp;
+	struct softnic_thread_msg_rsp *rsp;
+	int status;
+
+	/* send */
+	do {
+		status = rte_ring_sp_enqueue(msgq_req, req);
+	} while (status == -ENOBUFS);
+
+	/* recv: spins until the data plane thread answers */
+	do {
+		status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
+	} while (status != 0);
+
+	return rsp;
+}
+
+/**
+ * Enable pipeline <pipeline_name> on data plane thread <thread_id>.
+ *
+ * If the target lcore is not in RUNNING state, the per-thread data is
+ * updated directly from the current (master) thread; otherwise a
+ * THREAD_REQ_PIPELINE_ENABLE message is sent to the data plane thread and
+ * its response awaited.
+ *
+ * @return 0 on success, non-zero on validation or execution failure.
+ */
+int
+softnic_thread_pipeline_enable(struct pmd_internals *softnic,
+	uint32_t thread_id,
+	const char *pipeline_name)
+{
+	struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
+	struct softnic_thread *t;
+	struct softnic_thread_msg_req *req;
+	struct softnic_thread_msg_rsp *rsp;
+	enum rte_lcore_state_t thread_state;
+	uint32_t i;
+	int status;
+
+	/* Check input params: pipeline must exist and be fully built
+	 * (at least one input port, output port and table).
+	 */
+	if ((thread_id >= RTE_MAX_LCORE) ||
+		(p == NULL) ||
+		(p->n_ports_in == 0) ||
+		(p->n_ports_out == 0) ||
+		(p->n_tables == 0))
+		return -1;
+
+	/* Target thread must be valid; pipeline must not already run. */
+	t = &softnic->thread[thread_id];
+	if ((t->enabled == 0) ||
+		p->enabled)
+		return -1;
+
+	thread_state = rte_eal_get_lcore_state(thread_id);
+	if (thread_state != RUNNING) {
+		/* Thread not running: safe to update its data directly. */
+		struct softnic_thread_data *td = &softnic->thread_data[thread_id];
+		struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
+
+		if (td->n_pipelines >= THREAD_PIPELINES_MAX)
+			return -1;
+
+		/* Data plane thread */
+		td->p[td->n_pipelines] = p->p;
+
+		tdp->p = p->p;
+		for (i = 0; i < p->n_tables; i++)
+			tdp->table_data[i].a =
+				p->table[i].a;
+		tdp->n_tables = p->n_tables;
+
+		tdp->msgq_req = p->msgq_req;
+		tdp->msgq_rsp = p->msgq_rsp;
+		/* Convert the period from ms to TSC cycles. */
+		tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
+		tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
+
+		td->n_pipelines++;
+
+		/* Pipeline */
+		p->thread_id = thread_id;
+		p->enabled = 1;
+
+		return 0;
+	}
+
+	/* Thread running: go through the message queue instead. */
+	/* Allocate request */
+	req = thread_msg_alloc();
+	if (req == NULL)
+		return -1;
+
+	/* Write request */
+	req->type = THREAD_REQ_PIPELINE_ENABLE;
+	req->pipeline_enable.p = p->p;
+	for (i = 0; i < p->n_tables; i++)
+		req->pipeline_enable.table[i].a =
+			p->table[i].a;
+	req->pipeline_enable.msgq_req = p->msgq_req;
+	req->pipeline_enable.msgq_rsp = p->msgq_rsp;
+	req->pipeline_enable.timer_period_ms = p->timer_period_ms;
+	req->pipeline_enable.n_tables = p->n_tables;
+
+	/* Send request and wait for response */
+	rsp = thread_msg_send_recv(softnic, thread_id, req);
+	if (rsp == NULL)
+		return -1;
+
+	/* Read response */
+	status = rsp->status;
+
+	/* Free response (same buffer as the request) */
+	thread_msg_free(rsp);
+
+	/* Request completion */
+	if (status)
+		return status;
+
+	/* Mark the pipeline as owned by the target thread. */
+	p->thread_id = thread_id;
+	p->enabled = 1;
+
+	return 0;
+}
+
+/**
+ * Disable pipeline <pipeline_name> on data plane thread <thread_id>.
+ *
+ * If the target lcore is not in RUNNING state, the per-thread data is
+ * updated directly from the current (master) thread; otherwise a
+ * THREAD_REQ_PIPELINE_DISABLE message is sent to the data plane thread and
+ * its response awaited.
+ *
+ * @return 0 on success (including when the pipeline is already disabled),
+ *         non-zero on failure.
+ */
+int
+softnic_thread_pipeline_disable(struct pmd_internals *softnic,
+	uint32_t thread_id,
+	const char *pipeline_name)
+{
+	struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
+	struct softnic_thread *t;
+	struct softnic_thread_msg_req *req;
+	struct softnic_thread_msg_rsp *rsp;
+	enum rte_lcore_state_t thread_state;
+	int status;
+
+	/* Check input params */
+	if ((thread_id >= RTE_MAX_LCORE) ||
+		(p == NULL))
+		return -1;
+
+	t = &softnic->thread[thread_id];
+	if (t->enabled == 0)
+		return -1;
+
+	/* Disabling a pipeline that is not enabled is a no-op. */
+	if (p->enabled == 0)
+		return 0;
+
+	/* The pipeline must be disabled on the thread that runs it. */
+	if (p->thread_id != thread_id)
+		return -1;
+
+	thread_state = rte_eal_get_lcore_state(thread_id);
+	if (thread_state != RUNNING) {
+		/* Thread not running: safe to update its data directly. */
+		struct softnic_thread_data *td = &softnic->thread_data[thread_id];
+		uint32_t i;
+
+		for (i = 0; i < td->n_pipelines; i++) {
+			struct pipeline_data *tdp = &td->pipeline_data[i];
+
+			if (tdp->p != p->p)
+				continue;
+
+			/* Data plane thread: remove by overwriting the slot
+			 * with the last entry (order is not preserved).
+			 */
+			if (i < td->n_pipelines - 1) {
+				struct rte_pipeline *pipeline_last =
+					td->p[td->n_pipelines - 1];
+				struct pipeline_data *tdp_last =
+					&td->pipeline_data[td->n_pipelines - 1];
+
+				td->p[i] = pipeline_last;
+				memcpy(tdp, tdp_last, sizeof(*tdp));
+			}
+
+			td->n_pipelines--;
+
+			/* Pipeline */
+			p->enabled = 0;
+
+			break;
+		}
+
+		return 0;
+	}
+
+	/* Thread running: go through the message queue instead. */
+	/* Allocate request */
+	req = thread_msg_alloc();
+	if (req == NULL)
+		return -1;
+
+	/* Write request */
+	req->type = THREAD_REQ_PIPELINE_DISABLE;
+	req->pipeline_disable.p = p->p;
+
+	/* Send request and wait for response */
+	rsp = thread_msg_send_recv(softnic, thread_id, req);
+	if (rsp == NULL)
+		return -1;
+
+	/* Read response */
+	status = rsp->status;
+
+	/* Free response (same buffer as the request) */
+	thread_msg_free(rsp);
+
+	/* Request completion */
+	if (status)
+		return status;
+
+	p->enabled = 0;
+
+	return 0;
+}
+
+/**
  * Data plane threads: message handling
  */
 static inline struct softnic_thread_msg_req *
@@ -131,6 +375,79 @@  thread_msg_send(struct rte_ring *msgq_rsp,
 	} while (status == -ENOBUFS);
 }
 
+/* Data plane handler for THREAD_REQ_PIPELINE_ENABLE: attach the pipeline
+ * described in the request to this thread. The request buffer is rewritten
+ * in place as the response.
+ */
+static struct softnic_thread_msg_rsp *
+thread_msg_handle_pipeline_enable(struct softnic_thread_data *t,
+	struct softnic_thread_msg_req *req)
+{
+	struct softnic_thread_msg_rsp *rsp = (struct softnic_thread_msg_rsp *)req;
+	struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
+	uint32_t i;
+
+	/* Request: reject when the per-thread pipeline table is full. */
+	if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
+		rsp->status = -1;
+		return rsp;
+	}
+
+	t->p[t->n_pipelines] = req->pipeline_enable.p;
+
+	p->p = req->pipeline_enable.p;
+	for (i = 0; i < req->pipeline_enable.n_tables; i++)
+		p->table_data[i].a =
+			req->pipeline_enable.table[i].a;
+
+	p->n_tables = req->pipeline_enable.n_tables;
+
+	p->msgq_req = req->pipeline_enable.msgq_req;
+	p->msgq_rsp = req->pipeline_enable.msgq_rsp;
+	/* Convert the period from ms to TSC cycles. */
+	p->timer_period =
+		(rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
+	p->time_next = rte_get_tsc_cycles() + p->timer_period;
+
+	t->n_pipelines++;
+
+	/* Response */
+	rsp->status = 0;
+	return rsp;
+}
+
+/* Data plane handler for THREAD_REQ_PIPELINE_DISABLE: detach the given
+ * pipeline from this thread. The request buffer is rewritten in place as
+ * the response.
+ */
+static struct softnic_thread_msg_rsp *
+thread_msg_handle_pipeline_disable(struct softnic_thread_data *t,
+	struct softnic_thread_msg_req *req)
+{
+	struct softnic_thread_msg_rsp *rsp = (struct softnic_thread_msg_rsp *)req;
+	uint32_t n_pipelines = t->n_pipelines;
+	struct rte_pipeline *pipeline = req->pipeline_disable.p;
+	uint32_t i;
+
+	/* find pipeline */
+	for (i = 0; i < n_pipelines; i++) {
+		struct pipeline_data *p = &t->pipeline_data[i];
+
+		if (p->p != pipeline)
+			continue;
+
+		/* Remove by overwriting the slot with the last entry
+		 * (order is not preserved).
+		 */
+		if (i < n_pipelines - 1) {
+			struct rte_pipeline *pipeline_last =
+				t->p[n_pipelines - 1];
+			struct pipeline_data *p_last =
+				&t->pipeline_data[n_pipelines - 1];
+
+			t->p[i] = pipeline_last;
+			memcpy(p, p_last, sizeof(*p));
+		}
+
+		t->n_pipelines--;
+
+		rsp->status = 0;
+		return rsp;
+	}
+
+	/* Should not get here: the master validated ownership before sending,
+	 * so a missing pipeline is treated as already disabled (success).
+	 */
+	rsp->status = 0;
+	return rsp;
+}
+
 static void
 thread_msg_handle(struct softnic_thread_data *t)
 {
@@ -143,6 +460,14 @@  thread_msg_handle(struct softnic_thread_data *t)
 			break;
 
 		switch (req->type) {
+		case THREAD_REQ_PIPELINE_ENABLE:
+			rsp = thread_msg_handle_pipeline_enable(t, req);
+			break;
+
+		case THREAD_REQ_PIPELINE_DISABLE:
+			rsp = thread_msg_handle_pipeline_disable(t, req);
+			break;
+
 		default:
 			rsp = (struct softnic_thread_msg_rsp *)req;
 			rsp->status = -1;