From patchwork Fri Jun 8 12:41:49 2018
X-Patchwork-Submitter: Jasvinder Singh
X-Patchwork-Id: 40841
X-Patchwork-Delegate: cristian.dumitrescu@intel.com
From: Jasvinder Singh
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com
Date: Fri, 8 Jun 2018 13:41:49 +0100
Message-Id: <20180608124155.140663-16-jasvinder.singh@intel.com>
In-Reply-To: <20180608124155.140663-1-jasvinder.singh@intel.com>
References: <20180608124155.140663-1-jasvinder.singh@intel.com>
Subject: [dpdk-dev] [PATCH 15/21] net/softnic: add CLI to enable and disable
 pipeline

Add CLI commands to enable and disable pipelines on specific data plane
threads in softnic.
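
The command syntax, as accepted by the parser added below, is:

    thread <thread_id> pipeline <pipeline_name> enable
    thread <thread_id> pipeline <pipeline_name> disable

For example, "thread 1 pipeline PIPELINE0 enable" runs pipeline PIPELINE0
on data plane thread 1 (PIPELINE0 is an illustrative name for a pipeline
created earlier through the CLI).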
Signed-off-by: Cristian Dumitrescu
Signed-off-by: Jasvinder Singh
---
 drivers/net/softnic/rte_eth_softnic_cli.c       | 103 ++++++++
 drivers/net/softnic/rte_eth_softnic_internals.h |  10 +
 drivers/net/softnic/rte_eth_softnic_thread.c    | 325 ++++++++++++++++++++++++
 3 files changed, 438 insertions(+)

diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c
index 6ac3c2e..6884eac 100644
--- a/drivers/net/softnic/rte_eth_softnic_cli.c
+++ b/drivers/net/softnic/rte_eth_softnic_cli.c
@@ -1561,6 +1561,93 @@ cmd_pipeline_port_in_disable(struct pmd_internals *softnic,
 	}
 }
 
+/**
+ * thread pipeline enable
+ */
+static void
+cmd_thread_pipeline_enable(struct pmd_internals *softnic,
+	char **tokens,
+	uint32_t n_tokens,
+	char *out,
+	size_t out_size)
+{
+	char *pipeline_name;
+	uint32_t thread_id;
+	int status;
+
+	if (n_tokens != 5) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	if (parser_read_uint32(&thread_id, tokens[1]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+		return;
+	}
+
+	if (strcmp(tokens[2], "pipeline") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+		return;
+	}
+
+	pipeline_name = tokens[3];
+
+	if (strcmp(tokens[4], "enable") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+		return;
+	}
+
+	status = thread_pipeline_enable(softnic, thread_id, pipeline_name);
+	if (status) {
+		snprintf(out, out_size, MSG_CMD_FAIL, "thread pipeline enable");
+		return;
+	}
+}
+
+/**
+ * thread pipeline disable
+ */
+static void
+cmd_thread_pipeline_disable(struct pmd_internals *softnic,
+	char **tokens,
+	uint32_t n_tokens,
+	char *out,
+	size_t out_size)
+{
+	char *pipeline_name;
+	uint32_t thread_id;
+	int status;
+
+	if (n_tokens != 5) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	if (parser_read_uint32(&thread_id, tokens[1]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+		return;
+	}
+
+	if (strcmp(tokens[2], "pipeline") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+		return;
+	}
+
+	pipeline_name = tokens[3];
+
+	if (strcmp(tokens[4], "disable") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+		return;
+	}
+
+	status = thread_pipeline_disable(softnic, thread_id, pipeline_name);
+	if (status) {
+		snprintf(out, out_size, MSG_CMD_FAIL,
+			"thread pipeline disable");
+		return;
+	}
+}
+
 void
 cli_process(char *in, char *out, size_t out_size, void *arg)
 {
@@ -1669,6 +1756,22 @@ cli_process(char *in, char *out, size_t out_size, void *arg)
 		}
 	}
 
+	if (strcmp(tokens[0], "thread") == 0) {
+		if ((n_tokens >= 5) &&
+			(strcmp(tokens[4], "enable") == 0)) {
+			cmd_thread_pipeline_enable(softnic, tokens, n_tokens,
+				out, out_size);
+			return;
+		}
+
+		if ((n_tokens >= 5) &&
+			(strcmp(tokens[4], "disable") == 0)) {
+			cmd_thread_pipeline_disable(softnic, tokens, n_tokens,
+				out, out_size);
+			return;
+		}
+	}
+
 	snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]);
 }
 
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index 069f490..37c8583 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -796,6 +796,16 @@ thread_init(struct pmd_internals *p);
 void
 thread_free(struct pmd_internals *p);
 
+int
+thread_pipeline_enable(struct pmd_internals *p,
+	uint32_t thread_id,
+	const char *pipeline_name);
+
+int
+thread_pipeline_disable(struct pmd_internals *p,
+	uint32_t thread_id,
+	const char *pipeline_name);
+
 /**
  * CLI
  */
diff --git a/drivers/net/softnic/rte_eth_softnic_thread.c b/drivers/net/softnic/rte_eth_softnic_thread.c
index 5cffdc2..ecc525a 100644
--- a/drivers/net/softnic/rte_eth_softnic_thread.c
+++ b/drivers/net/softnic/rte_eth_softnic_thread.c
@@ -93,11 +93,30 @@ thread_init(struct pmd_internals *softnic)
  * Master thread & data plane threads: message passing
  */
 enum thread_req_type {
+	THREAD_REQ_PIPELINE_ENABLE = 0,
+	THREAD_REQ_PIPELINE_DISABLE,
 	THREAD_REQ_MAX
 };
 
 struct thread_msg_req {
 	enum thread_req_type type;
+
+	union {
+		struct {
+			struct rte_pipeline *p;
+			struct {
+				struct rte_table_action *a;
+			} table[RTE_PIPELINE_TABLE_MAX];
+			struct rte_ring *msgq_req;
+			struct rte_ring *msgq_rsp;
+			uint32_t timer_period_ms;
+			uint32_t n_tables;
+		} pipeline_enable;
+
+		struct {
+			struct rte_pipeline *p;
+		} pipeline_disable;
+	};
 };
 
 struct thread_msg_rsp {
@@ -105,6 +124,231 @@ struct thread_msg_rsp {
 };
 
 /**
+ * Master thread
+ */
+static struct thread_msg_req *
+thread_msg_alloc(void)
+{
+	size_t size = RTE_MAX(sizeof(struct thread_msg_req),
+		sizeof(struct thread_msg_rsp));
+
+	return calloc(1, size);
+}
+
+static void
+thread_msg_free(struct thread_msg_rsp *rsp)
+{
+	free(rsp);
+}
+
+static struct thread_msg_rsp *
+thread_msg_send_recv(struct pmd_internals *softnic,
+	uint32_t thread_id,
+	struct thread_msg_req *req)
+{
+	struct thread *t = &softnic->thread[thread_id];
+	struct rte_ring *msgq_req = t->msgq_req;
+	struct rte_ring *msgq_rsp = t->msgq_rsp;
+	struct thread_msg_rsp *rsp;
+	int status;
+
+	/* send */
+	do {
+		status = rte_ring_sp_enqueue(msgq_req, req);
+	} while (status == -ENOBUFS);
+
+	/* recv */
+	do {
+		status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
+	} while (status != 0);
+
+	return rsp;
+}
+
+int
+thread_pipeline_enable(struct pmd_internals *softnic,
+	uint32_t thread_id,
+	const char *pipeline_name)
+{
+	struct pipeline *p = pipeline_find(softnic, pipeline_name);
+	struct thread *t;
+	struct thread_msg_req *req;
+	struct thread_msg_rsp *rsp;
+	enum rte_lcore_state_t thread_state;
+	uint32_t i;
+	int status;
+
+	/* Check input params */
+	if ((thread_id >= RTE_MAX_LCORE) ||
+		(p == NULL) ||
+		(p->n_ports_in == 0) ||
+		(p->n_ports_out == 0) ||
+		(p->n_tables == 0))
+		return -1;
+
+	t = &softnic->thread[thread_id];
+	if ((t->enabled == 0) ||
+		p->enabled)
+		return -1;
+
+	thread_state = rte_eal_get_lcore_state(thread_id);
+	if (thread_state != RUNNING) {
+		struct thread_data *td = &softnic->thread_data[thread_id];
+		struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
+
+		if (td->n_pipelines >= THREAD_PIPELINES_MAX)
+			return -1;
+
+		/* Data plane thread */
+		td->p[td->n_pipelines] = p->p;
+
+		tdp->p = p->p;
+		for (i = 0; i < p->n_tables; i++)
+			tdp->table_data[i].a =
+				p->table[i].a;
+		tdp->n_tables = p->n_tables;
+
+		tdp->msgq_req = p->msgq_req;
+		tdp->msgq_rsp = p->msgq_rsp;
+		tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
+		tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
+
+		td->n_pipelines++;
+
+		/* Pipeline */
+		p->thread_id = thread_id;
+		p->enabled = 1;
+
+		return 0;
+	}
+
+	/* Allocate request */
+	req = thread_msg_alloc();
+	if (req == NULL)
+		return -1;
+
+	/* Write request */
+	req->type = THREAD_REQ_PIPELINE_ENABLE;
+	req->pipeline_enable.p = p->p;
+	for (i = 0; i < p->n_tables; i++)
+		req->pipeline_enable.table[i].a =
+			p->table[i].a;
+	req->pipeline_enable.msgq_req = p->msgq_req;
+	req->pipeline_enable.msgq_rsp = p->msgq_rsp;
+	req->pipeline_enable.timer_period_ms = p->timer_period_ms;
+	req->pipeline_enable.n_tables = p->n_tables;
+
+	/* Send request and wait for response */
+	rsp = thread_msg_send_recv(softnic, thread_id, req);
+	if (rsp == NULL)
+		return -1;
+
+	/* Read response */
+	status = rsp->status;
+
+	/* Free response */
+	thread_msg_free(rsp);
+
+	/* Request completion */
+	if (status)
+		return status;
+
+	p->thread_id = thread_id;
+	p->enabled = 1;
+
+	return 0;
+}
+
+int
+thread_pipeline_disable(struct pmd_internals *softnic,
+	uint32_t thread_id,
+	const char *pipeline_name)
+{
+	struct pipeline *p = pipeline_find(softnic, pipeline_name);
+	struct thread *t;
+	struct thread_msg_req *req;
+	struct thread_msg_rsp *rsp;
+	enum rte_lcore_state_t thread_state;
+	int status;
+
+	/* Check input params */
+	if ((thread_id >= RTE_MAX_LCORE) ||
+		(p == NULL))
+		return -1;
+
+	t = &softnic->thread[thread_id];
+	if (t->enabled == 0)
+		return -1;
+
+	if (p->enabled == 0)
+		return 0;
+
+	if (p->thread_id != thread_id)
+		return -1;
+
+	thread_state = rte_eal_get_lcore_state(thread_id);
+	if (thread_state != RUNNING) {
+		struct thread_data *td = &softnic->thread_data[thread_id];
+		uint32_t i;
+
+		for (i = 0; i < td->n_pipelines; i++) {
+			struct pipeline_data *tdp = &td->pipeline_data[i];
+
+			if (tdp->p != p->p)
+				continue;
+
+			/* Data plane thread */
+			if (i < td->n_pipelines - 1) {
+				struct rte_pipeline *pipeline_last =
+					td->p[td->n_pipelines - 1];
+				struct pipeline_data *tdp_last =
+					&td->pipeline_data[td->n_pipelines - 1];
+
+				td->p[i] = pipeline_last;
+				memcpy(tdp, tdp_last, sizeof(*tdp));
+			}
+
+			td->n_pipelines--;
+
+			/* Pipeline */
+			p->enabled = 0;
+
+			break;
+		}
+
+		return 0;
+	}
+
+	/* Allocate request */
+	req = thread_msg_alloc();
+	if (req == NULL)
+		return -1;
+
+	/* Write request */
+	req->type = THREAD_REQ_PIPELINE_DISABLE;
+	req->pipeline_disable.p = p->p;
+
+	/* Send request and wait for response */
+	rsp = thread_msg_send_recv(softnic, thread_id, req);
+	if (rsp == NULL)
+		return -1;
+
+	/* Read response */
+	status = rsp->status;
+
+	/* Free response */
+	thread_msg_free(rsp);
+
+	/* Request completion */
+	if (status)
+		return status;
+
+	p->enabled = 0;
+
+	return 0;
+}
+
+/**
  * Data plane threads: message handling
  */
 static inline struct thread_msg_req *
@@ -131,6 +375,79 @@ thread_msg_send(struct rte_ring *msgq_rsp,
 	} while (status == -ENOBUFS);
 }
 
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_enable(struct thread_data *t,
+	struct thread_msg_req *req)
+{
+	struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
+	struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
+	uint32_t i;
+
+	/* Request */
+	if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
+		rsp->status = -1;
+		return rsp;
+	}
+
+	t->p[t->n_pipelines] = req->pipeline_enable.p;
+
+	p->p = req->pipeline_enable.p;
+	for (i = 0; i < req->pipeline_enable.n_tables; i++)
+		p->table_data[i].a =
+			req->pipeline_enable.table[i].a;
+
+	p->n_tables = req->pipeline_enable.n_tables;
+
+	p->msgq_req = req->pipeline_enable.msgq_req;
+	p->msgq_rsp = req->pipeline_enable.msgq_rsp;
+	p->timer_period =
+		(rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
+	p->time_next = rte_get_tsc_cycles() + p->timer_period;
+
+	t->n_pipelines++;
+
+	/* Response */
+	rsp->status = 0;
+	return rsp;
+}
+
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_disable(struct thread_data *t,
+	struct thread_msg_req *req)
+{
+	struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
+	uint32_t n_pipelines = t->n_pipelines;
+	struct rte_pipeline *pipeline = req->pipeline_disable.p;
+	uint32_t i;
+
+	/* find pipeline */
+	for (i = 0; i < n_pipelines; i++) {
+		struct pipeline_data *p = &t->pipeline_data[i];
+
+		if (p->p != pipeline)
+			continue;
+
+		if (i < n_pipelines - 1) {
+			struct rte_pipeline *pipeline_last =
+				t->p[n_pipelines - 1];
+			struct pipeline_data *p_last =
+				&t->pipeline_data[n_pipelines - 1];
+
+			t->p[i] = pipeline_last;
+			memcpy(p, p_last, sizeof(*p));
+		}
+
+		t->n_pipelines--;
+
+		rsp->status = 0;
+		return rsp;
+	}
+
+	/* should not get here */
+	rsp->status = 0;
+	return rsp;
+}
+
 static void
 thread_msg_handle(struct thread_data *t)
 {
@@ -143,6 +460,14 @@ thread_msg_handle(struct thread_data *t)
 			break;
 
 		switch (req->type) {
+		case THREAD_REQ_PIPELINE_ENABLE:
+			rsp = thread_msg_handle_pipeline_enable(t, req);
+			break;
+
+		case THREAD_REQ_PIPELINE_DISABLE:
+			rsp = thread_msg_handle_pipeline_disable(t, req);
+			break;
+
 		default:
 			rsp = (struct thread_msg_rsp *) req;
 			rsp->status = -1;
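
Both disable paths above (the direct master-thread path taken when the data
plane thread is not running, and the THREAD_REQ_PIPELINE_DISABLE handler)
remove a pipeline from the per-thread array with the same O(1) "swap with
last" idiom, rather than shifting the tail down. A minimal, self-contained
sketch of that idiom follows; the names (slot, slot_remove, n_slots) are
illustrative and not from the patch:

    #include <stdio.h>
    #include <string.h>

    #define N_SLOTS_MAX 8

    struct slot {
            int id;
    };

    static struct slot slots[N_SLOTS_MAX];
    static unsigned int n_slots;

    /* Remove slot i by overwriting it with the last slot and shrinking
     * the array; ordering is not preserved, but no tail shift is needed. */
    static void
    slot_remove(unsigned int i)
    {
            if (i < n_slots - 1)
                    memcpy(&slots[i], &slots[n_slots - 1], sizeof(slots[i]));

            n_slots--;
    }

    int
    main(void)
    {
            unsigned int i;

            for (i = 0; i < 4; i++)
                    slots[n_slots++].id = (int)i;

            slot_remove(1); /* last slot takes index 1; order becomes 0, 3, 2 */

            for (i = 0; i < n_slots; i++)
                    printf("slot[%u].id = %d\n", i, slots[i].id);

            return 0;
    }

The trade-off is the same as in the patch: pipelines have no required order
within a thread, so constant-time removal is preferred over stable order.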