From patchwork Tue Feb 16 20:46:42 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Cristian Dumitrescu X-Patchwork-Id: 87955 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 62F49A054D; Tue, 16 Feb 2021 21:46:51 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 421261607D4; Tue, 16 Feb 2021 21:46:51 +0100 (CET) Received: from mga07.intel.com (mga07.intel.com [134.134.136.100]) by mails.dpdk.org (Postfix) with ESMTP id 965311607C0 for ; Tue, 16 Feb 2021 21:46:49 +0100 (CET) IronPort-SDR: o2S3ToXS1mTqbG7lVEoCCspgGXAavRwl2fTYf4iUpbABtnFRTrGi3FcMrffAYYJ/T/kv5/xUSf GLG+GuXxKlig== X-IronPort-AV: E=McAfee;i="6000,8403,9897"; a="247078495" X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="247078495" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 Feb 2021 12:46:47 -0800 IronPort-SDR: p3Ck9TqPWs1c0t1QczKF6aZiEXlGlCdSLagKu/5FWIDh+hlwSiJt5j+XiRIWd93x5QTXktNHXL ya/NtgdwQ3+w== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="493443097" Received: from silpixa00400573.ir.intel.com (HELO silpixa00400573.ger.corp.intel.com) ([10.237.223.107]) by fmsmga001.fm.intel.com with ESMTP; 16 Feb 2021 12:46:46 -0800 From: Cristian Dumitrescu To: dev@dpdk.org Date: Tue, 16 Feb 2021 20:46:42 +0000 Message-Id: <20210216204646.24196-1-cristian.dumitrescu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20210216202127.22803-1-cristian.dumitrescu@intel.com> References: <20210216202127.22803-1-cristian.dumitrescu@intel.com> Subject: [dpdk-dev] [PATCH v3 1/5] pipeline: improve table entry helpers X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 
Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Improve the internal table entry helper routines for key comparison, entry duplication and checks. Signed-off-by: Cristian Dumitrescu --- lib/librte_pipeline/rte_swx_ctl.c | 120 ++++++++++++++++-------------- 1 file changed, 65 insertions(+), 55 deletions(-) diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c index ee6df4544..af653d7f4 100644 --- a/lib/librte_pipeline/rte_swx_ctl.c +++ b/lib/librte_pipeline/rte_swx_ctl.c @@ -234,6 +234,26 @@ table_entry_alloc(struct table *table) return NULL; } +static int +table_entry_key_check_em(struct table *table, struct rte_swx_table_entry *entry) +{ + uint8_t *key_mask0 = table->params.key_mask0; + uint32_t key_size = table->params.key_size, i; + + if (!entry->key_mask) + return 0; + + for (i = 0; i < key_size; i++) { + uint8_t km0 = key_mask0[i]; + uint8_t km = entry->key_mask[i]; + + if ((km & km0) != km0) + return -EINVAL; + } + + return 0; +} + static int table_entry_check(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id, @@ -242,6 +262,7 @@ table_entry_check(struct rte_swx_ctl_pipeline *ctl, int data_check) { struct table *table = &ctl->tables[table_id]; + int status; CHECK(entry, EINVAL); @@ -266,7 +287,9 @@ table_entry_check(struct rte_swx_ctl_pipeline *ctl, break; case RTE_SWX_TABLE_MATCH_EXACT: - CHECK(!entry->key_mask, EINVAL); + status = table_entry_key_check_em(table, entry); + if (status) + return status; break; default: @@ -327,10 +350,7 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl, new_entry->key_signature = entry->key_signature; /* key_mask. 
*/ - if (table->params.match_type != RTE_SWX_TABLE_MATCH_EXACT) { - if (!entry->key_mask) - goto error; - + if (entry->key_mask) { new_entry->key_mask = malloc(table->params.key_size); if (!new_entry->key_mask) goto error; @@ -357,18 +377,24 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl, /* action_data. */ a = &ctl->actions[entry->action_id]; - if (a->data_size) { - if (!entry->action_data) - goto error; + if (a->data_size && !entry->action_data) + goto error; - new_entry->action_data = malloc(a->data_size); - if (!new_entry->action_data) - goto error; + /* The table layer provisions a constant action data size per + * entry, which should be the largest data size for all the + * actions enabled for the current table, and attempts to copy + * this many bytes each time a table entry is added, even if the + * specific action requires less data or even no data at all, + * hence we always have to allocate the max. + */ + new_entry->action_data = calloc(1, table->params.action_data_size); + if (!new_entry->action_data) + goto error; + if (a->data_size) memcpy(new_entry->action_data, entry->action_data, a->data_size); - } } return new_entry; @@ -378,58 +404,36 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl, return NULL; } -static int -entry_keycmp_em(struct rte_swx_table_entry *e0, - struct rte_swx_table_entry *e1, - uint32_t key_size) -{ - if (e0->key_signature != e1->key_signature) - return 1; /* Not equal. */ - - if (memcmp(e0->key, e1->key, key_size)) - return 1; /* Not equal. 
*/ - - return 0; /* Equal */ -} - -static int -entry_keycmp_wm(struct rte_swx_table_entry *e0 __rte_unused, - struct rte_swx_table_entry *e1 __rte_unused, - uint32_t key_size __rte_unused) -{ - /* TBD */ - - return 1; /* Not equal */ -} - -static int -entry_keycmp_lpm(struct rte_swx_table_entry *e0 __rte_unused, - struct rte_swx_table_entry *e1 __rte_unused, - uint32_t key_size __rte_unused) -{ - /* TBD */ - - return 1; /* Not equal */ -} - static int table_entry_keycmp(struct table *table, struct rte_swx_table_entry *e0, struct rte_swx_table_entry *e1) { - switch (table->params.match_type) { - case RTE_SWX_TABLE_MATCH_EXACT: - return entry_keycmp_em(e0, e1, table->params.key_size); + uint32_t key_size = table->params.key_size; + uint32_t i; + + for (i = 0; i < key_size; i++) { + uint8_t *key_mask0 = table->params.key_mask0; + uint8_t km0, km[2], k[2]; + + km0 = key_mask0 ? key_mask0[i] : 0xFF; + + km[0] = e0->key_mask ? e0->key_mask[i] : 0xFF; + km[1] = e1->key_mask ? e1->key_mask[i] : 0xFF; - case RTE_SWX_TABLE_MATCH_WILDCARD: - return entry_keycmp_wm(e0, e1, table->params.key_size); + k[0] = e0->key[i]; + k[1] = e1->key[i]; - case RTE_SWX_TABLE_MATCH_LPM: - return entry_keycmp_lpm(e0, e1, table->params.key_size); + /* Mask comparison. */ + if ((km[0] & km0) != (km[1] & km0)) + return 1; /* Not equal. */ - default: - return 1; /* Not equal. */ + /* Value comparison. */ + if ((k[0] & km[0] & km0) != (k[1] & km[1] & km0)) + return 1; /* Not equal. */ } + + return 0; /* Equal. 
*/ } static struct rte_swx_table_entry * @@ -893,6 +897,9 @@ rte_swx_ctl_pipeline_table_entry_add(struct rte_swx_ctl_pipeline *ctl, CHECK(table, EINVAL); table_id = table - ctl->tables; + CHECK(entry, EINVAL); + CHECK(!table_entry_check(ctl, table_id, entry, 1, 1), EINVAL); + new_entry = table_entry_duplicate(ctl, table_id, entry, 1, 1); CHECK(new_entry, ENOMEM); @@ -1095,6 +1102,9 @@ rte_swx_ctl_pipeline_table_default_entry_add(struct rte_swx_ctl_pipeline *ctl, table_id = table - ctl->tables; CHECK(!table->info.default_action_is_const, EINVAL); + CHECK(entry, EINVAL); + CHECK(!table_entry_check(ctl, table_id, entry, 0, 1), EINVAL); + new_entry = table_entry_duplicate(ctl, table_id, entry, 0, 1); CHECK(new_entry, ENOMEM); From patchwork Tue Feb 16 20:46:43 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Cristian Dumitrescu X-Patchwork-Id: 87956 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 55D8CA054D; Tue, 16 Feb 2021 21:46:56 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 6266D1607F3; Tue, 16 Feb 2021 21:46:52 +0100 (CET) Received: from mga07.intel.com (mga07.intel.com [134.134.136.100]) by mails.dpdk.org (Postfix) with ESMTP id 79C311607C0 for ; Tue, 16 Feb 2021 21:46:50 +0100 (CET) IronPort-SDR: 3/a0onrBQxXpaHN8JaY/2vm7TNLhMs2C8b34LumUKuBiTUU4huy6VbeqYCLC54ICTavFUFbmzz 4cPtnJSGpR3w== X-IronPort-AV: E=McAfee;i="6000,8403,9897"; a="247078499" X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="247078499" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 Feb 2021 12:46:48 -0800 IronPort-SDR: 
rq2aluMuwmw54sj7iOpKcssz2R0/fgpPwqm7yNriX0zTQDFYoXsf6h4i9CCxi/kLWO8bYCZqOt Hh7J7EJIsyKg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="493443107" Received: from silpixa00400573.ir.intel.com (HELO silpixa00400573.ger.corp.intel.com) ([10.237.223.107]) by fmsmga001.fm.intel.com with ESMTP; 16 Feb 2021 12:46:47 -0800 From: Cristian Dumitrescu To: dev@dpdk.org Cc: Venkata Suresh Kumar P , Churchill Khangar Date: Tue, 16 Feb 2021 20:46:43 +0000 Message-Id: <20210216204646.24196-2-cristian.dumitrescu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20210216204646.24196-1-cristian.dumitrescu@intel.com> References: <20210216202127.22803-1-cristian.dumitrescu@intel.com> <20210216204646.24196-1-cristian.dumitrescu@intel.com> Subject: [dpdk-dev] [PATCH v3 2/5] pipeline: improve table entry parsing X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Improve the table entry parsing: better code structure, enable parsing for the key field masks, allow comments and empty lines in the table entry files. 
Signed-off-by: Cristian Dumitrescu Signed-off-by: Venkata Suresh Kumar P Signed-off-by: Churchill Khangar --- examples/pipeline/cli.c | 21 +++- lib/librte_pipeline/rte_swx_ctl.c | 172 ++++++++++++++++++++---------- lib/librte_pipeline/rte_swx_ctl.h | 7 +- 3 files changed, 141 insertions(+), 59 deletions(-) diff --git a/examples/pipeline/cli.c b/examples/pipeline/cli.c index e97e12060..30c2dd34d 100644 --- a/examples/pipeline/cli.c +++ b/examples/pipeline/cli.c @@ -881,14 +881,19 @@ cmd_pipeline_table_update(char **tokens, if (file_add) for (line_id = 1; ; line_id++) { struct rte_swx_table_entry *entry; + int is_blank_or_comment; if (fgets(line, 2048, file_add) == NULL) break; entry = rte_swx_ctl_pipeline_table_entry_read(p->ctl, table_name, - line); + line, + &is_blank_or_comment); if (!entry) { + if (is_blank_or_comment) + continue; + snprintf(out, out_size, MSG_FILE_ERR, file_name_add, line_id); goto error; @@ -911,14 +916,19 @@ cmd_pipeline_table_update(char **tokens, if (file_delete) for (line_id = 1; ; line_id++) { struct rte_swx_table_entry *entry; + int is_blank_or_comment; if (fgets(line, 2048, file_delete) == NULL) break; entry = rte_swx_ctl_pipeline_table_entry_read(p->ctl, table_name, - line); + line, + &is_blank_or_comment); if (!entry) { + if (is_blank_or_comment) + continue; + snprintf(out, out_size, MSG_FILE_ERR, file_name_delete, line_id); goto error; @@ -940,14 +950,19 @@ cmd_pipeline_table_update(char **tokens, if (file_default) for (line_id = 1; ; line_id++) { struct rte_swx_table_entry *entry; + int is_blank_or_comment; if (fgets(line, 2048, file_default) == NULL) break; entry = rte_swx_ctl_pipeline_table_entry_read(p->ctl, table_name, - line); + line, + &is_blank_or_comment); if (!entry) { + if (is_blank_or_comment) + continue; + snprintf(out, out_size, MSG_FILE_ERR, file_name_default, line_id); goto error; diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c index af653d7f4..4a416bc71 100644 --- 
a/lib/librte_pipeline/rte_swx_ctl.c +++ b/lib/librte_pipeline/rte_swx_ctl.c @@ -1339,19 +1339,32 @@ rte_swx_ctl_pipeline_abort(struct rte_swx_ctl_pipeline *ctl) table_abort(ctl, i); } +static int +token_is_comment(const char *token) +{ + if ((token[0] == '#') || + (token[0] == ';') || + ((token[0] == '/') && (token[1] == '/'))) + return 1; /* TRUE. */ + + return 0; /* FALSE. */ +} + #define RTE_SWX_CTL_ENTRY_TOKENS_MAX 256 struct rte_swx_table_entry * rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl, const char *table_name, - const char *string) + const char *string, + int *is_blank_or_comment) { - char *tokens[RTE_SWX_CTL_ENTRY_TOKENS_MAX]; + char *token_array[RTE_SWX_CTL_ENTRY_TOKENS_MAX], **tokens; struct table *table; struct action *action; struct rte_swx_table_entry *entry = NULL; char *s0 = NULL, *s; uint32_t n_tokens = 0, arg_offset = 0, i; + int blank_or_comment = 0; /* Check input arguments. */ if (!ctl) @@ -1381,37 +1394,66 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl, char *token; token = strtok_r(s, " \f\n\r\t\v", &s); - if (!token) + if (!token || token_is_comment(token)) break; if (n_tokens >= RTE_SWX_CTL_ENTRY_TOKENS_MAX) goto error; - tokens[n_tokens] = token; + token_array[n_tokens] = token; n_tokens++; } - if ((n_tokens < 3 + table->info.n_match_fields) || - strcmp(tokens[0], "match") || - strcmp(tokens[1 + table->info.n_match_fields], "action")) - goto error; - - action = action_find(ctl, tokens[2 + table->info.n_match_fields]); - if (!action) + if (!n_tokens) { + blank_or_comment = 1; goto error; + } - if (n_tokens != 3 + table->info.n_match_fields + - action->info.n_args * 2) - goto error; + tokens = token_array; /* * Match. 
*/ + if (n_tokens && strcmp(tokens[0], "match")) + goto action; + + if (n_tokens < 1 + table->info.n_match_fields) + goto error; + for (i = 0; i < table->info.n_match_fields; i++) { struct rte_swx_ctl_table_match_field_info *mf = &table->mf[i]; - char *mf_val = tokens[1 + i]; - uint64_t val; + char *mf_val = tokens[1 + i], *mf_mask = NULL; + uint64_t val, mask = UINT64_MAX; + uint32_t offset = (mf->offset - table->mf[0].offset) / 8; + + /* + * Mask. + */ + mf_mask = strchr(mf_val, '/'); + if (mf_mask) { + *mf_mask = 0; + mf_mask++; + + /* Parse. */ + mask = strtoull(mf_mask, &mf_mask, 0); + if (mf_mask[0]) + goto error; + + /* Endianness conversion. */ + if (mf->is_header) + mask = field_hton(mask, mf->n_bits); + } + /* Copy to entry. */ + if (entry->key_mask) + memcpy(&entry->key_mask[offset], + (uint8_t *)&mask, + mf->n_bits / 8); + + /* + * Value. + */ + /* Parse. */ val = strtoull(mf_val, &mf_val, 0); if (mf_val[0]) goto error; @@ -1420,17 +1462,32 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl, if (mf->is_header) val = field_hton(val, mf->n_bits); - /* Copy key and key_mask to entry. */ - memcpy(&entry->key[(mf->offset - table->mf[0].offset) / 8], + /* Copy to entry. */ + memcpy(&entry->key[offset], (uint8_t *)&val, mf->n_bits / 8); - - /* TBD Set entry->key_mask for wildcard and LPM tables. */ } + tokens += 1 + table->info.n_match_fields; + n_tokens -= 1 + table->info.n_match_fields; + /* * Action. */ +action: + if (n_tokens && strcmp(tokens[0], "action")) + goto other; + + if (n_tokens < 2) + goto error; + + action = action_find(ctl, tokens[1]); + if (!action) + goto error; + + if (n_tokens < 2 + action->info.n_args * 2) + goto error; + /* action_id. 
*/ entry->action_id = action - ctl->actions; @@ -1441,8 +1498,8 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl, uint64_t val; int is_nbo = 0; - arg_name = tokens[3 + table->info.n_match_fields + i * 2]; - arg_val = tokens[3 + table->info.n_match_fields + i * 2 + 1]; + arg_name = tokens[2 + i * 2]; + arg_val = tokens[2 + i * 2 + 1]; if (strcmp(arg_name, arg->name) || (strlen(arg_val) < 4) || @@ -1473,15 +1530,50 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl, arg_offset += arg->n_bits / 8; } + tokens += 2 + action->info.n_args * 2; + n_tokens -= 2 + action->info.n_args * 2; + +other: + if (n_tokens) + goto error; + free(s0); return entry; error: table_entry_free(entry); free(s0); + if (is_blank_or_comment) + *is_blank_or_comment = blank_or_comment; return NULL; } +static void +table_entry_printf(FILE *f, + struct rte_swx_ctl_pipeline *ctl, + struct table *table, + struct rte_swx_table_entry *entry) +{ + struct action *action = &ctl->actions[entry->action_id]; + uint32_t i; + + fprintf(f, "match "); + for (i = 0; i < table->params.key_size; i++) + fprintf(f, "%02x", entry->key[i]); + + if (entry->key_mask) { + fprintf(f, "/"); + for (i = 0; i < table->params.key_size; i++) + fprintf(f, "%02x", entry->key_mask[i]); + } + + fprintf(f, " action %s ", action->info.name); + for (i = 0; i < action->data_size; i++) + fprintf(f, "%02x", entry->action_data[i]); + + fprintf(f, "\n"); +} + int rte_swx_ctl_pipeline_table_fprintf(FILE *f, struct rte_swx_ctl_pipeline *ctl, @@ -1512,47 +1604,17 @@ rte_swx_ctl_pipeline_table_fprintf(FILE *f, /* Table entries. 
*/ TAILQ_FOREACH(entry, &table->entries, node) { - struct action *action = &ctl->actions[entry->action_id]; - - fprintf(f, "match "); - for (i = 0; i < table->params.key_size; i++) - fprintf(f, "%02x", entry->key[i]); - - fprintf(f, " action %s ", action->info.name); - for (i = 0; i < action->data_size; i++) - fprintf(f, "%02x", entry->action_data[i]); - - fprintf(f, "\n"); + table_entry_printf(f, ctl, table, entry); n_entries++; } TAILQ_FOREACH(entry, &table->pending_modify0, node) { - struct action *action = &ctl->actions[entry->action_id]; - - fprintf(f, "match "); - for (i = 0; i < table->params.key_size; i++) - fprintf(f, "%02x", entry->key[i]); - - fprintf(f, " action %s ", action->info.name); - for (i = 0; i < action->data_size; i++) - fprintf(f, "%02x", entry->action_data[i]); - - fprintf(f, "\n"); + table_entry_printf(f, ctl, table, entry); n_entries++; } TAILQ_FOREACH(entry, &table->pending_delete, node) { - struct action *action = &ctl->actions[entry->action_id]; - - fprintf(f, "match "); - for (i = 0; i < table->params.key_size; i++) - fprintf(f, "%02x", entry->key[i]); - - fprintf(f, " action %s ", action->info.name); - for (i = 0; i < action->data_size; i++) - fprintf(f, "%02x", entry->action_data[i]); - - fprintf(f, "\n"); + table_entry_printf(f, ctl, table, entry); n_entries++; } diff --git a/lib/librte_pipeline/rte_swx_ctl.h b/lib/librte_pipeline/rte_swx_ctl.h index 32815b69e..530671db1 100644 --- a/lib/librte_pipeline/rte_swx_ctl.h +++ b/lib/librte_pipeline/rte_swx_ctl.h @@ -521,6 +521,10 @@ rte_swx_ctl_pipeline_abort(struct rte_swx_ctl_pipeline *ctl); * Table name. * @param[in] string * String containing the table entry. + * @param[out] is_blank_or_comment + * On error, this argument provides an indication of whether *string* contains + * an invalid table entry (set to zero) or a blank or comment line that should + * typically be ignored (set to a non-zero value). 
* @return * 0 on success or the following error codes otherwise: * -EINVAL: Invalid argument. @@ -529,7 +533,8 @@ __rte_experimental struct rte_swx_table_entry * rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl, const char *table_name, - const char *string); + const char *string, + int *is_blank_or_comment); /** * Pipeline table print to file From patchwork Tue Feb 16 20:46:44 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Cristian Dumitrescu X-Patchwork-Id: 87957 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7A8A9A054D; Tue, 16 Feb 2021 21:47:02 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id A8E47160800; Tue, 16 Feb 2021 21:46:53 +0100 (CET) Received: from mga07.intel.com (mga07.intel.com [134.134.136.100]) by mails.dpdk.org (Postfix) with ESMTP id 58CD51607E5 for ; Tue, 16 Feb 2021 21:46:51 +0100 (CET) IronPort-SDR: w1GUMxN9905RknSrkRxJCnKzzQwOjxJGD5s5XLat1A+mcLUCtKzdJdqa4PJnB0ZImBnjfgSmBZ UKUR7D3hM7rQ== X-IronPort-AV: E=McAfee;i="6000,8403,9897"; a="247078502" X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="247078502" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 Feb 2021 12:46:49 -0800 IronPort-SDR: xhPpuUFepMvLY2zNQDqSyXEVEoWSkTy6WOPlCEl8/VNgsiyiGsSyiWempI/fUYCnqPeRip4OEr yOZ05nsbJrjA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="493443108" Received: from silpixa00400573.ir.intel.com (HELO silpixa00400573.ger.corp.intel.com) ([10.237.223.107]) by fmsmga001.fm.intel.com with ESMTP; 16 Feb 2021 12:46:49 -0800 From: Cristian Dumitrescu To: dev@dpdk.org Date: Tue, 16 Feb 2021 20:46:44 
+0000 Message-Id: <20210216204646.24196-3-cristian.dumitrescu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20210216204646.24196-1-cristian.dumitrescu@intel.com> References: <20210216202127.22803-1-cristian.dumitrescu@intel.com> <20210216204646.24196-1-cristian.dumitrescu@intel.com> Subject: [dpdk-dev] [PATCH v3 3/5] pipeline: support non-incremental table updates X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Some table types (e.g. exact match/hash) allow for incremental table updates, while others (e.g. wildcard match/ACL) do not. The former is already supported, the latter is enabled by this patch. Signed-off-by: Cristian Dumitrescu --- lib/librte_pipeline/rte_swx_ctl.c | 315 ++++++++++++++++++++++++------ 1 file changed, 258 insertions(+), 57 deletions(-) diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c index 4a416bc71..6bef9c311 100644 --- a/lib/librte_pipeline/rte_swx_ctl.c +++ b/lib/librte_pipeline/rte_swx_ctl.c @@ -42,11 +42,38 @@ struct table { struct rte_swx_table_ops ops; struct rte_swx_table_params params; + /* Set of "stable" keys: these keys are currently part of the table; + * these keys will be preserved with no action data changes after the + * next commit. + */ struct rte_swx_table_entry_list entries; + + /* Set of new keys: these keys are currently NOT part of the table; + * these keys will be added to the table on the next commit, if + * the commit operation is successful. + */ struct rte_swx_table_entry_list pending_add; + + /* Set of keys to be modified: these keys are currently part of the + * table; these keys are still going to be part of the table after the + * next commit, but their action data will be modified if the commit + * operation is successful. 
The modify0 list contains the keys with the + * current action data, the modify1 list contains the keys with the + * modified action data. + */ struct rte_swx_table_entry_list pending_modify0; struct rte_swx_table_entry_list pending_modify1; + + /* Set of keys to be deleted: these keys are currently part of the + * table; these keys are to be deleted from the table on the next + * commit, if the commit operation is successful. + */ struct rte_swx_table_entry_list pending_delete; + + /* The pending default action: this is NOT the current default action; + * this will be the new default action after the next commit, if the + * next commit operation is successful. + */ struct rte_swx_table_entry *pending_default; int is_stub; @@ -609,6 +636,31 @@ table_pending_default_free(struct table *table) table->pending_default = NULL; } +static int +table_is_update_pending(struct table *table, int consider_pending_default) +{ + struct rte_swx_table_entry *e; + uint32_t n = 0; + + /* Pending add. */ + TAILQ_FOREACH(e, &table->pending_add, node) + n++; + + /* Pending modify. */ + TAILQ_FOREACH(e, &table->pending_modify1, node) + n++; + + /* Pending delete. */ + TAILQ_FOREACH(e, &table->pending_delete, node) + n++; + + /* Pending default. */ + if (consider_pending_default && table->pending_default) + n++; + + return n; +} + static void table_free(struct rte_swx_ctl_pipeline *ctl) { @@ -680,7 +732,7 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl) struct rte_swx_table_state *ts_next = &ctl->ts_next[i]; /* Table object. */ - if (!table->is_stub) { + if (!table->is_stub && table->ops.add) { ts_next->obj = table->ops.create(&table->params, &table->entries, table->info.args, @@ -691,6 +743,9 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl) } } + if (!table->is_stub && !table->ops.add) + ts_next->obj = ts->obj; + /* Default action data: duplicate from current table state. 
*/ ts_next->default_action_data = malloc(table->params.action_data_size); @@ -1114,54 +1169,173 @@ rte_swx_ctl_pipeline_table_default_entry_add(struct rte_swx_ctl_pipeline *ctl, return 0; } + +static void +table_entry_list_free(struct rte_swx_table_entry_list *list) +{ + for ( ; ; ) { + struct rte_swx_table_entry *entry; + + entry = TAILQ_FIRST(list); + if (!entry) + break; + + TAILQ_REMOVE(list, entry, node); + table_entry_free(entry); + } +} + +static int +table_entry_list_duplicate(struct rte_swx_ctl_pipeline *ctl, + uint32_t table_id, + struct rte_swx_table_entry_list *dst, + struct rte_swx_table_entry_list *src) +{ + struct rte_swx_table_entry *src_entry; + + TAILQ_FOREACH(src_entry, src, node) { + struct rte_swx_table_entry *dst_entry; + + dst_entry = table_entry_duplicate(ctl, table_id, src_entry, 1, 1); + if (!dst_entry) + goto error; + + TAILQ_INSERT_TAIL(dst, dst_entry, node); + } + + return 0; + +error: + table_entry_list_free(dst); + return -ENOMEM; +} + +/* This commit stage contains all the operations that can fail; in case ANY of + * them fails for ANY table, ALL of them are rolled back for ALL the tables. + */ static int -table_rollfwd0(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id) +table_rollfwd0(struct rte_swx_ctl_pipeline *ctl, + uint32_t table_id, + uint32_t after_swap) { struct table *table = &ctl->tables[table_id]; + struct rte_swx_table_state *ts = &ctl->ts[table_id]; struct rte_swx_table_state *ts_next = &ctl->ts_next[table_id]; - struct rte_swx_table_entry *entry; - /* Reset counters. */ - table->n_add = 0; - table->n_modify = 0; - table->n_delete = 0; + if (table->is_stub || !table_is_update_pending(table, 0)) + return 0; - /* Add pending rules. */ - TAILQ_FOREACH(entry, &table->pending_add, node) { - int status; + /* + * Current table supports incremental update. + */ + if (table->ops.add) { + /* Reset counters. 
*/ + table->n_add = 0; + table->n_modify = 0; + table->n_delete = 0; - status = table->ops.add(ts_next->obj, entry); - if (status) - return status; + /* Add pending rules. */ + struct rte_swx_table_entry *entry; - table->n_add++; - } + TAILQ_FOREACH(entry, &table->pending_add, node) { + int status; - /* Modify pending rules. */ - TAILQ_FOREACH(entry, &table->pending_modify1, node) { - int status; + status = table->ops.add(ts_next->obj, entry); + if (status) + return status; - status = table->ops.add(ts_next->obj, entry); - if (status) - return status; + table->n_add++; + } + + /* Modify pending rules. */ + TAILQ_FOREACH(entry, &table->pending_modify1, node) { + int status; + + status = table->ops.add(ts_next->obj, entry); + if (status) + return status; + + table->n_modify++; + } + + /* Delete pending rules. */ + TAILQ_FOREACH(entry, &table->pending_delete, node) { + int status; - table->n_modify++; + status = table->ops.del(ts_next->obj, entry); + if (status) + return status; + + table->n_delete++; + } + + return 0; } - /* Delete pending rules. */ - TAILQ_FOREACH(entry, &table->pending_delete, node) { + /* + * Current table does NOT support incremental update. + */ + if (!after_swap) { + struct rte_swx_table_entry_list list; int status; - status = table->ops.del(ts_next->obj, entry); + /* Create updated list of entries included. */ + TAILQ_INIT(&list); + + status = table_entry_list_duplicate(ctl, + table_id, + &list, + &table->entries); + if (status) + goto error; + + status = table_entry_list_duplicate(ctl, + table_id, + &list, + &table->pending_add); + if (status) + goto error; + + status = table_entry_list_duplicate(ctl, + table_id, + &list, + &table->pending_modify1); if (status) - return status; + goto error; + + /* Create new table object with the updates included. 
*/ + ts_next->obj = table->ops.create(&table->params, + &list, + table->info.args, + ctl->numa_node); + if (!ts_next->obj) { + status = -ENODEV; + goto error; + } + + table_entry_list_free(&list); + + return 0; - table->n_delete++; +error: + table_entry_list_free(&list); + return status; } + /* Free the old table object. */ + if (ts_next->obj && table->ops.free) + table->ops.free(ts_next->obj); + + /* Copy over the new table object. */ + ts_next->obj = ts->obj; + return 0; } +/* This commit stage contains all the operations that cannot fail. They are + * executed only if the previous stage was successful for ALL the tables. Hence, + * none of these operations has to be rolled back for ANY table. + */ static void table_rollfwd1(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id) { @@ -1186,6 +1360,10 @@ table_rollfwd1(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id) ts_next->default_action_id = action_id; } +/* This last commit stage is simply finalizing a successful commit operation. + * This stage is only executed if all the previous stages were successful. This + * stage cannot fail. + */ static void table_rollfwd2(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id) { @@ -1212,43 +1390,66 @@ table_rollfwd2(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id) table_pending_default_free(table); } +/* The rollback stage is only executed when the commit failed, i.e. ANY of the + * commit operations that can fail did fail for ANY table. It reverts ALL the + * tables to their state before the commit started, as if the commit never + * happened. + */ static void table_rollback(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id) { struct table *table = &ctl->tables[table_id]; struct rte_swx_table_state *ts_next = &ctl->ts_next[table_id]; - struct rte_swx_table_entry *entry; - /* Add back all the entries that were just deleted. 
*/ - TAILQ_FOREACH(entry, &table->pending_delete, node) { - if (!table->n_delete) - break; + if (table->is_stub || !table_is_update_pending(table, 0)) + return; - table->ops.add(ts_next->obj, entry); - table->n_delete--; - } + if (table->ops.add) { + struct rte_swx_table_entry *entry; - /* Add back the old copy for all the entries that were just - * modified. - */ - TAILQ_FOREACH(entry, &table->pending_modify0, node) { - if (!table->n_modify) - break; + /* Add back all the entries that were just deleted. */ + TAILQ_FOREACH(entry, &table->pending_delete, node) { + if (!table->n_delete) + break; - table->ops.add(ts_next->obj, entry); - table->n_modify--; - } + table->ops.add(ts_next->obj, entry); + table->n_delete--; + } - /* Delete all the entries that were just added. */ - TAILQ_FOREACH(entry, &table->pending_add, node) { - if (!table->n_add) - break; + /* Add back the old copy for all the entries that were just + * modified. + */ + TAILQ_FOREACH(entry, &table->pending_modify0, node) { + if (!table->n_modify) + break; + + table->ops.add(ts_next->obj, entry); + table->n_modify--; + } - table->ops.del(ts_next->obj, entry); - table->n_add--; + /* Delete all the entries that were just added. */ + TAILQ_FOREACH(entry, &table->pending_add, node) { + if (!table->n_add) + break; + + table->ops.del(ts_next->obj, entry); + table->n_add--; + } + } else { + struct rte_swx_table_state *ts = &ctl->ts[table_id]; + + /* Free the new table object, as update was cancelled. */ + if (ts_next->obj && table->ops.free) + table->ops.free(ts_next->obj); + + /* Reinstate the old table object. */ + ts_next->obj = ts->obj; } } +/* This stage is conditionally executed (as instructed by the user) after a + * failed commit operation to remove ALL the pending work for ALL the tables. + */ static void table_abort(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id) { @@ -1290,7 +1491,7 @@ rte_swx_ctl_pipeline_commit(struct rte_swx_ctl_pipeline *ctl, int abort_on_fail) * ts. 
*/ for (i = 0; i < ctl->info.n_tables; i++) { - status = table_rollfwd0(ctl, i); + status = table_rollfwd0(ctl, i, 0); if (status) goto rollback; } @@ -1310,7 +1511,7 @@ rte_swx_ctl_pipeline_commit(struct rte_swx_ctl_pipeline *ctl, int abort_on_fail) /* Operate the changes on the current ts_next, which is the previous ts. */ for (i = 0; i < ctl->info.n_tables; i++) { - table_rollfwd0(ctl, i); + table_rollfwd0(ctl, i, 1); table_rollfwd1(ctl, i); table_rollfwd2(ctl, i); } @@ -1444,11 +1645,11 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl, mask = field_hton(mask, mf->n_bits); } - /* Copy to entry. */ - if (entry->key_mask) - memcpy(&entry->key_mask[offset], - (uint8_t *)&mask, - mf->n_bits / 8); + /* Copy to entry. */ + if (entry->key_mask) + memcpy(&entry->key_mask[offset], + (uint8_t *)&mask, + mf->n_bits / 8); /* * Value. From patchwork Tue Feb 16 20:46:45 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Cristian Dumitrescu X-Patchwork-Id: 87958 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 37AC4A054D; Tue, 16 Feb 2021 21:47:08 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E24D916080C; Tue, 16 Feb 2021 21:46:54 +0100 (CET) Received: from mga07.intel.com (mga07.intel.com [134.134.136.100]) by mails.dpdk.org (Postfix) with ESMTP id 6449F1607F4 for ; Tue, 16 Feb 2021 21:46:52 +0100 (CET) IronPort-SDR: 0ZSjfAULl9wgfIU3H/lZQrTYjMhIKXBoPNDEvyukvsybwYN8cf8Nl9h3xuAW77a/NTxyk9o0zs 8B8fqYFhL1FQ== X-IronPort-AV: E=McAfee;i="6000,8403,9897"; a="247078505" X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="247078505" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by orsmga105.jf.intel.com with 
ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 Feb 2021 12:46:49 -0800 IronPort-SDR: /vBDSpUTJC75Lz2gweleO/KKmxdpVaefQFh+UyHmQLjXdFYjpzxUVTEe3uFBHRhLVEVDsZAAfr aLy+ObxBaYGQ== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="493443111" Received: from silpixa00400573.ir.intel.com (HELO silpixa00400573.ger.corp.intel.com) ([10.237.223.107]) by fmsmga001.fm.intel.com with ESMTP; 16 Feb 2021 12:46:50 -0800 From: Cristian Dumitrescu To: dev@dpdk.org Date: Tue, 16 Feb 2021 20:46:45 +0000 Message-Id: <20210216204646.24196-4-cristian.dumitrescu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20210216204646.24196-1-cristian.dumitrescu@intel.com> References: <20210216202127.22803-1-cristian.dumitrescu@intel.com> <20210216204646.24196-1-cristian.dumitrescu@intel.com> Subject: [dpdk-dev] [PATCH v3 4/5] table: add table entry priority X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support for table entry priority, which is required for the wildcard match/ACL table type. Signed-off-by: Cristian Dumitrescu --- lib/librte_pipeline/rte_swx_ctl.c | 27 +++++++++++++++++++++++++++ lib/librte_table/rte_swx_table.h | 9 +++++++++ 2 files changed, 36 insertions(+) diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c index 6bef9c311..2e4538bd0 100644 --- a/lib/librte_pipeline/rte_swx_ctl.c +++ b/lib/librte_pipeline/rte_swx_ctl.c @@ -386,6 +386,9 @@ table_entry_duplicate(struct rte_swx_ctl_pipeline *ctl, entry->key_mask, table->params.key_size); } + + /* key_priority. */ + new_entry->key_priority = entry->key_priority; } if (data_duplicate) { @@ -1672,6 +1675,28 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl, tokens += 1 + table->info.n_match_fields; n_tokens -= 1 + table->info.n_match_fields; + /* + * Match priority. 
+ */ + if (n_tokens && !strcmp(tokens[0], "priority")) { + char *priority = tokens[1]; + uint32_t val; + + if (n_tokens < 2) + goto error; + + /* Parse. */ + val = strtoul(priority, &priority, 0); + if (priority[0]) + goto error; + + /* Copy to entry. */ + entry->key_priority = val; + + tokens += 2; + n_tokens -= 2; + } + /* * Action. */ @@ -1768,6 +1793,8 @@ table_entry_printf(FILE *f, fprintf(f, "%02x", entry->key_mask[i]); } + fprintf(f, " priority %u", entry->key_priority); + fprintf(f, " action %s ", action->info.name); for (i = 0; i < action->data_size; i++) fprintf(f, "%02x", entry->action_data[i]); diff --git a/lib/librte_table/rte_swx_table.h b/lib/librte_table/rte_swx_table.h index 5a3137ec5..00446718f 100644 --- a/lib/librte_table/rte_swx_table.h +++ b/lib/librte_table/rte_swx_table.h @@ -89,6 +89,15 @@ struct rte_swx_table_entry { */ uint64_t key_signature; + /** Key priority for the current entry. Useful for wildcard match (as + * match rules are commonly overlapping with other rules), ignored for + * exact match (as match rules never overlap, hence all rules have the + * same match priority) and for LPM (match priority is driven by the + * prefix length, with non-overlapping prefixes essentially having the + * same match priority). Value 0 indicates the highest match priority. + */ + uint32_t key_priority; + /** Action ID for the current entry. 
*/ uint64_t action_id; From patchwork Tue Feb 16 20:46:46 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Cristian Dumitrescu X-Patchwork-Id: 87959 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 28B9CA054D; Tue, 16 Feb 2021 21:47:15 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 7794416081D; Tue, 16 Feb 2021 21:46:56 +0100 (CET) Received: from mga07.intel.com (mga07.intel.com [134.134.136.100]) by mails.dpdk.org (Postfix) with ESMTP id 1BAED1607F8 for ; Tue, 16 Feb 2021 21:46:52 +0100 (CET) IronPort-SDR: pbDCEqofoZPUzQVCH6lbO7HABzDnC8/dG7/TLiRqZ/lxkyeZ1DGEELGGeIpPUyJU+LXlwPjYLO ZwEOFg3ecoFQ== X-IronPort-AV: E=McAfee;i="6000,8403,9897"; a="247078508" X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="247078508" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 16 Feb 2021 12:46:51 -0800 IronPort-SDR: 0yClna0vSdJ0jUHGpR3IeNoboDpvJsMdqCfKVzHIjy7VxDrDWhUXBZA8xGPenkxclBh0V3As8b gVRVaLJ3Qr6A== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.81,184,1610438400"; d="scan'208";a="493443119" Received: from silpixa00400573.ir.intel.com (HELO silpixa00400573.ger.corp.intel.com) ([10.237.223.107]) by fmsmga001.fm.intel.com with ESMTP; 16 Feb 2021 12:46:51 -0800 From: Cristian Dumitrescu To: dev@dpdk.org Cc: Churchill Khangar Date: Tue, 16 Feb 2021 20:46:46 +0000 Message-Id: <20210216204646.24196-5-cristian.dumitrescu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20210216204646.24196-1-cristian.dumitrescu@intel.com> References: <20210216202127.22803-1-cristian.dumitrescu@intel.com> <20210216204646.24196-1-cristian.dumitrescu@intel.com> Subject: [dpdk-dev] [PATCH v3 
5/5] table: add wildcard match table type X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add the wildcard match/ACL table type for the SWX pipeline, which is used under the hood by the table instruction. Signed-off-by: Cristian Dumitrescu Signed-off-by: Churchill Khangar --- doc/api/doxy-api-index.md | 1 + examples/pipeline/obj.c | 8 + lib/librte_table/meson.build | 8 +- lib/librte_table/rte_swx_table_wm.c | 470 ++++++++++++++++++++++++++++ lib/librte_table/rte_swx_table_wm.h | 27 ++ lib/librte_table/version.map | 3 + 6 files changed, 515 insertions(+), 2 deletions(-) create mode 100644 lib/librte_table/rte_swx_table_wm.c create mode 100644 lib/librte_table/rte_swx_table_wm.h diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md index 748514e24..94e9937be 100644 --- a/doc/api/doxy-api-index.md +++ b/doc/api/doxy-api-index.md @@ -187,6 +187,7 @@ The public API headers are grouped by topics: * SWX table: [table] (@ref rte_swx_table.h), [table_em] (@ref rte_swx_table_em.h) + [table_wm] (@ref rte_swx_table_wm.h) * [graph] (@ref rte_graph.h): [graph_worker] (@ref rte_graph_worker.h) * graph_nodes: diff --git a/examples/pipeline/obj.c b/examples/pipeline/obj.c index 84bbcf2b2..7be61228b 100644 --- a/examples/pipeline/obj.c +++ b/examples/pipeline/obj.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -415,6 +416,13 @@ pipeline_create(struct obj *obj, const char *name, int numa_node) if (status) goto error; + status = rte_swx_pipeline_table_type_register(p, + "wildcard", + RTE_SWX_TABLE_MATCH_WILDCARD, + &rte_swx_table_wildcard_match_ops); + if (status) + goto error; + /* Node allocation */ pipeline = calloc(1, sizeof(struct pipeline)); if (pipeline == NULL) diff --git a/lib/librte_table/meson.build b/lib/librte_table/meson.build index aa1e1d038..007ffe013 
100644 --- a/lib/librte_table/meson.build +++ b/lib/librte_table/meson.build @@ -12,7 +12,9 @@ sources = files('rte_table_acl.c', 'rte_table_hash_lru.c', 'rte_table_array.c', 'rte_table_stub.c', - 'rte_swx_table_em.c',) + 'rte_swx_table_em.c', + 'rte_swx_table_wm.c', + ) headers = files('rte_table.h', 'rte_table_acl.h', 'rte_table_lpm.h', @@ -24,7 +26,9 @@ headers = files('rte_table.h', 'rte_table_array.h', 'rte_table_stub.h', 'rte_swx_table.h', - 'rte_swx_table_em.h',) + 'rte_swx_table_em.h', + 'rte_swx_table_wm.h', + ) deps += ['mbuf', 'port', 'lpm', 'hash', 'acl'] indirect_headers += files('rte_lru_x86.h', diff --git a/lib/librte_table/rte_swx_table_wm.c b/lib/librte_table/rte_swx_table_wm.c new file mode 100644 index 000000000..9924231b3 --- /dev/null +++ b/lib/librte_table/rte_swx_table_wm.c @@ -0,0 +1,470 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "rte_swx_table_wm.h" + +#ifndef RTE_SWX_TABLE_EM_USE_HUGE_PAGES +#define RTE_SWX_TABLE_EM_USE_HUGE_PAGES 1 +#endif + +#if RTE_SWX_TABLE_EM_USE_HUGE_PAGES + +#include + +static void * +env_malloc(size_t size, size_t alignment, int numa_node) +{ + return rte_zmalloc_socket(NULL, size, alignment, numa_node); +} + +static void +env_free(void *start, size_t size __rte_unused) +{ + rte_free(start); +} + +#else + +#include + +static void * +env_malloc(size_t size, size_t alignment __rte_unused, int numa_node) +{ + return numa_alloc_onnode(size, numa_node); +} + +static void +env_free(void *start, size_t size) +{ + numa_free(start, size); +} + +#endif + +static char *get_unique_name(void) +{ + char *name; + uint64_t *tsc; + + name = calloc(7, 1); + if (!name) + return NULL; + + tsc = (uint64_t *) name; + *tsc = rte_get_tsc_cycles(); + return name; +} + +static uint32_t +count_entries(struct rte_swx_table_entry_list *entries) +{ + struct rte_swx_table_entry *entry; + uint32_t n_entries 
= 0; + + if (!entries) + return 0; + + TAILQ_FOREACH(entry, entries, node) + n_entries++; + + return n_entries; +} + +static int +acl_table_cfg_get(struct rte_acl_config *cfg, struct rte_swx_table_params *p) +{ + uint32_t byte_id = 0, field_id = 0; + + /* cfg->num_categories. */ + cfg->num_categories = 1; + + /* cfg->defs and cfg->num_fields. */ + for (byte_id = 0; byte_id < p->key_size; ) { + uint32_t field_size = field_id ? 4 : 1; + uint8_t byte = p->key_mask0 ? p->key_mask0[byte_id] : 0xFF; + + if (!byte) { + byte_id++; + continue; + } + + if (field_id == RTE_ACL_MAX_FIELDS) + return -1; + + cfg->defs[field_id].type = RTE_ACL_FIELD_TYPE_BITMASK; + cfg->defs[field_id].size = field_size; + cfg->defs[field_id].field_index = field_id; + cfg->defs[field_id].input_index = field_id; + cfg->defs[field_id].offset = p->key_offset + byte_id; + + field_id++; + byte_id += field_size; + } + + if (!field_id) + return -1; + + cfg->num_fields = field_id; + + /* cfg->max_size. */ + cfg->max_size = 0; + + return 0; +} + +static void +acl_table_rule_field8(uint8_t *value, + uint8_t *mask, + uint8_t *key_mask0, + uint8_t *key_mask, + uint8_t *key, + uint32_t offset) +{ + uint8_t km0, km; + + km0 = key_mask0 ? key_mask0[offset] : 0xFF; + km = key_mask ? key_mask[offset] : 0xFF; + + *value = key[offset]; + *mask = km0 & km; +} + +static void +acl_table_rule_field32(uint32_t *value, + uint32_t *mask, + uint8_t *key_mask0, + uint8_t *key_mask, + uint8_t *key, + uint32_t key_size, + uint32_t offset) +{ + uint32_t km0[4], km[4], k[4]; + uint32_t byte_id; + + /* Byte 0 = MSB, byte 3 = LSB. */ + for (byte_id = 0; byte_id < 4; byte_id++) { + if (offset + byte_id >= key_size) { + km0[byte_id] = 0; + km[byte_id] = 0; + k[byte_id] = 0; + continue; + } + + km0[byte_id] = key_mask0 ? key_mask0[offset + byte_id] : 0xFF; + km[byte_id] = key_mask ? 
key_mask[offset + byte_id] : 0xFF; + k[byte_id] = key[offset + byte_id]; + } + + *value = (k[0] << 24) | + (k[1] << 16) | + (k[2] << 8) | + k[3]; + + *mask = ((km[0] & km0[0]) << 24) | + ((km[1] & km0[1]) << 16) | + ((km[2] & km0[2]) << 8) | + (km[3] & km0[3]); +} + +RTE_ACL_RULE_DEF(acl_rule, RTE_ACL_MAX_FIELDS); + +static struct rte_acl_rule * +acl_table_rules_get(struct rte_acl_config *acl_cfg, + struct rte_swx_table_params *p, + struct rte_swx_table_entry_list *entries, + uint32_t n_entries) +{ + struct rte_swx_table_entry *entry; + uint8_t *memory; + uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields); + uint32_t n_fields = acl_cfg->num_fields; + uint32_t rule_id; + + if (!n_entries) + return NULL; + + memory = malloc(n_entries * acl_rule_size); + if (!memory) + return NULL; + + rule_id = 0; + TAILQ_FOREACH(entry, entries, node) { + uint8_t *m = &memory[rule_id * acl_rule_size]; + struct acl_rule *acl_rule = (struct acl_rule *)m; + uint32_t field_id; + + acl_rule->data.category_mask = 1; + acl_rule->data.priority = RTE_ACL_MAX_PRIORITY - + entry->key_priority; + acl_rule->data.userdata = rule_id + 1; + + for (field_id = 0; field_id < n_fields; field_id++) { + struct rte_acl_field *f = &acl_rule->field[field_id]; + uint32_t size = acl_cfg->defs[field_id].size; + uint32_t offset = acl_cfg->defs[field_id].offset - + p->key_offset; + + if (size == 1) { + uint8_t value, mask; + + acl_table_rule_field8(&value, + &mask, + p->key_mask0, + entry->key_mask, + entry->key, + offset); + + f->value.u8 = value; + f->mask_range.u8 = mask; + } else { + uint32_t value, mask; + + acl_table_rule_field32(&value, + &mask, + p->key_mask0, + entry->key_mask, + entry->key, + p->key_size, + offset); + + f->value.u32 = value; + f->mask_range.u32 = mask; + } + } + + rule_id++; + } + + return (struct rte_acl_rule *)memory; +} + +/* When the table to be created has no rules, the expected behavior is to always + * get lookup miss for any input key. 
To achieve this, we add a single bogus + * rule to the table with the rule user data set to 0, i.e. the value returned + * when lookup miss takes place. Whether lookup hit (the bogus rule is hit) or + * miss, a user data of 0 is returned, which for the ACL library is equivalent + * to lookup miss. + */ +static struct rte_acl_rule * +acl_table_rules_default_get(struct rte_acl_config *acl_cfg) +{ + struct rte_acl_rule *acl_rule; + uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields); + + acl_rule = calloc(1, acl_rule_size); + if (!acl_rule) + return NULL; + + acl_rule->data.category_mask = 1; + acl_rule->data.priority = RTE_ACL_MAX_PRIORITY; + acl_rule->data.userdata = 0; + + memset(&acl_rule[1], 0xFF, acl_rule_size - sizeof(struct rte_acl_rule)); + + return acl_rule; +} + +static struct rte_acl_ctx * +acl_table_create(struct rte_swx_table_params *params, + struct rte_swx_table_entry_list *entries, + uint32_t n_entries, + int numa_node) +{ + struct rte_acl_param acl_params = {0}; + struct rte_acl_config acl_cfg = {0}; + struct rte_acl_ctx *acl_ctx = NULL; + struct rte_acl_rule *acl_rules = NULL; + char *name = NULL; + int status = 0; + + /* ACL config data structures. */ + name = get_unique_name(); + if (!name) { + status = -1; + goto free_resources; + } + + status = acl_table_cfg_get(&acl_cfg, params); + if (status) + goto free_resources; + + acl_rules = n_entries ? + acl_table_rules_get(&acl_cfg, params, entries, n_entries) : + acl_table_rules_default_get(&acl_cfg); + if (!acl_rules) { + status = -1; + goto free_resources; + } + + n_entries = n_entries ? n_entries : 1; + + /* ACL create. */ + acl_params.name = name; + acl_params.socket_id = numa_node; + acl_params.rule_size = RTE_ACL_RULE_SZ(acl_cfg.num_fields); + acl_params.max_rule_num = n_entries; + + acl_ctx = rte_acl_create(&acl_params); + if (!acl_ctx) { + status = -1; + goto free_resources; + } + + /* ACL add rules. 
*/ + status = rte_acl_add_rules(acl_ctx, acl_rules, n_entries); + if (status) + goto free_resources; + + /* ACL build. */ + status = rte_acl_build(acl_ctx, &acl_cfg); + +free_resources: + if (status && acl_ctx) + rte_acl_free(acl_ctx); + + free(acl_rules); + + free(name); + + return status ? NULL : acl_ctx; +} + +static void +entry_data_copy(uint8_t *data, + struct rte_swx_table_entry_list *entries, + uint32_t n_entries, + uint32_t entry_data_size) +{ + struct rte_swx_table_entry *entry; + uint32_t i = 0; + + if (!n_entries) + return; + + TAILQ_FOREACH(entry, entries, node) { + uint64_t *d = (uint64_t *)&data[i * entry_data_size]; + + d[0] = entry->action_id; + memcpy(&d[1], entry->action_data, entry_data_size - 8); + + i++; + } +} + +struct table { + struct rte_acl_ctx *acl_ctx; + uint8_t *data; + size_t total_size; + uint32_t entry_data_size; +}; + +static void +table_free(void *table) +{ + struct table *t = table; + + if (!t) + return; + + if (t->acl_ctx) + rte_acl_free(t->acl_ctx); + env_free(t, t->total_size); +} + +static void * +table_create(struct rte_swx_table_params *params, + struct rte_swx_table_entry_list *entries, + const char *args __rte_unused, + int numa_node) +{ + struct table *t = NULL; + size_t meta_sz, data_sz, total_size; + uint32_t entry_data_size; + uint32_t n_entries = count_entries(entries); + + /* Check input arguments. */ + if (!params || !params->key_size) + goto error; + + /* Memory allocation and initialization. 
*/ + entry_data_size = 8 + params->action_data_size; + meta_sz = sizeof(struct table); + data_sz = n_entries * entry_data_size; + total_size = meta_sz + data_sz; + + t = env_malloc(total_size, RTE_CACHE_LINE_SIZE, numa_node); + if (!t) + goto error; + + memset(t, 0, total_size); + t->entry_data_size = entry_data_size; + t->total_size = total_size; + t->data = (uint8_t *)&t[1]; + + t->acl_ctx = acl_table_create(params, entries, n_entries, numa_node); + if (!t->acl_ctx) + goto error; + + entry_data_copy(t->data, entries, n_entries, entry_data_size); + + return t; + +error: + table_free(t); + return NULL; +} + +struct mailbox { + +}; + +static uint64_t +table_mailbox_size_get(void) +{ + return sizeof(struct mailbox); +} + +static int +table_lookup(void *table, + void *mailbox __rte_unused, + const uint8_t **key, + uint64_t *action_id, + uint8_t **action_data, + int *hit) +{ + struct table *t = table; + uint8_t *data; + uint32_t user_data; + + rte_acl_classify(t->acl_ctx, key, &user_data, 1, 1); + if (!user_data) { + *hit = 0; + return 1; + } + + data = &t->data[(user_data - 1) * t->entry_data_size]; + *action_id = ((uint64_t *)data)[0]; + *action_data = &data[8]; + *hit = 1; + return 1; +} + +struct rte_swx_table_ops rte_swx_table_wildcard_match_ops = { + .footprint_get = NULL, + .mailbox_size_get = table_mailbox_size_get, + .create = table_create, + .add = NULL, + .del = NULL, + .lkp = (rte_swx_table_lookup_t)table_lookup, + .free = table_free, +}; diff --git a/lib/librte_table/rte_swx_table_wm.h b/lib/librte_table/rte_swx_table_wm.h new file mode 100644 index 000000000..a716536ca --- /dev/null +++ b/lib/librte_table/rte_swx_table_wm.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021 Intel Corporation + */ +#ifndef __INCLUDE_RTE_SWX_TABLE_WM_H__ +#define __INCLUDE_RTE_SWX_TABLE_WM_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file + * RTE SWX Wildcard Match Table + */ + +#include + +#include + +/** Wildcard match table 
operations. */ +extern struct rte_swx_table_ops rte_swx_table_wildcard_match_ops; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/librte_table/version.map b/lib/librte_table/version.map index bea2252a4..eb0291ac4 100644 --- a/lib/librte_table/version.map +++ b/lib/librte_table/version.map @@ -25,4 +25,7 @@ EXPERIMENTAL { # added in 20.11 rte_swx_table_exact_match_ops; rte_swx_table_exact_match_unoptimized_ops; + + # added in 21.05 + rte_swx_table_wildcard_match_ops; };