From patchwork Mon Feb 15 16:21:49 2021
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Cristian Dumitrescu
X-Patchwork-Id: 87924
X-Patchwork-Delegate: thomas@monjalon.net
From: Cristian Dumitrescu
To: dev@dpdk.org
Date: Mon, 15 Feb 2021 16:21:49 +0000
Message-Id: <20210215162151.5655-3-cristian.dumitrescu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20210215162151.5655-1-cristian.dumitrescu@intel.com>
References: <20210215162151.5655-1-cristian.dumitrescu@intel.com>
Subject: [dpdk-dev] [PATCH 3/5] pipeline: support non-incremental table updates
List-Id: DPDK patches and discussions
Sender: "dev"

Some table types (e.g. exact match/hash) allow for incremental table
updates, while others (e.g. wildcard match/ACL) do not. The former case
is already supported; the latter is enabled by this patch.

Signed-off-by: Cristian Dumitrescu
---
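The change can be summarized as follows: when a table driver exposes an
add() operation, pending entries are applied in place on the next table
state (the incremental path that already exists); when it does not (e.g.
a wildcard match/ACL table), a complete entry list is built from the
current entries plus the pending adds and modifies, a brand new table
object is created from that list and swapped in at commit time, and the
old object is freed afterwards. The stand-alone sketch below shows this
decision in miniature; it is an illustration only, the toy_* names are
invented for the example and nothing in it is DPDK API.

/*
 * Illustration only: toy_table, toy_ops, toy_commit, etc. are invented
 * names, not DPDK API and not the patch code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_table {
	int *keys;	/* stands in for the real table entries */
	size_t n_keys;
};

struct toy_ops {
	int (*add)(struct toy_table *t, int key); /* NULL: no incremental updates */
	struct toy_table *(*create)(const int *keys, size_t n_keys);
	void (*free)(struct toy_table *t);
};

static struct toy_table *
toy_create(const int *keys, size_t n_keys)
{
	struct toy_table *t = calloc(1, sizeof(*t));

	if (!t || !(t->keys = malloc(n_keys * sizeof(*keys)))) {
		free(t);
		return NULL;
	}
	memcpy(t->keys, keys, n_keys * sizeof(*keys));
	t->n_keys = n_keys;
	return t;
}

static void
toy_free(struct toy_table *t)
{
	if (t) {
		free(t->keys);
		free(t);
	}
}

/* Commit pending keys: incremental when ops->add exists, otherwise build a
 * full entry list, create a new table object from it, swap it in and free
 * the old object (the non-incremental strategy described above).
 */
static int
toy_commit(struct toy_table **table, const struct toy_ops *ops,
	   const int *pending, size_t n_pending)
{
	struct toy_table *old = *table, *new_obj;
	size_t i, n = old->n_keys + n_pending;
	int *all;

	if (ops->add) {
		for (i = 0; i < n_pending; i++)
			if (ops->add(old, pending[i]))
				return -1;
		return 0;
	}

	all = malloc(n * sizeof(*all));
	if (!all)
		return -1;
	memcpy(all, old->keys, old->n_keys * sizeof(*all));
	memcpy(all + old->n_keys, pending, n_pending * sizeof(*all));

	new_obj = ops->create(all, n);	/* rebuild from the full list */
	free(all);
	if (!new_obj)
		return -1;

	*table = new_obj;	/* swap */
	ops->free(old);		/* free the replaced object */
	return 0;
}

int
main(void)
{
	static const int base[] = {1, 2, 3}, pending[] = {10, 20};
	const struct toy_ops acl_like = {
		.add = NULL, .create = toy_create, .free = toy_free,
	};
	struct toy_table *t = toy_create(base, 3);

	if (!t || toy_commit(&t, &acl_like, pending, 2))
		return 1;
	printf("table now holds %zu keys\n", t->n_keys);
	toy_free(t);
	return 0;
}

The rollback path follows the same shape: with no incremental add/delete
ops there is nothing to undo entry by entry, so the not-yet-swapped new
object is simply freed and the old table object is reinstated.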
 lib/librte_pipeline/rte_swx_ctl.c | 258 +++++++++++++++++++++++-------
 1 file changed, 197 insertions(+), 61 deletions(-)

diff --git a/lib/librte_pipeline/rte_swx_ctl.c b/lib/librte_pipeline/rte_swx_ctl.c
index 480e34238..20e2ac067 100644
--- a/lib/librte_pipeline/rte_swx_ctl.c
+++ b/lib/librte_pipeline/rte_swx_ctl.c
@@ -603,6 +603,31 @@ table_pending_default_free(struct table *table)
 	table->pending_default = NULL;
 }
 
+static int
+table_is_update_pending(struct table *table, int consider_pending_default)
+{
+	struct rte_swx_table_entry *e;
+	uint32_t n = 0;
+
+	/* Pending add. */
+	TAILQ_FOREACH(e, &table->pending_add, node)
+		n++;
+
+	/* Pending modify. */
+	TAILQ_FOREACH(e, &table->pending_modify1, node)
+		n++;
+
+	/* Pending delete. */
+	TAILQ_FOREACH(e, &table->pending_delete, node)
+		n++;
+
+	/* Pending default. */
+	if (consider_pending_default && table->pending_default)
+		n++;
+
+	return n;
+}
+
 static void
 table_free(struct rte_swx_ctl_pipeline *ctl)
 {
@@ -674,7 +699,7 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl)
 		struct rte_swx_table_state *ts_next = &ctl->ts_next[i];
 
 		/* Table object. */
-		if (!table->is_stub) {
+		if (!table->is_stub && table->ops.add) {
 			ts_next->obj = table->ops.create(&table->params,
 							 &table->entries,
 							 table->info.args,
@@ -685,6 +710,9 @@ table_state_create(struct rte_swx_ctl_pipeline *ctl)
 			}
 		}
 
+		if (!table->is_stub && !table->ops.add)
+			ts_next->obj = ts->obj;
+
 		/* Default action data: duplicate from current table state. */
 		ts_next->default_action_data =
 			malloc(table->params.action_data_size);
@@ -1108,52 +1136,145 @@ rte_swx_ctl_pipeline_table_default_entry_add(struct rte_swx_ctl_pipeline *ctl,
 	return 0;
 }
 
+
+static void
+table_entry_list_free(struct rte_swx_table_entry_list *list)
+{
+	for ( ; ; ) {
+		struct rte_swx_table_entry *entry;
+
+		entry = TAILQ_FIRST(list);
+		if (!entry)
+			break;
+
+		TAILQ_REMOVE(list, entry, node);
+		table_entry_free(entry);
+	}
+}
+
 static int
-table_rollfwd0(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
+table_entry_list_duplicate(struct rte_swx_ctl_pipeline *ctl,
+			   uint32_t table_id,
+			   struct rte_swx_table_entry_list *dst,
+			   struct rte_swx_table_entry_list *src)
+{
+	struct rte_swx_table_entry *src_entry;
+
+	TAILQ_FOREACH(src_entry, src, node) {
+		struct rte_swx_table_entry *dst_entry;
+
+		dst_entry = table_entry_duplicate(ctl, table_id, src_entry, 1, 1);
+		if (!dst_entry)
+			goto error;
+
+		TAILQ_INSERT_TAIL(dst, dst_entry, node);
+	}
+
+	return 0;
+
+error:
+	table_entry_list_free(dst);
+	return -ENOMEM;
+}
+
+static int
+table_rollfwd0(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id, uint32_t after_swap)
 {
 	struct table *table = &ctl->tables[table_id];
 	struct rte_swx_table_state *ts_next = &ctl->ts_next[table_id];
-	struct rte_swx_table_entry *entry;
 
-	/* Reset counters. */
-	table->n_add = 0;
-	table->n_modify = 0;
-	table->n_delete = 0;
+	if (table->is_stub || !table_is_update_pending(table, 0))
+		return 0;
 
-	/* Add pending rules. */
-	TAILQ_FOREACH(entry, &table->pending_add, node) {
-		int status;
+	if (table->ops.add) {
+		/* Reset counters. */
+		table->n_add = 0;
+		table->n_modify = 0;
+		table->n_delete = 0;
 
-		status = table->ops.add(ts_next->obj, entry);
-		if (status)
-			return status;
+		/* Add pending rules. */
+		struct rte_swx_table_entry *entry;
 
-		table->n_add++;
-	}
+		TAILQ_FOREACH(entry, &table->pending_add, node) {
+			int status;
 
-	/* Modify pending rules. */
-	TAILQ_FOREACH(entry, &table->pending_modify1, node) {
-		int status;
+			status = table->ops.add(ts_next->obj, entry);
+			if (status)
+				return status;
 
-		status = table->ops.add(ts_next->obj, entry);
-		if (status)
-			return status;
+			table->n_add++;
+		}
 
-		table->n_modify++;
-	}
+		/* Modify pending rules. */
+		TAILQ_FOREACH(entry, &table->pending_modify1, node) {
+			int status;
 
-	/* Delete pending rules. */
-	TAILQ_FOREACH(entry, &table->pending_delete, node) {
-		int status;
+			status = table->ops.add(ts_next->obj, entry);
+			if (status)
+				return status;
 
-		status = table->ops.del(ts_next->obj, entry);
-		if (status)
+			table->n_modify++;
+		}
+
+		/* Delete pending rules. */
+		TAILQ_FOREACH(entry, &table->pending_delete, node) {
+			int status;
+
+			status = table->ops.del(ts_next->obj, entry);
+			if (status)
+				return status;
+
+			table->n_delete++;
+		}
+
+		return 0;
+	} else {
+		if (!after_swap) {
+			struct rte_swx_table_entry_list list;
+			int status;
+
+			/* Create updated list of entries. */
+			TAILQ_INIT(&list);
+
+			status = table_entry_list_duplicate(ctl, table_id, &list, &table->entries);
+			if (status)
+				goto error;
+
+			status = table_entry_list_duplicate(ctl, table_id, &list, &table->pending_add);
+			if (status)
+				goto error;
+
+			status = table_entry_list_duplicate(ctl, table_id, &list, &table->pending_modify1);
+			if (status)
+				goto error;
+
+			/* Create new table object with the updates included. */
+			ts_next->obj = table->ops.create(&table->params, &list, table->info.args, ctl->numa_node);
+			if (!ts_next->obj) {
+				status = -ENODEV;
+				goto error;
+			}
+
+			table_entry_list_free(&list);
+
+			return 0;
+
+error:
+			table_entry_list_free(&list);
 			return status;
+		} else {
+			struct rte_swx_table_state *ts = &ctl->ts[table_id];
 
-		table->n_delete++;
-	}
+			/* Free the old table object. */
+			if (ts_next->obj && table->ops.free)
+				table->ops.free(ts_next->obj);
 
-	return 0;
+			/* Copy over the new table object. */
+			ts_next->obj = ts->obj;
+
+			return 0;
+		}
+	}
 }
 
 static void
@@ -1211,35 +1332,50 @@ table_rollback(struct rte_swx_ctl_pipeline *ctl, uint32_t table_id)
 {
 	struct table *table = &ctl->tables[table_id];
 	struct rte_swx_table_state *ts_next = &ctl->ts_next[table_id];
-	struct rte_swx_table_entry *entry;
 
-	/* Add back all the entries that were just deleted. */
-	TAILQ_FOREACH(entry, &table->pending_delete, node) {
-		if (!table->n_delete)
-			break;
+	if (table->is_stub || !table_is_update_pending(table, 0))
+		return;
 
-		table->ops.add(ts_next->obj, entry);
-		table->n_delete--;
-	}
+	if (table->ops.add) {
+		struct rte_swx_table_entry *entry;
 
-	/* Add back the old copy for all the entries that were just
-	 * modified.
-	 */
-	TAILQ_FOREACH(entry, &table->pending_modify0, node) {
-		if (!table->n_modify)
-			break;
+		/* Add back all the entries that were just deleted. */
+		TAILQ_FOREACH(entry, &table->pending_delete, node) {
+			if (!table->n_delete)
+				break;
 
-		table->ops.add(ts_next->obj, entry);
-		table->n_modify--;
-	}
+			table->ops.add(ts_next->obj, entry);
+			table->n_delete--;
+		}
 
-	/* Delete all the entries that were just added. */
-	TAILQ_FOREACH(entry, &table->pending_add, node) {
-		if (!table->n_add)
-			break;
+		/* Add back the old copy for all the entries that were just
+		 * modified.
+		 */
+		TAILQ_FOREACH(entry, &table->pending_modify0, node) {
+			if (!table->n_modify)
+				break;
+
+			table->ops.add(ts_next->obj, entry);
+			table->n_modify--;
+		}
 
-		table->ops.del(ts_next->obj, entry);
-		table->n_add--;
+		/* Delete all the entries that were just added. */
+		TAILQ_FOREACH(entry, &table->pending_add, node) {
+			if (!table->n_add)
+				break;
+
+			table->ops.del(ts_next->obj, entry);
+			table->n_add--;
+		}
+	} else {
+		struct rte_swx_table_state *ts = &ctl->ts[table_id];
+
+		/* Free the new table object, as the update was cancelled. */
+		if (ts_next->obj && table->ops.free)
+			table->ops.free(ts_next->obj);
+
+		/* Reinstate the old table object. */
+		ts_next->obj = ts->obj;
 	}
 }
 
@@ -1284,7 +1420,7 @@ rte_swx_ctl_pipeline_commit(struct rte_swx_ctl_pipeline *ctl, int abort_on_fail)
 	 * ts.
 	 */
 	for (i = 0; i < ctl->info.n_tables; i++) {
-		status = table_rollfwd0(ctl, i);
+		status = table_rollfwd0(ctl, i, 0);
 		if (status)
 			goto rollback;
 	}
@@ -1304,7 +1440,7 @@ rte_swx_ctl_pipeline_commit(struct rte_swx_ctl_pipeline *ctl, int abort_on_fail)
 
 	/* Operate the changes on the current ts_next, which is the previous ts. */
 	for (i = 0; i < ctl->info.n_tables; i++) {
-		table_rollfwd0(ctl, i);
+		table_rollfwd0(ctl, i, 1);
 		table_rollfwd1(ctl, i);
 		table_rollfwd2(ctl, i);
 	}
@@ -1438,11 +1574,11 @@ rte_swx_ctl_pipeline_table_entry_read(struct rte_swx_ctl_pipeline *ctl,
 			mask = field_hton(mask, mf->n_bits);
 		}
 
-			/* Copy to entry. */
-			if (entry->key_mask)
-				memcpy(&entry->key_mask[offset],
-				       (uint8_t *)&mask,
-				       mf->n_bits / 8);
+		/* Copy to entry. */
+		if (entry->key_mask)
+			memcpy(&entry->key_mask[offset],
+			       (uint8_t *)&mask,
+			       mf->n_bits / 8);
 
 		/*
 		 * Value.