[RFC,1/3] graph: add feature arc support
Commit Message
add feature arc to allow dynamic steering of packets across graph nodes
based on protocol features enabled on incoming or outgoing interface
Signed-off-by: Nitin Saxena <nsaxena@marvell.com>
---
lib/graph/graph_feature_arc.c | 959 +++++++++++++++++++++++
lib/graph/meson.build | 2 +
lib/graph/rte_graph_feature_arc.h | 373 +++++++++
lib/graph/rte_graph_feature_arc_worker.h | 548 +++++++++++++
lib/graph/version.map | 17 +
5 files changed, 1899 insertions(+)
create mode 100644 lib/graph/graph_feature_arc.c
create mode 100644 lib/graph/rte_graph_feature_arc.h
create mode 100644 lib/graph/rte_graph_feature_arc_worker.h
Comments
> -----Original Message-----
> From: Nitin Saxena <nsaxena@marvell.com>
> Sent: Saturday, September 7, 2024 1:01 PM
> To: Jerin Jacob <jerinj@marvell.com>; Kiran Kumar Kokkilagadda
> <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; Zhirun Yan <yanzhirun_163@163.com>
> Cc: dev@dpdk.org; Nitin Saxena <nsaxena16@gmail.com>
> Subject: [RFC PATCH 1/3] graph: add feature arc support
>
> add feature arc to allow dynamic steering of packets across graph nodes
> based on protocol features enabled on incoming or outgoing interface
>
> Signed-off-by: Nitin Saxena <nsaxena@marvell.com>
> ---
> lib/graph/graph_feature_arc.c | 959 +++++++++++++++++++++++
> lib/graph/meson.build | 2 +
> lib/graph/rte_graph_feature_arc.h | 373 +++++++++
> lib/graph/rte_graph_feature_arc_worker.h | 548 +++++++++++++
> lib/graph/version.map | 17 +
> 5 files changed, 1899 insertions(+)
> create mode 100644 lib/graph/graph_feature_arc.c
> create mode 100644 lib/graph/rte_graph_feature_arc.h
> create mode 100644 lib/graph/rte_graph_feature_arc_worker.h
>
> diff --git a/lib/graph/graph_feature_arc.c b/lib/graph/graph_feature_arc.c
> new file mode 100644
> index 0000000000..3b05bac137
> --- /dev/null
> +++ b/lib/graph/graph_feature_arc.c
> @@ -0,0 +1,959 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2024 Marvell International Ltd.
> + */
> +
> +#include "graph_private.h"
> +#include <rte_graph_feature_arc_worker.h>
> +#include <rte_malloc.h>
> +
> +#define __RTE_GRAPH_FEATURE_ARC_MAX 32
> +
> +#define ARC_PASSIVE_LIST(arc) (arc->active_feature_list ^ 0x1)
> +
> +#define rte_graph_uint_cast(x) ((unsigned int)x)
> +#define feat_dbg graph_err
> +
> +rte_graph_feature_arc_main_t *__feature_arc_main;
> +
> +/* Make sure fast path cache line is compact */
> +_Static_assert((offsetof(struct rte_graph_feature_arc, slow_path_variables)
> + - offsetof(struct rte_graph_feature_arc, fast_path_variables))
> + <= RTE_CACHE_LINE_SIZE);
> +
> +
> +static int
> +feature_lookup(struct rte_graph_feature_arc *arc, const char *feat_name,
> + struct rte_graph_feature_node_list **ffinfo, uint32_t *slot)
> +{
> + struct rte_graph_feature_node_list *finfo = NULL;
> + const char *name;
> +
> + if (!feat_name)
> + return -1;
> +
> + if (slot)
> + *slot = 0;
> +
> + STAILQ_FOREACH(finfo, &arc->all_features, next_feature) {
> + RTE_VERIFY(finfo->feature_arc == arc);
> + name = rte_node_id_to_name(finfo->feature_node->id);
> + if (!strncmp(name, feat_name, RTE_GRAPH_NAMESIZE)) {
> + if (ffinfo)
> + *ffinfo = finfo;
> + return 0;
> + }
> + if (slot)
> + (*slot)++;
> + }
> + return -1;
> +}
> +
> +static int
> +feature_arc_node_info_lookup(struct rte_graph_feature_arc *arc, uint32_t feature_index,
> + struct rte_graph_feature_node_list **ppfinfo)
> +{
> + struct rte_graph_feature_node_list *finfo = NULL;
> + uint32_t index = 0;
> +
> + if (!ppfinfo)
> + return -1;
> +
> + *ppfinfo = NULL;
> + STAILQ_FOREACH(finfo, &arc->all_features, next_feature) {
> + if (index == feature_index) {
> + if (finfo->node_index == feature_index)
> + return -1;
> + *ppfinfo = finfo;
> + }
> + index++;
> + }
> + if (feature_index && (index >= feature_index))
> + return -1;
> +
> + return 0;
> +}
> +
> +static void
> +prepare_feature_arc(struct rte_graph_feature_arc *arc)
> +{
> + struct rte_graph_feature_node_list *finfo = NULL;
> + uint32_t index = 0;
> +
> + STAILQ_FOREACH(finfo, &arc->all_features, next_feature) {
> + finfo->node_index = index;
> + index++;
> + }
> +}
> +
> +static int
> +feature_arc_lookup(rte_graph_feature_arc_t _arc)
> +{
> + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> + rte_graph_feature_arc_main_t *dm = __feature_arc_main;
> + uint32_t iter;
> +
> + if (!__feature_arc_main)
> + return -1;
> +
> + for (iter = 0; iter < dm->max_feature_arcs; iter++) {
> + if (dm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
> + continue;
> +
> + if (arc == (rte_graph_feature_arc_get(dm->feature_arcs[iter])))
> + return 0;
> + }
> + return -1;
> +}
> +
> +static int
> +get_existing_edge(const char *arc_name, struct rte_node_register *parent_node,
> + struct rte_node_register *child_node, rte_edge_t *_edge)
> +{
> + char **next_edges = NULL;
> + uint32_t count, i;
> +
> + RTE_SET_USED(arc_name);
> +
> + count = rte_node_edge_get(parent_node->id, NULL);
> + next_edges = malloc(count);
> +
> + if (!next_edges)
> + return -1;
> +
> + count = rte_node_edge_get(parent_node->id, next_edges);
> + for (i = 0; i < count; i++) {
> + if (strstr(child_node->name, next_edges[i])) {
> + feat_dbg("%s: Edge exists [%s[%u]: \"%s\"]",
> arc_name,
> + parent_node->name, i, child_node->name);
> + if (_edge)
> + *_edge = (rte_edge_t)i;
> +
> + free(next_edges);
> + return 0;
> + }
> + }
> + free(next_edges);
> +
> + return -1;
> +}
> +
> +static int
> +connect_graph_nodes(struct rte_node_register *parent_node, struct rte_node_register *child_node,
> + rte_edge_t *_edge, char *arc_name)
> +{
> + const char *next_node = NULL;
> + rte_edge_t edge;
> +
> + if (!get_existing_edge(arc_name, parent_node, child_node, &edge)) {
> + feat_dbg("%s: add_feature: Edge reused [%s[%u]: \"%s\"]",
> arc_name,
> + parent_node->name, edge, child_node->name);
> +
> + if (_edge)
> + *_edge = edge;
> +
> + return 0;
> + }
> +
> + /* Node to be added */
> + next_node = child_node->name;
> +
> + edge = rte_node_edge_update(parent_node->id, RTE_EDGE_ID_INVALID, &next_node, 1);
> +
> + if (edge == RTE_EDGE_ID_INVALID) {
> + graph_err("edge invalid");
> + return -1;
> + }
> + edge = rte_node_edge_count(parent_node->id) - 1;
> +
> + feat_dbg("%s: add_feature: edge added [%s[%u]: \"%s\"]", arc_name,
> parent_node->name, edge,
> + child_node->name);
> +
> + if (_edge)
> + *_edge = edge;
> +
> + return 0;
> +}
> +
> +static int
> +feature_arc_init(rte_graph_feature_arc_main_t **pfl, uint32_t max_feature_arcs)
> +{
> + rte_graph_feature_arc_main_t *pm = NULL;
> + uint32_t i;
> + size_t sz;
> +
> + if (!pfl)
> + return -1;
> +
> + sz = sizeof(rte_graph_feature_arc_main_t) +
> + (sizeof(pm->feature_arcs[0]) * max_feature_arcs);
> +
> + pm = malloc(sz);
> + if (!pm)
> + return -1;
> +
> + memset(pm, 0, sz);
> +
> + for (i = 0; i < max_feature_arcs; i++)
> + pm->feature_arcs[i] = RTE_GRAPH_FEATURE_ARC_INITIALIZER;
> +
> + pm->max_feature_arcs = max_feature_arcs;
> +
> + *pfl = pm;
> +
> + return 0;
> +}
> +
> +int
> +rte_graph_feature_arc_init(int max_feature_arcs)
> +{
> + if (!max_feature_arcs)
> + return -1;
> +
> + if (__feature_arc_main)
> + return -1;
> +
> + return feature_arc_init(&__feature_arc_main, max_feature_arcs);
> +}
> +
> +static void
> +feature_arc_list_reset(struct rte_graph_feature_arc *arc, uint32_t list_index)
> +{
> + rte_graph_feature_data_t *fdata = NULL;
> + rte_graph_feature_list_t *list = NULL;
> + struct rte_graph_feature *feat = NULL;
> + uint32_t i, j;
> +
> + list = arc->feature_list[list_index];
> + feat = arc->features[list_index];
> +
> + /*Initialize variables*/
> + memset(feat, 0, arc->feature_size);
> + memset(list, 0, arc->feature_list_size);
> +
> + /* Initialize feature and feature_data */
> + for (i = 0; i < arc->max_features; i++) {
> + feat = __rte_graph_feature_get(arc, i, list_index);
> + feat->this_feature_index = i;
> +
> + for (j = 0; j < arc->max_indexes; j++) {
> + fdata = rte_graph_feature_data_get(arc, feat, j);
> + fdata->next_enabled_feature = RTE_GRAPH_FEATURE_INVALID;
> + fdata->next_edge = UINT16_MAX;
> + fdata->user_data = UINT32_MAX;
> + }
> + }
> +
> + for (i = 0; i < arc->max_indexes; i++)
> + list->first_enabled_feature_by_index[i] = RTE_GRAPH_FEATURE_INVALID;
> +}
> +
> +static int
> +feature_arc_list_init(struct rte_graph_feature_arc *arc, const char *flist_name,
> + rte_graph_feature_list_t **pplist,
> + struct rte_graph_feature **ppfeature, uint32_t list_index)
> +{
> + char fname[2 * RTE_GRAPH_FEATURE_ARC_NAMELEN];
> + size_t list_size, feat_size, fdata_size;
> + rte_graph_feature_list_t *list = NULL;
> + struct rte_graph_feature *feat = NULL;
> +
> + list_size = sizeof(list->first_enabled_feature_by_index[0]) * arc->max_indexes;
> +
> + list = rte_malloc(flist_name, list_size, RTE_CACHE_LINE_SIZE);
> + if (!list)
> + return -ENOMEM;
> +
> + fdata_size = arc->max_indexes * sizeof(rte_graph_feature_data_t);
> +
> + /* Let one feature capture complete cache lines */
> + feat_size = RTE_ALIGN_CEIL(sizeof(struct rte_graph_feature) + fdata_size,
> + RTE_CACHE_LINE_SIZE);
> +
> + snprintf(fname, sizeof(fname), "%s-%s", arc->feature_arc_name, "feat");
> +
> + feat = rte_malloc(fname, feat_size * arc->max_features, RTE_CACHE_LINE_SIZE);
> + if (!feat) {
> + rte_free(list);
> + return -ENOMEM;
> + }
> + arc->feature_size = feat_size;
> + arc->feature_data_size = fdata_size;
> + arc->feature_list_size = list_size;
> +
> + /* Initialize list */
> + list->indexed_by_features = feat;
> + *pplist = list;
> + *ppfeature = feat;
> +
> + feature_arc_list_reset(arc, list_index);
> +
> + return 0;
> +}
> +
> +static void
> +feature_arc_list_destroy(rte_graph_feature_list_t *list)
> +{
> + rte_free(list->indexed_by_features);
Do you need to free the individual rte_graph_feature objects here that were allocated in feature_arc_list_init()?
> + rte_free(list);
> +}
> +
> +int
> +rte_graph_feature_arc_create(const char *feature_arc_name, int max_features, int max_indexes,
> + struct rte_node_register *start_node, rte_graph_feature_arc_t *_arc)
> +{
> + char name[2 * RTE_GRAPH_FEATURE_ARC_NAMELEN];
> + rte_graph_feature_arc_main_t *dfm = NULL;
> + struct rte_graph_feature_arc *arc = NULL;
> + struct rte_graph_feature_data *gfd = NULL;
> + struct rte_graph_feature *df = NULL;
> + uint32_t iter, j, arc_index;
> + size_t sz;
> +
> + if (!_arc)
> + return -1;
> +
> + if (max_features < 2)
> + return -1;
> +
> + if (!start_node)
> + return -1;
> +
> + if (!feature_arc_name)
> + return -1;
> +
> + if (max_features > RTE_GRAPH_FEATURE_MAX_PER_ARC) {
> + graph_err("Invalid max features: %u", max_features);
> + return -1;
> + }
> +
> + /*
> + * Application hasn't called rte_graph_feature_arc_init(). Initialize with
> + * default values
> + */
> + if (!__feature_arc_main) {
> + if (rte_graph_feature_arc_init((int)__RTE_GRAPH_FEATURE_ARC_MAX) < 0) {
> + graph_err("rte_graph_feature_arc_init() failed");
> + return -1;
> + }
> + }
> +
> + dfm = __feature_arc_main;
> +
> + /* threshold check */
> + if (dfm->num_feature_arcs > (dfm->max_feature_arcs - 1)) {
> + graph_err("max threshold for num_feature_arcs: %d
> reached",
> + dfm->max_feature_arcs - 1);
> + return -1;
> + }
> + /* Find the free slot for feature arc */
> + for (iter = 0; iter < dfm->max_feature_arcs; iter++) {
> + if (dfm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
> + break;
> + }
> + arc_index = iter;
> +
> + if (arc_index >= dfm->max_feature_arcs) {
> + graph_err("No free slot found for num_feature_arc");
> + return -1;
> + }
> +
> + /* This should not happen */
> + RTE_VERIFY(dfm->feature_arcs[arc_index] == RTE_GRAPH_FEATURE_ARC_INITIALIZER);
> +
> + /* size of feature arc + feature_bit_mask_by_index */
> + sz = sizeof(*arc) + (sizeof(uint64_t) * max_indexes);
> +
> + arc = rte_malloc(feature_arc_name, sz, RTE_CACHE_LINE_SIZE);
> +
> + if (!arc) {
> + graph_err("malloc failed for feature_arc_create()");
> + return -1;
> + }
> +
> + memset(arc, 0, sz);
> +
> + /* Initialize rte_graph port group fixed variables */
> + STAILQ_INIT(&arc->all_features);
> + strncpy(arc->feature_arc_name, feature_arc_name, RTE_GRAPH_FEATURE_ARC_NAMELEN - 1);
> + arc->feature_arc_main = (void *)dfm;
> + arc->start_node = start_node;
> + arc->max_features = max_features;
> + arc->max_indexes = max_indexes;
> +
> + snprintf(name, sizeof(name), "%s-%s", feature_arc_name, "flist0");
> +
> + if (feature_arc_list_init(arc, name, &arc->feature_list[0], &arc->features[0], 0) < 0) {
> + rte_free(arc);
> + graph_err("feature_arc_list_init(0) failed");
> + return -1;
> + }
> + snprintf(name, sizeof(name), "%s-%s", feature_arc_name, "flist1");
> +
> + if (feature_arc_list_init(arc, name, &arc->feature_list[1], &arc->features[1], 1) < 0) {
> + feature_arc_list_destroy(arc->feature_list[0]);
> + graph_err("feature_arc_list_init(1) failed");
> + return -1;
> + }
> +
> + for (iter = 0; iter < arc->max_features; iter++) {
> + df = rte_graph_feature_get(arc, iter);
> + for (j = 0; j < arc->max_indexes; j++) {
> + gfd = rte_graph_feature_data_get(arc, df, j);
> + gfd->next_enabled_feature = RTE_GRAPH_FEATURE_INVALID;
> + }
> + }
> + arc->feature_arc_index = arc_index;
> + dfm->feature_arcs[arc->feature_arc_index] = (rte_graph_feature_arc_t)arc;
> + dfm->num_feature_arcs++;
> +
> + if (_arc)
> + *_arc = (rte_graph_feature_arc_t)arc;
> +
> + return 0;
> +}
> +
> +int
> +rte_graph_feature_add(rte_graph_feature_arc_t _arc, struct rte_node_register *feature_node,
> + const char *after_feature, const char *before_feature)
> +{
> + struct rte_graph_feature_node_list *after_finfo = NULL, *before_finfo = NULL;
> + struct rte_graph_feature_node_list *temp = NULL, *finfo = NULL;
> + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> + uint32_t slot, add_flag;
> + rte_edge_t edge = -1;
> +
> + RTE_VERIFY(arc->feature_arc_main == __feature_arc_main);
> +
> + if (feature_arc_lookup(_arc)) {
> + graph_err("invalid feature arc: 0x%016" PRIx64,
> (uint64_t)_arc);
> + return -1;
> + }
> +
> + if (arc->runtime_enabled_features) {
> + graph_err("adding features after enabling any one of them is
> not supported");
> + return -1;
> + }
> +
> + if ((after_feature != NULL) && (before_feature != NULL) &&
> + (after_feature == before_feature)) {
> + graph_err("after_feature and before_feature are same
> '%s:%s]", after_feature,
> + before_feature);
> + return -1;
> + }
> +
> + if (!feature_node) {
> + graph_err("feature_node: %p invalid", feature_node);
> + return -1;
> + }
> +
> + arc = rte_graph_feature_arc_get(_arc);
> +
> + if (feature_node->id == RTE_NODE_ID_INVALID) {
> + graph_err("Invalid node: %s", feature_node->name);
> + return -1;
> + }
> +
> + if (!feature_lookup(arc, feature_node->name, &finfo, &slot)) {
> + graph_err("%s feature already added", feature_node->name);
> + return -1;
> + }
> +
> + if (slot >= RTE_GRAPH_FEATURE_MAX_PER_ARC) {
> + graph_err("Max slot %u reached for feature addition", slot);
> + return -1;
> + }
> +
> + if (strstr(feature_node->name, arc->start_node->name)) {
> + graph_err("Feature %s cannot point to itself: %s",
> feature_node->name,
> + arc->start_node->name);
> + return -1;
> + }
> +
> + if (connect_graph_nodes(arc->start_node, feature_node, &edge, arc->feature_arc_name)) {
> + graph_err("unable to connect %s -> %s", arc->start_node->name, feature_node->name);
> + return -1;
> + }
> +
> + finfo = malloc(sizeof(*finfo));
> + if (!finfo)
> + return -1;
> +
> + memset(finfo, 0, sizeof(*finfo));
> +
> + finfo->feature_arc = (void *)arc;
> + finfo->feature_node = feature_node;
> + finfo->edge_to_this_feature = edge;
> +
> + /* Check for before and after constraints */
> + if (before_feature) {
> + /* before_feature sanity */
> + if (feature_lookup(arc, before_feature, &before_finfo, NULL))
> + SET_ERR_JMP(EINVAL, finfo_free,
> + "Invalid before feature name: %s", before_feature);
> +
> + if (!before_finfo)
> + SET_ERR_JMP(EINVAL, finfo_free,
> + "before_feature %s does not exist", before_feature);
> +
> + /*
> + * Starting from 0 to before_feature, continue connecting edges
> + */
> + add_flag = 1;
> + STAILQ_FOREACH(temp, &arc->all_features, next_feature) {
> + /*
> + * As soon as we see before_feature, stop adding edges
> + */
> + if (!strncmp(temp->feature_node->name, before_feature,
> + RTE_GRAPH_NAMESIZE))
> + if (!connect_graph_nodes(finfo->feature_node, temp->feature_node,
> + &edge, arc->feature_arc_name))
> + add_flag = 0;
> +
> + if (add_flag)
> + connect_graph_nodes(temp->feature_node, finfo->feature_node, NULL,
> + arc->feature_arc_name);
> + }
> + }
> +
> + if (after_feature) {
> + if (feature_lookup(arc, after_feature, &after_finfo, NULL))
> + SET_ERR_JMP(EINVAL, finfo_free,
> + "Invalid after feature_name %s", after_feature);
> +
> + if (!after_finfo)
> + SET_ERR_JMP(EINVAL, finfo_free,
> + "after_feature %s does not exist", after_feature);
> +
> + /* Starting from after_feature to end, continue connecting edges */
> + add_flag = 0;
> + STAILQ_FOREACH(temp, &arc->all_features, next_feature) {
> + /* We have already seen after_feature now */
> + if (add_flag)
> + /* Add all features as next node to current feature */
> + connect_graph_nodes(finfo->feature_node, temp->feature_node, NULL,
> + arc->feature_arc_name);
> +
> + /* As soon as we see after_feature, start adding edges
> + * from next iteration
> + */
> + if (!strncmp(temp->feature_node->name, after_feature, RTE_GRAPH_NAMESIZE))
> + /* connect after_feature to this feature */
> + if (!connect_graph_nodes(temp->feature_node, finfo->feature_node,
> + &edge, arc->feature_arc_name))
> + add_flag = 1;
> + }
> +
> + /* add feature next to after_feature */
> + STAILQ_INSERT_AFTER(&arc->all_features, after_finfo, finfo, next_feature);
> + } else {
> + if (before_finfo) {
> + after_finfo = NULL;
> + STAILQ_FOREACH(temp, &arc->all_features, next_feature) {
> + if (before_finfo == temp) {
> + if (after_finfo)
> + STAILQ_INSERT_AFTER(&arc->all_features, after_finfo,
> + finfo, next_feature);
> + else
> + STAILQ_INSERT_HEAD(&arc->all_features, finfo,
> + next_feature);
> +
> + return 0;
> + }
> + after_finfo = temp;
> + }
> + } else {
> + STAILQ_INSERT_TAIL(&arc->all_features, finfo, next_feature);
> + }
> + }
> +
> + return 0;
> +
> +finfo_free:
> + free(finfo);
> +
> + return -1;
> +}
> +
> +int
> +rte_graph_feature_lookup(rte_graph_feature_arc_t _arc, const char *feature_name,
> + rte_graph_feature_t *feat)
> +{
> + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> + struct rte_graph_feature_node_list *finfo = NULL;
> + uint32_t slot;
> +
> + if (!feature_lookup(arc, feature_name, &finfo, &slot)) {
> + *feat = (rte_graph_feature_t) slot;
> + return 0;
> + }
> +
> + return -1;
> +}
> +
> +int
> +rte_graph_feature_validate(rte_graph_feature_arc_t _arc, uint32_t index, const char *feature_name,
> + int is_enable_disable)
> +{
> + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> + struct rte_graph_feature_node_list *finfo = NULL;
> + struct rte_graph_feature *gf = NULL;
> + uint32_t slot;
> +
> + /* validate _arc */
> + if (arc->feature_arc_main != __feature_arc_main) {
> + graph_err("invalid feature arc: 0x%016" PRIx64,
> (uint64_t)_arc);
> + return -EINVAL;
> + }
> +
> + /* validate index */
> + if (index >= arc->max_indexes) {
> + graph_err("%s: Invalid provided index: %u >= %u configured",
> arc->feature_arc_name,
> + index, arc->max_indexes);
> + return -1;
> + }
> +
> + /* validate feature_name is already added or not */
> + if (feature_lookup(arc, feature_name, &finfo, &slot)) {
> + graph_err("%s: No feature %s added", arc-
> >feature_arc_name, feature_name);
> + return -EINVAL;
> + }
> +
> + if (!finfo) {
> + graph_err("%s: No feature: %s found", arc-
> >feature_arc_name, feature_name);
> + return -EINVAL;
> + }
> +
> + /* slot should be in valid range */
> + if (slot >= arc->max_features) {
> + graph_err("%s/%s: Invalid free slot %u(max=%u) for feature",
> arc->feature_arc_name,
> + feature_name, slot, arc->max_features);
> + return -EINVAL;
> + }
> +
> + /* slot should be in range of 0 - 63 */
> + if (slot > (RTE_GRAPH_FEATURE_MAX_PER_ARC - 1)) {
> + graph_err("%s/%s: Invalid slot: %u", arc->feature_arc_name,
> + feature_name, slot);
> + return -EINVAL;
> + }
> +
> + if (finfo->node_index != slot) {
> + graph_err("%s/%s: feature lookup slot mismatch with finfo
> index: %u and lookup slot: %u",
> + arc->feature_arc_name, feature_name, finfo-
> >node_index, slot);
> + return -1;
> + }
> +
> + /* Get feature from active list */
> + gf = __rte_graph_feature_get(arc, slot, ARC_PASSIVE_LIST(arc));
> + if (gf->this_feature_index != slot) {
> + graph_err("%s: %s received feature_index: %u does not match
> with saved feature_index: %u",
> + arc->feature_arc_name, feature_name, slot, gf-
> >this_feature_index);
> + return -1;
> + }
> +
> + if (is_enable_disable && (arc->feature_bit_mask_by_index[index] &
> + RTE_BIT64(slot))) {
> + graph_err("%s: %s already enabled on index: %u",
> + arc->feature_arc_name, feature_name, index);
> + return -1;
> + }
> +
> + if (!is_enable_disable && !arc->runtime_enabled_features) {
> + graph_err("%s: No feature enabled to disable", arc-
> >feature_arc_name);
> + return -1;
> + }
> +
> + if (!is_enable_disable && !(arc->feature_bit_mask_by_index[index] & RTE_BIT64(slot))) {
> + graph_err("%s: %s not enabled in bitmask for index: %u",
> + arc->feature_arc_name, feature_name, index);
> + return -1;
> + }
> +
> + return 0;
> +}
> +
> +static void
> +copy_fastpath_user_data(struct rte_graph_feature_arc *arc, uint16_t dest_list_index,
> + uint16_t src_list_index)
> +{
> + rte_graph_feature_data_t *sgfd = NULL, *dgfd = NULL;
> + struct rte_graph_feature *sgf = NULL, *dgf = NULL;
> + uint32_t i, j;
> +
> + for (i = 0; i < arc->max_features; i++) {
> + sgf = __rte_graph_feature_get(arc, i, src_list_index);
> + dgf = __rte_graph_feature_get(arc, i, dest_list_index);
> + for (j = 0; j < arc->max_indexes; j++) {
> + sgfd = rte_graph_feature_data_get(arc, sgf, j);
> + dgfd = rte_graph_feature_data_get(arc, dgf, j);
> + dgfd->user_data = sgfd->user_data;
> + }
> + }
> +}
> +
> +static void
> +refill_feature_fastpath_data(struct rte_graph_feature_arc *arc, uint16_t list_index)
> +{
> + struct rte_graph_feature_node_list *finfo = NULL, *prev_finfo = NULL;
> + struct rte_graph_feature_data *gfd = NULL, *prev_gfd = NULL;
> + struct rte_graph_feature *gf = NULL, *prev_gf = NULL;
> + rte_graph_feature_list_t *flist = NULL;
> + uint32_t fi, di, prev_fi;
> + uint64_t bitmask;
> + rte_edge_t edge;
> +
> + flist = arc->feature_list[list_index];
> +
> + for (di = 0; di < arc->max_indexes; di++) {
> + bitmask = arc->feature_bit_mask_by_index[di];
> + prev_fi = RTE_GRAPH_FEATURE_INVALID;
> + /* for each feature set for index, set fast path data */
> + while (rte_bsf64_safe(bitmask, &fi)) {
> + gf = __rte_graph_feature_get(arc, fi, list_index);
> + gfd = rte_graph_feature_data_get(arc, gf, di);
> + feature_arc_node_info_lookup(arc, fi, &finfo);
> +
> + /* If previous feature_index was valid in last loop */
> + if (prev_fi != RTE_GRAPH_FEATURE_INVALID) {
> + prev_gf = __rte_graph_feature_get(arc, prev_fi, list_index);
> + prev_gfd = rte_graph_feature_data_get(arc, prev_gf, di);
> + /*
> + * Get edge of previous feature node connecting to this feature node
> + */
> + feature_arc_node_info_lookup(arc, prev_fi, &prev_finfo);
> + if (!get_existing_edge(arc->feature_arc_name,
> + prev_finfo->feature_node,
> + finfo->feature_node, &edge)) {
> + feat_dbg("[%s/%s(%2u)/idx:%2u]: %s[%u] = %s",
> + arc->feature_arc_name,
> + prev_finfo->feature_node->name, prev_fi, di,
> + prev_finfo->feature_node->name,
> + edge, finfo->feature_node->name);
> + /* Copy feature index for next iteration */
> + gfd->next_edge = edge;
> + prev_fi = fi;
> + /*
> + * Fill current feature as next enabled
> + * feature to previous one
> + */
> + prev_gfd->next_enabled_feature = fi;
> + } else {
> + /* Should not fail */
> + RTE_VERIFY(0);
> + }
> + }
> + /* On first feature edge of the node to be added */
> + if (fi == rte_bsf64(arc->feature_bit_mask_by_index[di])) {
> + if (!get_existing_edge(arc->feature_arc_name, arc->start_node,
> + finfo->feature_node,
> + &edge)) {
> + feat_dbg("[%s/%s/%2u/idx:%2u]: 1st feat %s[%u] = %s",
> + arc->feature_arc_name,
> + arc->start_node->name, fi, di,
> + arc->start_node->name, edge,
> + finfo->feature_node->name);
> + /* Copy feature index for next iteration */
> + gfd->next_edge = edge;
> + prev_fi = fi;
> + /* Set first feature set array for index */
> + flist->first_enabled_feature_by_index[di] = fi;
> + } else {
> + /* Should not fail */
> + RTE_VERIFY(0);
> + }
> + }
> + /* Clear current feature index */
> + bitmask &= ~RTE_BIT64(fi);
> + }
> + }
> +}
> +
> +int
> +rte_graph_feature_enable(rte_graph_feature_arc_t _arc, uint32_t index, const
> + char *feature_name, int32_t user_data)
> +{
> + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> + struct rte_graph_feature_node_list *finfo = NULL;
> + struct rte_graph_feature_data *gfd = NULL;
> + rte_graph_feature_rt_list_t passive_list;
> + struct rte_graph_feature *gf = NULL;
> + uint64_t fp_bitmask;
> + uint32_t slot;
> +
> + if (rte_graph_feature_validate(_arc, index, feature_name, 1))
> + return -1;
> +
> + /** This should not fail as validate() has passed */
> + if (feature_lookup(arc, feature_name, &finfo, &slot))
> + RTE_VERIFY(0);
> +
> + if (!arc->runtime_enabled_features)
> + prepare_feature_arc(arc);
> +
> + passive_list = ARC_PASSIVE_LIST(arc);
> +
> + gf = __rte_graph_feature_get(arc, slot, passive_list);
> + gfd = rte_graph_feature_data_get(arc, gf, index);
> +
> + feat_dbg("%s/%s: Enabling feature on list: %u for index: %u at feature
> slot %u",
> + arc->feature_arc_name, feature_name, passive_list, index,
> slot);
> +
> + /* Reset feature list */
> + feature_arc_list_reset(arc, passive_list);
> +
> + /* Copy user-data */
> + copy_fastpath_user_data(arc, passive_list, arc->active_feature_list);
> +
> + /* Set current user-data */
> + gfd->user_data = user_data;
> +
> + /* Set bitmask in control path bitmask */
> + rte_bit_relaxed_set64(rte_graph_uint_cast(slot), &arc->feature_bit_mask_by_index[index]);
> + refill_feature_fastpath_data(arc, passive_list);
> +
> + /* Set fast path enable bitmask */
> + fp_bitmask = __atomic_load_n(&arc->feature_enable_bitmask[passive_list], __ATOMIC_RELAXED);
> + fp_bitmask |= RTE_BIT64(slot);
> + __atomic_store(&arc->feature_enable_bitmask[passive_list], &fp_bitmask, __ATOMIC_RELAXED);
> +
> + /* Slow path updates */
> + arc->runtime_enabled_features++;
> +
> + /* Increase feature node info reference count */
> + finfo->ref_count++;
> +
> + /* Store release semantics for active_list update */
> + __atomic_store(&arc->active_feature_list, &passive_list, __ATOMIC_RELEASE);
> +
> + return 0;
> +}
> +
> +int
> +rte_graph_feature_disable(rte_graph_feature_arc_t _arc, uint32_t index, const char *feature_name)
> +{
> + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> + struct rte_graph_feature_data *gfd = NULL;
> + struct rte_graph_feature_node_list *finfo = NULL;
> + rte_graph_feature_rt_list_t passive_list;
> + struct rte_graph_feature *gf = NULL;
> + uint32_t slot;
> +
> + if (rte_graph_feature_validate(_arc, index, feature_name, 0))
> + return -1;
> +
> + if (feature_lookup(arc, feature_name, &finfo, &slot))
> + return -1;
> +
> + passive_list = ARC_PASSIVE_LIST(arc);
> +
> + gf = __rte_graph_feature_get(arc, slot, passive_list);
> + gfd = rte_graph_feature_data_get(arc, gf, index);
> +
> + feat_dbg("%s/%s: Disabling feature for index: %u at feature slot %u",
> arc->feature_arc_name,
> + feature_name, index, slot);
> +
> + rte_bit_relaxed_clear64(rte_graph_uint_cast(slot), &arc->feature_bit_mask_by_index[index]);
> +
> + /* Set fast path enable bitmask */
> + arc->feature_enable_bitmask[passive_list] &= ~(RTE_BIT64(slot));
> +
> + /* Reset feature list */
> + feature_arc_list_reset(arc, passive_list);
> +
> + /* Copy user-data */
> + copy_fastpath_user_data(arc, passive_list, arc->active_feature_list);
> +
> + /* Reset current user-data */
> + gfd->user_data = ~0;
> +
> + refill_feature_fastpath_data(arc, passive_list);
> +
> + finfo->ref_count--;
> + arc->runtime_enabled_features--;
> +
> + /* Store release semantics for active_list update */
> + __atomic_store(&arc->active_feature_list, &passive_list, __ATOMIC_RELEASE);
> +
> + return 0;
> +}
> +
> +int
> +rte_graph_feature_arc_destroy(rte_graph_feature_arc_t _arc)
> +{
> + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> + rte_graph_feature_arc_main_t *dm = __feature_arc_main;
> + struct rte_graph_feature_node_list *node_info = NULL;
> +
> + while (!STAILQ_EMPTY(&arc->all_features)) {
> + node_info = STAILQ_FIRST(&arc->all_features);
> + STAILQ_REMOVE_HEAD(&arc->all_features, next_feature);
> + free(node_info);
> + }
> + feature_arc_list_destroy(arc->feature_list[0]);
> + feature_arc_list_destroy(arc->feature_list[1]);
> + rte_free(arc->features[0]);
> + rte_free(arc->features[1]);
> +
> + dm->feature_arcs[arc->feature_arc_index] = RTE_GRAPH_FEATURE_ARC_INITIALIZER;
> +
> + rte_free(arc);
> + return 0;
> +}
> +
> +int
> +rte_graph_feature_arc_cleanup(void)
> +{
> + rte_graph_feature_arc_main_t *dm = __feature_arc_main;
> + uint32_t iter;
> +
> + if (!__feature_arc_main)
> + return -1;
> +
> + for (iter = 0; iter < dm->max_feature_arcs; iter++) {
> + if (dm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
> + continue;
> +
> + rte_graph_feature_arc_destroy((rte_graph_feature_arc_t)dm->feature_arcs[iter]);
> + }
> + free(dm);
> +
> + __feature_arc_main = NULL;
> +
> + return 0;
> +}
> +
> +int
> +rte_graph_feature_arc_lookup_by_name(const char *arc_name, rte_graph_feature_arc_t *_arc)
> +{
> + rte_graph_feature_arc_main_t *dm = __feature_arc_main;
> + struct rte_graph_feature_arc *arc = NULL;
> + uint32_t iter;
> +
> + if (!__feature_arc_main)
> + return -1;
> +
> + for (iter = 0; iter < dm->max_feature_arcs; iter++) {
> + if (dm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
> + continue;
> +
> + arc = rte_graph_feature_arc_get(dm->feature_arcs[iter]);
> +
> + if (strstr(arc_name, arc->feature_arc_name)) {
> + if (_arc)
> + *_arc = (rte_graph_feature_arc_t)arc;
> + return 0;
> + }
> + }
> +
> + return -1;
> +}
> +
> +int
> +rte_graph_feature_arc_num_enabled_features(rte_graph_feature_arc_t _arc)
> +{
> + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> +
> + return arc->runtime_enabled_features;
> +}
> +
> +
> diff --git a/lib/graph/meson.build b/lib/graph/meson.build
> index 0cb15442ab..d916176fb7 100644
> --- a/lib/graph/meson.build
> +++ b/lib/graph/meson.build
> @@ -14,11 +14,13 @@ sources = files(
> 'graph_debug.c',
> 'graph_stats.c',
> 'graph_populate.c',
> + 'graph_feature_arc.c',
> 'graph_pcap.c',
> 'rte_graph_worker.c',
> 'rte_graph_model_mcore_dispatch.c',
> )
> headers = files('rte_graph.h', 'rte_graph_worker.h')
> +headers += files('rte_graph_feature_arc.h', 'rte_graph_feature_arc_worker.h')
> indirect_headers += files(
> 'rte_graph_model_mcore_dispatch.h',
> 'rte_graph_model_rtc.h',
> diff --git a/lib/graph/rte_graph_feature_arc.h b/lib/graph/rte_graph_feature_arc.h
> new file mode 100644
> index 0000000000..e3bf4eb73d
> --- /dev/null
> +++ b/lib/graph/rte_graph_feature_arc.h
> @@ -0,0 +1,373 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2024 Marvell International Ltd.
> + */
> +
> +#ifndef _RTE_GRAPH_FEATURE_ARC_H_
> +#define _RTE_GRAPH_FEATURE_ARC_H_
> +
> +#include <assert.h>
> +#include <errno.h>
> +#include <signal.h>
> +#include <stddef.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +
> +#include <rte_common.h>
> +#include <rte_compat.h>
> +#include <rte_debug.h>
> +#include <rte_graph.h>
> +#include <rte_graph_worker.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * @file
> + *
> + * rte_graph_feature_arc.h
> + *
> + * Define APIs and structures/variables with respect to feature arc
> + *
> + * - Feature arc(s)
> + * - Feature(s)
> + *
> + * A feature arc represents an ordered list of features/protocol-nodes at a
> + * given networking layer. A feature arc provides a high-level abstraction to
> + * connect various *rte_graph* nodes, designated as *feature nodes*, and
> + * allows steering of packets across these feature nodes in fast path
> + * processing in a generic manner. In a typical network stack, a protocol or
> + * feature must often first be enabled on a given interface before any packet
> + * is steered towards it for feature processing. For example, incoming IPv4
> + * packets are sent to the routing sub-system only after a valid IPv4 address
> + * is assigned to the receiving interface. In other words, packets often need
> + * to be steered across features not based on the packet content but based on
> + * whether a feature is enabled or disabled on a given incoming/outgoing
> + * interface. Feature arc provides a mechanism to enable/disable feature(s) on
> + * each interface at runtime and allows seamless packet steering across
> + * runtime-enabled feature nodes in fast path.
> + *
> + * Feature arc also provides a way to steer packets from standard nodes to
> + * custom/user-defined *feature nodes* without any change in the standard
> + * node's fast path functions.
> + *
> + * On a given interface, multiple features might be enabled in a particular
> + * feature arc. For instance, both "ipv4-output" and "IPsec policy output"
> + * features may be enabled on the "eth0" interface in the "L3-output" feature
> + * arc. Similarly, "ipv6-output" and "ipsec-output" may be enabled on the
> + * "eth1" interface in the same "L3-output" feature arc.
> + *
> + * When multiple features are present in a given feature arc, it is imperative
> + * to allow each feature to be processed in a particular sequential order. For
> + * instance, in the "L3-input" feature arc it may be required to run the
> + * "IPsec input" feature first, for packet decryption, before "ip-lookup". So
> + * a sequential order must be maintained among features present in a feature
> + * arc.
> + *
> + * Features are enabled/disabled multiple times at runtime on some or all
> + * available interfaces present in the system. Features can be
> + * enabled/disabled even after @b rte_graph_create() is called.
> + * Enabling/disabling features on one interface is independent of other
> + * interfaces.
> + *
> + * A given feature might consume a packet (if it is configured to consume) or
> + * may forward it to the next enabled feature. For instance, the "IPsec input"
> + * feature may consume/drop all packets with the "Protect" policy action,
> + * while all packets with the "Bypass" policy action may be forwarded to the
> + * next enabled feature (within the same feature arc).
> + *
> + * This library facilitates rte_graph based applications to steer packets in
> + * fast path to different feature nodes within a feature arc and supports all
> + * the functionality described above.
> + *
> + * In order to use feature-arc APIs, applications need to do the following in
> + * the control path:
> + * - Initialize the feature arc library via rte_graph_feature_arc_init()
> + * - Create a feature arc via rte_graph_feature_arc_create()
> + * - *Before calling rte_graph_create()*, features must be added to the
> + * feature arc via rte_graph_feature_add(). rte_graph_feature_add() allows
> + * adding features in a sequential order with "runs_after" and "runs_before"
> + * constraints.
> + * - Post rte_graph_create(), features can be enabled/disabled at runtime on
> + * any interface via rte_graph_feature_enable()/rte_graph_feature_disable()
> + * - A feature arc can be destroyed via rte_graph_feature_arc_destroy()
> + *
> + * In fast path, APIs are provided to steer packets towards the feature path
> + * from
> + * - start_node (provided as an argument to rte_graph_feature_arc_create())
> + * - feature nodes (which are added via rte_graph_feature_add())
> + *
> + * For typical steering of packets across feature nodes, the application needs
> + * to know the "rte_edges" which are saved in the feature data object. The
> + * feature data object is unique for every interface per feature within a
> + * feature arc.
> + *
> + * When steering packets from start_node to a feature node:
> + * - rte_graph_feature_arc_first_feature_get() provides the first enabled
> + * feature.
> + * - The next rte_edge from start_node to the first enabled feature can be
> + * obtained via rte_graph_feature_arc_feature_set()
> + *
> + * rte_mbuf can carry [current feature, index] from the start_node of an arc
> + * to other feature nodes.
> + *
> + * In a feature node, the application can get the 32-bit user_data
> + * via rte_graph_feature_user_data_get(), which is provided in
> + * rte_graph_feature_enable(). User data can hold a feature-specific cookie
> + * like an IPsec policy database index (if more than one is supported).
> + *
> + * If a feature node is not consuming the packet, the next enabled feature and
> + * next rte_edge can be obtained via rte_graph_feature_arc_next_feature_get()
> + *
> + * It is the application's responsibility to ensure that at least the *last
> + * feature* (or sink feature) is enabled, from where the packet can exit the
> + * feature-arc path, if *no* intermediate feature consumes the packet and it
> + * has reached the end of the feature arc path.
> + *
> + * Synchronization among cores
> + * ---------------------------
> + * Subsequent calls to rte_graph_feature_enable() are allowed while worker
> + * cores are processing in the rte_graph_walk() loop. However, for
> + * rte_graph_feature_disable() the application must use RCU based
> + * synchronization.
> + */
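For readers new to the API, a minimal control-path sketch of the sequence described in the comment above could look like the following. The node registrations ip4_rewrite_node/ipsec_output_node, the "L3-output"/"ipsec-output" names and the port-indexed layout are illustrative assumptions, not something this patch defines:

    /* assumes <rte_graph_feature_arc.h> plus feature nodes registered elsewhere */
    extern struct rte_node_register ip4_rewrite_node;   /* arc start node (placeholder) */
    extern struct rte_node_register ipsec_output_node;  /* feature node (placeholder) */

    static int setup_l3_output_arc(uint16_t nb_ports)
    {
        rte_graph_feature_arc_t arc;

        /* optional sizing; rte_graph_feature_arc_create() falls back to defaults */
        if (rte_graph_feature_arc_init(8) < 0)
            return -1;

        /* arc evaluated in the start node, indexed by port id */
        if (rte_graph_feature_arc_create("L3-output", 4, nb_ports,
                                         &ip4_rewrite_node, &arc) < 0)
            return -1;

        /* all features must be added before rte_graph_create() */
        if (rte_graph_feature_add(arc, &ipsec_output_node, NULL, NULL) < 0)
            return -1;

        /* ... rte_graph_create() and graph walk threads start here ... */

        /* runtime: enable the feature on port 0, user_data carries e.g. an SPD index */
        return rte_graph_feature_enable(arc, 0, "ipsec-output", 0);
    }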
> +
> +/**< Initializer value for rte_graph_feature_arc_t */
> +#define RTE_GRAPH_FEATURE_ARC_INITIALIZER ((rte_graph_feature_arc_t)UINT64_MAX)
> +
> +/** Max number of features supported in a given feature arc */
> +#define RTE_GRAPH_FEATURE_MAX_PER_ARC 64
> +
> +/** Length of feature arc name */
> +#define RTE_GRAPH_FEATURE_ARC_NAMELEN RTE_NODE_NAMESIZE
> +
> +/** @internal */
> +#define rte_graph_feature_cast(x) ((rte_graph_feature_t)x)
> +
> +/**< Invalid value for rte_graph_feature_t */
> +#define RTE_GRAPH_FEATURE_INVALID rte_graph_feature_cast(UINT8_MAX)
> +
> +/** rte_graph feature arc object */
> +typedef uint64_t rte_graph_feature_arc_t;
> +
> +/** rte_graph feature object */
> +typedef uint8_t rte_graph_feature_t;
> +
> +/** runtime active feature list index within feature arc */
> +typedef uint8_t rte_graph_feature_rt_list_t;
> +
> +/** per feature arc monotonically increasing counter to synchronize fast path APIs */
> +typedef uint16_t rte_graph_feature_counter_t;
> +
> +/**
> + * Initialize feature arc subsystem
> + *
> + * @param max_feature_arcs
> + * Maximum number of feature arcs required to be supported
> + *
> + * @return
> + * 0: Success
> + * <0: Failure
> + */
> +__rte_experimental
> +int rte_graph_feature_arc_init(int max_feature_arcs);
> +
> +/**
> + * Create a feature arc
> + *
> + * @param feature_arc_name
> + * Feature arc name with max length of @ref RTE_GRAPH_FEATURE_ARC_NAMELEN
> + * @param max_features
> + * Maximum number of features to be supported in this feature arc
> + * @param max_indexes
> + * Maximum number of interfaces/ports/indexes to be supported
> + * @param start_node
> + * Base node where this feature arc's features are checked in fast path
> + * @param[out] _arc
> + * Feature arc object
> + *
> + * @return
> + * 0: Success
> + * <0: Failure
> + */
> +__rte_experimental
> +int rte_graph_feature_arc_create(const char *feature_arc_name, int max_features, int max_indexes,
> + struct rte_node_register *start_node,
> + rte_graph_feature_arc_t *_arc);
> +
> +/**
> + * Get feature arc object with name
> + *
> + * @param arc_name
> + * Feature arc name provided to successful @ref rte_graph_feature_arc_create
> + * @param[out] _arc
> + * Feature arc object returned
> + *
> + * @return
> + * 0: Success
> + * <0: Failure.
> + */
> +__rte_experimental
> +int rte_graph_feature_arc_lookup_by_name(const char *arc_name, rte_graph_feature_arc_t *_arc);
> +
> +/**
> + * Add a feature to an already created feature arc. For instance:
> + *
> + * 1. Add first feature node: "ipv4-input" to input arc
> + * rte_graph_feature_add(ipv4_input_arc, "ipv4-input", NULL, NULL);
> + *
> + * 2. Add "ipsec-input" feature node after "ipv4-input" node
> + * rte_graph_feature_add(ipv4_input_arc, "ipsec-input", "ipv4-input", NULL);
> + *
> + * 3. Add "ipv4-pre-classify-input" node before "ipv4-input" node
> + * rte_graph_feature_add(ipv4_input_arc, "ipv4-pre-classify-input", NULL, "ipv4-input");
> + *
> + * 4. Add "acl-classify-input" node after "ipv4-input" but before "ipsec-input"
> + * rte_graph_feature_add(ipv4_input_arc, "acl-classify-input", "ipv4-input", "ipsec-input");
> + *
> + * @param _arc
> + * Feature arc handle returned from @ref rte_graph_feature_arc_create()
> + * @param feature_node
> + * Graph node representing feature. On success, feature_node is next_node of
> + * feature_arc->start_node
> + * @param runs_after
> + * Add this feature_node after already added "runs_after". Creates
> + * start_node -> runs_after -> this_feature sequence
> + * @param runs_before
> + * Add this feature_node before already added "runs_before". Creates
> + * start_node -> this_feature -> runs_before sequence
> + *
> + * <I> Must be called before rte_graph_create() </I>
> + * <I> rte_graph_feature_add() is not allowed after call to
> + * rte_graph_feature_enable() so all features must be added before they can be
> + * enabled </I>
> + *
> + * @return
> + * 0: Success
> + * <0: Failure
> + */
> +__rte_experimental
> +int rte_graph_feature_add(rte_graph_feature_arc_t _arc, struct rte_node_register *feature_node,
> + const char *runs_after, const char *runs_before);
> +
> +/**
> + * Enable feature within a feature arc
> + *
> + * Must be called after @b rte_graph_create().
> + *
> + * @param _arc
> + * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
> + * rte_graph_feature_arc_lookup_by_name
> + * @param index
> + * Application specific index. Can correspond to interface_id/port_id etc.
> + * @param feature_name
> + * Name of the node which is already added via @ref rte_graph_feature_add
> + * @param user_data
> + * Application specific data which is retrieved in fast path
> + *
> + * @return
> + * 0: Success
> + * <0: Failure
> + */
> +__rte_experimental
> +int rte_graph_feature_enable(rte_graph_feature_arc_t _arc, uint32_t index, const char *feature_name,
> + int32_t user_data);
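Purely illustrative: once the graph is running, the same feature can be enabled per index at runtime, with user_data carrying a per-port cookie. Here arc, nb_ports and spd_index_of[] are application-side placeholders, not part of this patch:

    for (uint32_t port = 0; port < nb_ports; port++)
        if (rte_graph_feature_enable(arc, port, "ipsec-output",
                                     spd_index_of[port]) < 0)
            rte_panic("cannot enable ipsec-output on port %u\n", port);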
> +
> +/**
> + * Validate whether subsequent enable/disable feature would succeed or not.
> + * API is thread-safe
> + *
> + * @param _arc
> + * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
> + * rte_graph_feature_arc_lookup_by_name
> + * @param index
> + * Application specific index. Can correspond to interface_id/port_id etc.
> + * @param feature_name
> + * Name of the node which is already added via @ref rte_graph_feature_add
> + * @param is_enable_disable
> + * If 1, validate whether subsequent @ref rte_graph_feature_enable would pass or not
> + * If 0, validate whether subsequent @ref rte_graph_feature_disable would pass or not
> + *
> + * @return
> + * 0: Subsequent enable/disable API would pass
> + * <0: Subsequent enable/disable API would not pass
> + */
> +__rte_experimental
> +int rte_graph_feature_validate(rte_graph_feature_arc_t _arc, uint32_t index,
> + const char *feature_name, int is_enable_disable);
> +
> +/**
> + * Disable already enabled feature within a feature arc
> + *
> + * Must be called after @b rte_graph_create(). API is *NOT* Thread-safe
> + *
> + * @param _arc
> + * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
> + * rte_graph_feature_arc_lookup_by_name
> + * @param index
> + * Application specific index. Can correspond to interface_id/port_id etc.
> + * @param feature_name
> + * Name of the node which is already added via @ref rte_graph_feature_add
> + *
> + * @return
> + * 0: Success
> + * <0: Failure
> + */
> +__rte_experimental
> +int rte_graph_feature_disable(rte_graph_feature_arc_t _arc, uint32_t index,
> + const char *feature_name);
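Since the header only states that disable needs RCU based synchronization, here is a hedged sketch of one way to do it with the existing rte_rcu_qsbr API (<rte_rcu_qsbr.h>); the qsbr variable qsv and the convention that worker lcores report a quiescent state once per rte_graph_walk() iteration are application choices, not mandated by this patch:

    /* control plane: stop steering packets to the feature first */
    rte_graph_feature_disable(arc, port_id, "ipsec-output");

    /* wait until every worker has observed the switched feature list;
     * workers call rte_rcu_qsbr_quiescent(qsv, lcore_id) between walks
     */
    rte_rcu_qsbr_synchronize(qsv, RTE_QSBR_THRID_INVALID);

    /* now it is safe to release resources referenced by the old user_data */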
> +
> +/**
> + * Get rte_graph_feature_t object from feature name
> + *
> + * @param arc
> + * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
> + * rte_graph_feature_arc_lookup_by_name
> + * @param feature_name
> + * Feature name provided to @ref rte_graph_feature_add
> + * @param[out] feature
> + * Feature object
> + *
> + * @return
> + * 0: Success
> + * <0: Failure
> + */
> +__rte_experimental
> +int rte_graph_feature_lookup(rte_graph_feature_arc_t _arc, const char *feature_name,
> + rte_graph_feature_t *feature);
> +
> +/**
> + * Delete feature_arc object
> + *
> + * @param _arc
> + * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
> + * rte_graph_feature_arc_lookup_by_name
> + *
> + * @return
> + * 0: Success
> + * <0: Failure
> + */
> +__rte_experimental
> +int rte_graph_feature_arc_destroy(rte_graph_feature_arc_t _arc);
> +
> +/**
> + * Cleanup all feature arcs
> + *
> + * @return
> + * 0: Success
> + * <0: Failure
> + */
> +__rte_experimental
> +int rte_graph_feature_arc_cleanup(void);
> +
> +/**
> + * Slow path API to know how many features are currently enabled within a feature arc
> + *
> + * @param _arc
> + * Feature arc object
> + *
> + * @return: Number of enabled features
> + */
> +__rte_experimental
> +int rte_graph_feature_arc_num_enabled_features(rte_graph_feature_arc_t _arc);
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif
> diff --git a/lib/graph/rte_graph_feature_arc_worker.h b/lib/graph/rte_graph_feature_arc_worker.h
> new file mode 100644
> index 0000000000..6019d74853
> --- /dev/null
> +++ b/lib/graph/rte_graph_feature_arc_worker.h
> @@ -0,0 +1,548 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2024 Marvell International Ltd.
> + */
> +
> +#ifndef _RTE_GRAPH_FEATURE_ARC_WORKER_H_
> +#define _RTE_GRAPH_FEATURE_ARC_WORKER_H_
> +
> +#include <stddef.h>
> +#include <rte_graph_feature_arc.h>
> +#include <rte_bitops.h>
> +
> +/**
> + * @file
> + *
> + * rte_graph_feature_arc_worker.h
> + *
> + * Defines fast path structure
> + */
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/** @internal
> + *
> + * Slow path feature node info list
> + */
> +struct rte_graph_feature_node_list {
> + /** Next feature */
> + STAILQ_ENTRY(rte_graph_feature_node_list) next_feature;
> +
> + /** node representing feature */
> + struct rte_node_register *feature_node;
> +
> + /** How many indexes/interfaces using this feature */
> + int32_t ref_count;
> +
> + /* node_index in list (after feature_enable())*/
> + uint32_t node_index;
> +
> + /** Back pointer to feature arc */
> + void *feature_arc;
> +
> + /** rte_edge_t to this feature node from feature_arc->start_node */
> + rte_edge_t edge_to_this_feature;
> +};
> +
> +/**
> + * Fast path data holding rte_edge_t and next enabled feature for a feature
> + */
> +typedef struct __rte_packed rte_graph_feature_data {
> + /* next node to which current mbuf should go*/
> + rte_edge_t next_edge;
> +
> + /* next enabled feature on this arc for current index */
> + union {
> + uint16_t reserved;
> + struct {
> + rte_graph_feature_t next_enabled_feature;
> + };
> + };
> +
> + /* user_data */
> + int32_t user_data;
> +} rte_graph_feature_data_t;
> +
> +/**
> + * Fast path feature structure. Holds rte_graph_feature_data_t per index
> + */
> +struct __rte_cache_aligned rte_graph_feature {
> + uint16_t this_feature_index;
> +
> + /* Array of size arc->feature_data_size
> + * [data-index-0][data-index-1]...
> + * Each index of size: sizeof(rte_graph_feature_data_t)
> + */
> + uint8_t feature_data_by_index[];
> +};
> +
> +/**
> + * Fast path cache aligned feature list holding all features
> + * There are two feature lists: active, passive
> + *
> + * Fast path APIs work on the active list while the control plane updates the
> + * passive list. An atomic update to arc->active_feature_list is done to switch
> + * between active and passive lists
> + */
> +typedef struct __rte_cache_aligned rte_graph_feature_list {
> + /**
> + * fast path array holding per_feature data.
> + * Duplicate entry as feature-arc also hold this pointer
> + * arc->features[]
> + *
> + *<-------------feature-0 ---------><CEIL><---------feature-1 -------------->...
> + *[index-0][index-1]...[max_index-1] [index-0][index-1]...[max_index-1]...
> + */
> + struct rte_graph_feature *indexed_by_features;
> + /*
> + * fast path array holding first enabled feature per index
> + * (Required in start_node. In non start_node, mbuf can hold next enabled
> + * feature)
> + */
> + rte_graph_feature_t first_enabled_feature_by_index[];
> +} rte_graph_feature_list_t;
> +
> +/**
> + * rte_graph feature arc object
> + *
> + * A feature-arc can only hold RTE_GRAPH_FEATURE_MAX_PER_ARC features but no
> + * limit to interface index
> + *
> + * Representing a feature arc holding all features which are enabled/disabled
> + * on any interfaces
> + */
> +struct __rte_cache_aligned rte_graph_feature_arc {
> + /* First 64B is fast path variables */
> + RTE_MARKER fast_path_variables;
> +
> + /** runtime active feature list */
> + rte_graph_feature_rt_list_t active_feature_list;
> +
> + /* Actual Size of feature_list0 */
> + uint16_t feature_list_size;
> +
> + /**
> + * Size of each feature in fast path.
> + * sizeof(arc->active_list->indexed_by_feature[0])
> + */
> + uint16_t feature_size;
> +
> + /* Size of arc->max_index * sizeof(rte_graph_feature_data_t) */
> + uint16_t feature_data_size;
> +
> + /**
> + * Fast path bitmask indicating if a feature is enabled or not. Number
> + * of bits: RTE_GRAPH_FEATURE_MAX_PER_ARC
> + */
> + uint64_t feature_enable_bitmask[2];
> + rte_graph_feature_list_t *feature_list[2];
> + struct rte_graph_feature *features[2];
> +
> + /** index in feature_arc_main */
> + uint16_t feature_arc_index;
> +
> + uint16_t reserved[3];
> +
> + /** Slow path variables follow */
> + RTE_MARKER slow_path_variables;
> +
> + /** feature arc name */
> + char feature_arc_name[RTE_GRAPH_FEATURE_ARC_NAMELEN];
> +
> + /** All feature lists */
> + STAILQ_HEAD(, rte_graph_feature_node_list) all_features;
> +
> + uint32_t runtime_enabled_features;
> +
> + /** Back pointer to feature_arc_main */
> + void *feature_arc_main;
> +
> + /* start_node */
> + struct rte_node_register *start_node;
> +
> + /* maximum number of features supported by this arc */
> + uint32_t max_features;
> +
> + /* maximum number of index supported by this arc */
> + uint32_t max_indexes;
> +
> + /* Slow path bit mask per feature per index */
> + uint64_t feature_bit_mask_by_index[];
> +};
> +
> +/** Feature arc main */
> +typedef struct feature_arc_main {
> + /** number of feature arcs created by application */
> + uint32_t num_feature_arcs;
> +
> + /** max features arcs allowed */
> + uint32_t max_feature_arcs;
> +
> + /** feature arcs */
> + rte_graph_feature_arc_t feature_arcs[];
> +} rte_graph_feature_arc_main_t;
> +
> +/** @internal Get feature arc pointer from object */
> +#define rte_graph_feature_arc_get(arc) ((struct rte_graph_feature_arc *)arc)
> +
> +extern rte_graph_feature_arc_main_t *__feature_arc_main;
> +
> +/**
> + * API to know if feature is valid or not
> + */
> +
> +static __rte_always_inline int
> +rte_graph_feature_is_valid(rte_graph_feature_t feature)
> +{
> + return (feature != RTE_GRAPH_FEATURE_INVALID);
> +}
> +
> +/**
> + * Get rte_graph_feature object with no checks
> + *
> + * @param arc
> + * Feature arc pointer
> + * @param feature
> + * Feature index
> + * @param feature_list
> + * active feature list retrieved from rte_graph_feature_arc_has_any_feature()
> + * or rte_graph_feature_arc_has_feature()
> + *
> + * @return
> + * Internal feature object.
> + */
> +static __rte_always_inline struct rte_graph_feature *
> +__rte_graph_feature_get(struct rte_graph_feature_arc *arc, rte_graph_feature_t feature,
> + const rte_graph_feature_rt_list_t feature_list)
> +{
> + return ((struct rte_graph_feature *)((uint8_t *)(arc->features[feature_list] +
> + (feature * arc->feature_size))));
> +}
> +
> +/**
> + * Get rte_graph_feature object for a given interface/index from feature arc
> + *
> + * @param arc
> + * Feature arc pointer
> + * @param feature
> + * Feature index
> + *
> + * @return
> + * Internal feature object.
> + */
> +static __rte_always_inline struct rte_graph_feature *
> +rte_graph_feature_get(struct rte_graph_feature_arc *arc, rte_graph_feature_t feature)
> +{
> + RTE_VERIFY(feature < arc->max_features);
> +
> + if (likely(rte_graph_feature_is_valid(feature)))
> + return __rte_graph_feature_get(arc, feature, arc->active_feature_list);
> +
> + return NULL;
> +}
> +
> +static __rte_always_inline rte_graph_feature_data_t *
> +__rte_graph_feature_data_get(struct rte_graph_feature_arc *arc, struct rte_graph_feature *feature,
> + uint8_t index)
> +{
> + RTE_SET_USED(arc);
> + return ((rte_graph_feature_data_t *)(feature->feature_data_by_index +
> + (index * sizeof(rte_graph_feature_data_t))));
> +}
> +
> +/**
> + * Get rte_graph feature data object for a index in feature
> + *
> + * @param arc
> + * feature arc
> + * @param feature
> + * Pointer to feature object
> + * @param index
> + * Index of feature maintained in slow path linked list
> + *
> + * @return
> + * Valid feature data
> + */
> +static __rte_always_inline rte_graph_feature_data_t *
> +rte_graph_feature_data_get(struct rte_graph_feature_arc *arc, struct rte_graph_feature *feature,
> + uint8_t index)
> +{
> + if (likely(index < arc->max_indexes))
> + return __rte_graph_feature_data_get(arc, feature, index);
> +
> + RTE_VERIFY(0);
> +}
> +
> +/**
> + * Fast path API to check if any feature enabled on a feature arc
> + * Typically from arc->start_node process function
> + *
> + * @param arc
> + * Feature arc object
> + * @param[out] plist
> + * Pointer to runtime active feature list which needs to be provided to other
> + * fast path APIs
> + *
> + * @return
> + * 0: If no feature enabled
> + * Non-Zero: Bitmask of features enabled. plist is valid
> + *
> + */
> +static __rte_always_inline uint64_t
> +rte_graph_feature_arc_has_any_feature(struct rte_graph_feature_arc *arc,
> + rte_graph_feature_rt_list_t *plist)
> +{
> + *plist = __atomic_load_n(&arc->active_feature_list, __ATOMIC_RELAXED);
> +
> + return (__atomic_load_n(arc->feature_enable_bitmask + (uint8_t)*plist,
> + __ATOMIC_RELAXED));
> +}
> +
> +/**
> + * Fast path API to check if provided feature is enabled on any interface/index
> + * or not
> + *
> + * @param arc
> + * Feature arc object
> + * @param feature
> + * Input rte_graph_feature_t that needs to be checked
> + * @param[out] plist
> + * Returns active list to caller which needs to be provided to other fast path
> + * APIs
> + *
> + * @return
> + * 1: If feature is enabled in arc
> + * 0: If feature is not enabled in arc
> + */
> +static __rte_always_inline int
> +rte_graph_feature_arc_has_feature(struct rte_graph_feature_arc *arc,
> + rte_graph_feature_t feature,
> + rte_graph_feature_rt_list_t *plist)
> +{
> + uint64_t bitmask = RTE_BIT64(feature);
> +
> + *plist = __atomic_load_n(&arc->active_feature_list, __ATOMIC_RELAXED);
> +
> + return (bitmask & __atomic_load_n(arc->feature_enable_bitmask + (uint8_t)*plist,
> + __ATOMIC_RELAXED));
> +}
> +
> +/**
> + * Prefetch feature arc fast path cache line
> + *
> + * @param arc
> + * RTE_GRAPH feature arc object
> + */
> +static __rte_always_inline void
> +rte_graph_feature_arc_prefetch(struct rte_graph_feature_arc *arc)
> +{
> + rte_prefetch0((void *)&arc->fast_path_variables);
> +}
> +
> +/**
> + * Prefetch feature related fast path cache line
> + *
> + * @param arc
> + * RTE_GRAPH feature arc object
> + * @param list
> + * Pointer to runtime active feature list from rte_graph_feature_arc_has_any_feature();
> + * @param feature
> + * Pointer to feature object
> + */
> +static __rte_always_inline void
> +rte_graph_feature_arc_feature_prefetch(struct rte_graph_feature_arc *arc,
> + const rte_graph_feature_rt_list_t list,
> + rte_graph_feature_t feature)
> +{
> + /* feature cache line */
> + if (likely(rte_graph_feature_is_valid(feature)))
> + rte_prefetch0((void *)__rte_graph_feature_get(arc, feature, list));
> +}
> +
> +/**
> + * Prefetch feature data upfront. Perform sanity
> + *
> + * @param _arc
> + * RTE_GRAPH feature arc object
> + * @param list
> + * Pointer to runtime active feature list from rte_graph_feature_arc_has_any_feature();
> + * @param feature
> + * Pointer to feature object returned from @ref
> + * rte_graph_feature_arc_first_feature_get()
> + * @param index
> + * Interface/index
> + */
> +static __rte_always_inline void
> +rte_graph_feature_arc_data_prefetch(struct rte_graph_feature_arc *arc,
> + const rte_graph_feature_rt_list_t list,
> + rte_graph_feature_t feature, uint32_t index)
> +{
> + if (likely(rte_graph_feature_is_valid(feature)))
> + rte_prefetch0((void *)((uint8_t *)arc->features[list] +
> + offsetof(struct rte_graph_feature,
> feature_data_by_index) +
> + (index * sizeof(rte_graph_feature_data_t))));
> +}
> +
> +/**
> + * Fast path API to get the first enabled feature on an interface index.
> + * Typically required in arc->start_node so that feature data can be
> + * retrieved from the returned feature to steer packets
> + *
> + * @param arc
> + * Feature arc object
> + * @param list
> + * Pointer to runtime active feature list from
> + * rte_graph_feature_arc_has_any_feature() or
> + * rte_graph_feature_arc_has_feature()
> + * @param index
> + * Interface Index
> + * @param[out] feature
> + * Pointer to rte_graph_feature_t.
> + *
> + * @return
> + * 0. Success. feature field is valid
> + * 1. Failure. feature field is invalid
> + *
> + */
> +static __rte_always_inline int
> +rte_graph_feature_arc_first_feature_get(struct rte_graph_feature_arc *arc,
> + const rte_graph_feature_rt_list_t list,
> + uint32_t index,
> + rte_graph_feature_t *feature)
> +{
> + struct rte_graph_feature_list *feature_list = arc->feature_list[list];
> +
> + *feature = feature_list->first_enabled_feature_by_index[index];
> +
> + return rte_graph_feature_is_valid(*feature);
> +}
> +
> +/**
> + * Fast path API to get the next enabled feature on an interface index,
> + * given the current feature
> + *
> + * @param arc
> + * Feature arc object
> + * @param list
> + * Pointer to runtime active feature list from
> + * rte_graph_feature_arc_has_any_feature() or rte_graph_feature_arc_has_feature()
> + * @param index
> + * Interface Index
> + * @param[in,out] feature
> + * Pointer to rte_graph_feature_t. On input, the current feature; set to the
> + * next enabled feature on successful return
> + * @param[out] next_edge
> + * Edge from current feature to next feature. Valid only if next feature is valid
> + *
> + * @return
> + * 0: Success. Next enabled feature is valid.
> + * 1: Failure. Next enabled feature is invalid
> + */
> +static __rte_always_inline int
> +rte_graph_feature_arc_next_feature_get(struct rte_graph_feature_arc *arc,
> + const rte_graph_feature_rt_list_t list,
> + uint32_t index,
> + rte_graph_feature_t *feature,
> + rte_edge_t *next_edge)
> +{
> + rte_graph_feature_data_t *feature_data = NULL;
> + struct rte_graph_feature *f = NULL;
> +
> + if (likely(rte_graph_feature_is_valid(*feature))) {
> + f = __rte_graph_feature_get(arc, *feature, list);
> + feature_data = rte_graph_feature_data_get(arc, f, index);
> + *feature = feature_data->next_enabled_feature;
> + *next_edge = feature_data->next_edge;
> + return (*feature == RTE_GRAPH_FEATURE_INVALID);
> + }
> +
> + return 1;
> +}
> +
> +/**
> + * Set fields for the first enabled feature in an arc and return the edge.
> + * Typically, the returned feature and interface index must be saved in the
> + * rte_mbuf structure to pass this information to the next feature node
> + *
> + * @param arc
> + * Feature arc object
> + * @param list
> + * Pointer to runtime active feature list from
> rte_graph_feature_arc_has_any_feature();
> + * @param index
> + * Index (of interface)
> + * @param[out] gf
> + * Pointer to rte_graph_feature_t. Valid if API returns Success
> + * @param[out] edge
> + * Edge to steer packet from arc->start_node to first enabled feature. Valid
> + * only if API returns Success
> + *
> + * @return
> + * 0: If valid feature is set by API
> + * 1: If valid feature is NOT set by API
> + */
> +static __rte_always_inline int
> +rte_graph_feature_arc_feature_set(struct rte_graph_feature_arc *arc,
> + const rte_graph_feature_rt_list_t list,
> + uint32_t index,
> + rte_graph_feature_t *gf,
> + rte_edge_t *edge)
> +{
> + struct rte_graph_feature_list *feature_list = arc->feature_list[list];
> + struct rte_graph_feature_data *feature_data = NULL;
> + struct rte_graph_feature *feature = NULL;
> + rte_graph_feature_t f;
> +
> + /* reset */
> + *gf = RTE_GRAPH_FEATURE_INVALID;
> + f = feature_list->first_enabled_feature_by_index[index];
> +
> + if (unlikely(rte_graph_feature_is_valid(f))) {
> + feature = __rte_graph_feature_get(arc, f, list);
> + feature_data = rte_graph_feature_data_get(arc, feature,
> index);
> + *gf = f;
> + *edge = feature_data->next_edge;
> + return 0;
> + }
> +
> + return 1;
> +}
> +
> +/**
> + * Get user data corresponding to the current feature, set by the application in
> + * rte_graph_feature_enable()
> + *
> + * @param arc
> + * Feature arc object
> + * @param list
> + * Pointer to runtime active feature list from
> rte_graph_feature_arc_has_any_feature();
> + * @param feature
> + * Feature index
> + * @param index
> + * Interface index
> + *
> + * @return
> + * UINT32_MAX: Failure
> + * Valid user data: Success
> + */
> +static __rte_always_inline uint32_t
> +rte_graph_feature_user_data_get(struct rte_graph_feature_arc *arc,
> + const rte_graph_feature_rt_list_t list,
> + rte_graph_feature_t feature,
> + uint32_t index)
> +{
> + rte_graph_feature_data_t *fdata = NULL;
> + struct rte_graph_feature *f = NULL;
> +
> + if (likely(rte_graph_feature_is_valid(feature))) {
> + f = __rte_graph_feature_get(arc, feature, list);
> + fdata = rte_graph_feature_data_get(arc, f, index);
> + return fdata->user_data;
> + }
> +
> + return UINT32_MAX;
> +}
> +#ifdef __cplusplus
> +}
> +#endif
> +#endif
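
For illustration, a minimal sketch of how a start node's process function could
use the above fast path APIs to steer packets. The arc handle, the next-edge
define, the node name and the use of mbuf->port as the feature index are
assumptions for this sketch, not part of the patch:

#include <rte_mbuf.h>
#include <rte_graph_worker.h>
#include <rte_graph_feature_arc_worker.h>

/* Hypothetical arc handle set up in the control path */
extern rte_graph_feature_arc_t l3_output_arc;
#define L3_OUTPUT_NEXT_TX 0 /* hypothetical regular next edge */

static uint16_t
l3_output_node_process(struct rte_graph *graph, struct rte_node *node,
		       void **objs, uint16_t nb_objs)
{
	struct rte_graph_feature_arc *arc =
		rte_graph_feature_arc_get(l3_output_arc);
	rte_graph_feature_rt_list_t list;
	rte_graph_feature_t feature;
	struct rte_mbuf *mbuf;
	rte_edge_t edge;
	uint64_t any;
	uint16_t i;

	/* Arc-wide check: is any feature enabled at all? */
	any = rte_graph_feature_arc_has_any_feature(arc, &list);

	for (i = 0; i < nb_objs; i++) {
		mbuf = (struct rte_mbuf *)objs[i];
		edge = L3_OUTPUT_NEXT_TX;

		/* Steer to the first feature enabled on this port, if any */
		if (any && !rte_graph_feature_arc_feature_set(arc, list,
							      mbuf->port,
							      &feature, &edge)) {
			/* [feature, mbuf->port] would typically be stored in
			 * the mbuf (e.g. a dynamic field) here so that the
			 * next feature node can continue the walk */
			RTE_SET_USED(feature);
		}
		rte_node_enqueue_x1(graph, node, edge, mbuf);
	}
	return nb_objs;
}
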
> diff --git a/lib/graph/version.map b/lib/graph/version.map
> index 2c83425ddc..82b2469fba 100644
> --- a/lib/graph/version.map
> +++ b/lib/graph/version.map
> @@ -52,3 +52,20 @@ DPDK_25 {
>
> local: *;
> };
> +
> +EXPERIMENTAL {
> + global:
> +
> + # added in 24.11
> + rte_graph_feature_arc_init;
> + rte_graph_feature_arc_create;
> + rte_graph_feature_arc_lookup_by_name;
> + rte_graph_feature_add;
> + rte_graph_feature_enable;
> + rte_graph_feature_validate;
> + rte_graph_feature_disable;
> + rte_graph_feature_lookup;
> + rte_graph_feature_arc_destroy;
> + rte_graph_feature_arc_cleanup;
> + rte_graph_feature_arc_num_enabled_features;
> +};
> --
> 2.43.0
Hi Kiran,
See my inline comments. Somehow I forgot to respond earlier
Thanks,
Nitin
> -----Original Message-----
> From: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>
> Sent: Wednesday, September 11, 2024 10:11 AM
> To: Nitin Saxena <nsaxena@marvell.com>; Jerin Jacob <jerinj@marvell.com>;
> Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>; Zhirun Yan
> <yanzhirun_163@163.com>
> Cc: dev@dpdk.org; Nitin Saxena <nsaxena16@gmail.com>
> Subject: RE: [RFC PATCH 1/3] graph: add feature arc support
>
>
>
> > -----Original Message-----
> > From: Nitin Saxena <nsaxena@marvell.com>
> > Sent: Saturday, September 7, 2024 1:01 PM
> > To: Jerin Jacob <jerinj@marvell.com>; Kiran Kumar Kokkilagadda
> > <kirankumark@marvell.com>; Nithin Kumar Dabilpuram
> > <ndabilpuram@marvell.com>; Zhirun Yan <yanzhirun_163@163.com>
> > Cc: dev@dpdk.org; Nitin Saxena <nsaxena16@gmail.com>
> > Subject: [RFC PATCH 1/3] graph: add feature arc support
> >
> > add feature arc to allow dynamic steering of packets across graph nodes
> > based on protocol features enabled on incoming or outgoing interface
> >
> > Signed-off-by: Nitin Saxena <nsaxena@marvell.com>
> > ---
> > lib/graph/graph_feature_arc.c | 959 +++++++++++++++++++++++
> > lib/graph/meson.build | 2 +
> > lib/graph/rte_graph_feature_arc.h | 373 +++++++++
> > lib/graph/rte_graph_feature_arc_worker.h | 548 +++++++++++++
> > lib/graph/version.map | 17 +
> > 5 files changed, 1899 insertions(+)
> > create mode 100644 lib/graph/graph_feature_arc.c
> > create mode 100644 lib/graph/rte_graph_feature_arc.h
> > create mode 100644 lib/graph/rte_graph_feature_arc_worker.h
> >
> > diff --git a/lib/graph/graph_feature_arc.c b/lib/graph/graph_feature_arc.c
> > new file mode 100644
> > index 0000000000..3b05bac137
> > --- /dev/null
> > +++ b/lib/graph/graph_feature_arc.c
> > @@ -0,0 +1,959 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2024 Marvell International Ltd.
> > + */
> > +
> > +#include "graph_private.h"
> > +#include <rte_graph_feature_arc_worker.h>
> > +#include <rte_malloc.h>
> > +
> > +#define __RTE_GRAPH_FEATURE_ARC_MAX 32
> > +
> > +#define ARC_PASSIVE_LIST(arc) (arc->active_feature_list ^ 0x1)
> > +
> > +#define rte_graph_uint_cast(x) ((unsigned int)x)
> > +#define feat_dbg graph_err
> > +
> > +rte_graph_feature_arc_main_t *__feature_arc_main;
> > +
> > +/* Make sure fast path cache line is compact */
> > +_Static_assert((offsetof(struct rte_graph_feature_arc,
> slow_path_variables)
> > + - offsetof(struct rte_graph_feature_arc, fast_path_variables))
> > + <= RTE_CACHE_LINE_SIZE);
> > +
> > +
> > +static int
> > +feature_lookup(struct rte_graph_feature_arc *arc, const char *feat_name,
> > + struct rte_graph_feature_node_list **ffinfo, uint32_t *slot)
> > +{
> > + struct rte_graph_feature_node_list *finfo = NULL;
> > + const char *name;
> > +
> > + if (!feat_name)
> > + return -1;
> > +
> > + if (slot)
> > + *slot = 0;
> > +
> > + STAILQ_FOREACH(finfo, &arc->all_features, next_feature) {
> > + RTE_VERIFY(finfo->feature_arc == arc);
> > + name = rte_node_id_to_name(finfo->feature_node->id);
> > + if (!strncmp(name, feat_name, RTE_GRAPH_NAMESIZE)) {
> > + if (ffinfo)
> > + *ffinfo = finfo;
> > + return 0;
> > + }
> > + if (slot)
> > + (*slot)++;
> > + }
> > + return -1;
> > +}
> > +
> > +static int
> > +feature_arc_node_info_lookup(struct rte_graph_feature_arc *arc, uint32_t
> > feature_index,
> > + struct rte_graph_feature_node_list **ppfinfo)
> > +{
> > + struct rte_graph_feature_node_list *finfo = NULL;
> > + uint32_t index = 0;
> > +
> > + if (!ppfinfo)
> > + return -1;
> > +
> > + *ppfinfo = NULL;
> > + STAILQ_FOREACH(finfo, &arc->all_features, next_feature) {
> > + if (index == feature_index) {
> > + if (finfo->node_index == feature_index)
> > + return -1;
> > + *ppfinfo = finfo;
> > + }
> > + index++;
> > + }
> > + if (feature_index && (index >= feature_index))
> > + return -1;
> > +
> > + return 0;
> > +}
> > +
> > +static void
> > +prepare_feature_arc(struct rte_graph_feature_arc *arc)
> > +{
> > + struct rte_graph_feature_node_list *finfo = NULL;
> > + uint32_t index = 0;
> > +
> > + STAILQ_FOREACH(finfo, &arc->all_features, next_feature) {
> > + finfo->node_index = index;
> > + index++;
> > + }
> > +}
> > +
> > +static int
> > +feature_arc_lookup(rte_graph_feature_arc_t _arc)
> > +{
> > + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> > + rte_graph_feature_arc_main_t *dm = __feature_arc_main;
> > + uint32_t iter;
> > +
> > + if (!__feature_arc_main)
> > + return -1;
> > +
> > + for (iter = 0; iter < dm->max_feature_arcs; iter++) {
> > + if (dm->feature_arcs[iter] ==
> > RTE_GRAPH_FEATURE_ARC_INITIALIZER)
> > + continue;
> > +
> > + if (arc == (rte_graph_feature_arc_get(dm-
> > >feature_arcs[iter])))
> > + return 0;
> > + }
> > + return -1;
> > +}
> > +
> > +static int
> > +get_existing_edge(const char *arc_name, struct rte_node_register
> > *parent_node,
> > + struct rte_node_register *child_node, rte_edge_t *_edge)
> > +{
> > + char **next_edges = NULL;
> > + uint32_t count, i;
> > +
> > + RTE_SET_USED(arc_name);
> > +
> > + count = rte_node_edge_get(parent_node->id, NULL);
> > + next_edges = malloc(count);
> > +
> > + if (!next_edges)
> > + return -1;
> > +
> > + count = rte_node_edge_get(parent_node->id, next_edges);
> > + for (i = 0; i < count; i++) {
> > + if (strstr(child_node->name, next_edges[i])) {
> > + feat_dbg("%s: Edge exists [%s[%u]: \"%s\"]",
> > arc_name,
> > + parent_node->name, i, child_node->name);
> > + if (_edge)
> > + *_edge = (rte_edge_t)i;
> > +
> > + free(next_edges);
> > + return 0;
> > + }
> > + }
> > + free(next_edges);
> > +
> > + return -1;
> > +}
> > +
> > +static int
> > +connect_graph_nodes(struct rte_node_register *parent_node, struct
> > rte_node_register *child_node,
> > + rte_edge_t *_edge, char *arc_name)
> > +{
> > + const char *next_node = NULL;
> > + rte_edge_t edge;
> > +
> > + if (!get_existing_edge(arc_name, parent_node, child_node, &edge)) {
> > + feat_dbg("%s: add_feature: Edge reused [%s[%u]: \"%s\"]",
> > arc_name,
> > + parent_node->name, edge, child_node->name);
> > +
> > + if (_edge)
> > + *_edge = edge;
> > +
> > + return 0;
> > + }
> > +
> > + /* Node to be added */
> > + next_node = child_node->name;
> > +
> > + edge = rte_node_edge_update(parent_node->id,
> > RTE_EDGE_ID_INVALID, &next_node, 1);
> > +
> > + if (edge == RTE_EDGE_ID_INVALID) {
> > + graph_err("edge invalid");
> > + return -1;
> > + }
> > + edge = rte_node_edge_count(parent_node->id) - 1;
> > +
> > + feat_dbg("%s: add_feature: edge added [%s[%u]: \"%s\"]", arc_name,
> > parent_node->name, edge,
> > + child_node->name);
> > +
> > + if (_edge)
> > + *_edge = edge;
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +feature_arc_init(rte_graph_feature_arc_main_t **pfl, uint32_t
> > max_feature_arcs)
> > +{
> > + rte_graph_feature_arc_main_t *pm = NULL;
> > + uint32_t i;
> > + size_t sz;
> > +
> > + if (!pfl)
> > + return -1;
> > +
> > + sz = sizeof(rte_graph_feature_arc_main_t) +
> > + (sizeof(pm->feature_arcs[0]) * max_feature_arcs);
> > +
> > + pm = malloc(sz);
> > + if (!pm)
> > + return -1;
> > +
> > + memset(pm, 0, sz);
> > +
> > + for (i = 0; i < max_feature_arcs; i++)
> > + pm->feature_arcs[i] =
> > RTE_GRAPH_FEATURE_ARC_INITIALIZER;
> > +
> > + pm->max_feature_arcs = max_feature_arcs;
> > +
> > + *pfl = pm;
> > +
> > + return 0;
> > +}
> > +
> > +int
> > +rte_graph_feature_arc_init(int max_feature_arcs)
> > +{
> > + if (!max_feature_arcs)
> > + return -1;
> > +
> > + if (__feature_arc_main)
> > + return -1;
> > +
> > + return feature_arc_init(&__feature_arc_main, max_feature_arcs);
> > +}
> > +
> > +static void
> > +feature_arc_list_reset(struct rte_graph_feature_arc *arc, uint32_t
> list_index)
> > +{
> > + rte_graph_feature_data_t *fdata = NULL;
> > + rte_graph_feature_list_t *list = NULL;
> > + struct rte_graph_feature *feat = NULL;
> > + uint32_t i, j;
> > +
> > + list = arc->feature_list[list_index];
> > + feat = arc->features[list_index];
> > +
> > + /*Initialize variables*/
> > + memset(feat, 0, arc->feature_size);
> > + memset(list, 0, arc->feature_list_size);
> > +
> > + /* Initialize feature and feature_data */
> > + for (i = 0; i < arc->max_features; i++) {
> > + feat = __rte_graph_feature_get(arc, i, list_index);
> > + feat->this_feature_index = i;
> > +
> > + for (j = 0; j < arc->max_indexes; j++) {
> > + fdata = rte_graph_feature_data_get(arc, feat, j);
> > + fdata->next_enabled_feature =
> > RTE_GRAPH_FEATURE_INVALID;
> > + fdata->next_edge = UINT16_MAX;
> > + fdata->user_data = UINT32_MAX;
> > + }
> > + }
> > +
> > + for (i = 0; i < arc->max_indexes; i++)
> > + list->first_enabled_feature_by_index[i] =
> > RTE_GRAPH_FEATURE_INVALID;
> > +}
> > +
> > +static int
> > +feature_arc_list_init(struct rte_graph_feature_arc *arc, const char
> > *flist_name,
> > + rte_graph_feature_list_t **pplist,
> > + struct rte_graph_feature **ppfeature, uint32_t
> > list_index)
> > +{
> > + char fname[2 * RTE_GRAPH_FEATURE_ARC_NAMELEN];
> > + size_t list_size, feat_size, fdata_size;
> > + rte_graph_feature_list_t *list = NULL;
> > + struct rte_graph_feature *feat = NULL;
> > +
> > + list_size = sizeof(list->first_enabled_feature_by_index[0]) * arc-
> > >max_indexes;
> > +
> > + list = rte_malloc(flist_name, list_size, RTE_CACHE_LINE_SIZE);
> > + if (!list)
> > + return -ENOMEM;
> > +
> > + fdata_size = arc->max_indexes * sizeof(rte_graph_feature_data_t);
> > +
> > + /* Let one feature capture complete cache lines */
> > + feat_size = RTE_ALIGN_CEIL(sizeof(struct rte_graph_feature) +
> > fdata_size,
> > + RTE_CACHE_LINE_SIZE);
> > +
> > + snprintf(fname, sizeof(fname), "%s-%s", arc->feature_arc_name,
> > "feat");
> > +
> > + feat = rte_malloc(fname, feat_size * arc->max_features,
> > RTE_CACHE_LINE_SIZE);
> > + if (!feat) {
> > + rte_free(list);
> > + return -ENOMEM;
> > + }
> > + arc->feature_size = feat_size;
> > + arc->feature_data_size = fdata_size;
> > + arc->feature_list_size = list_size;
> > +
> > + /* Initialize list */
> > + list->indexed_by_features = feat;
> > + *pplist = list;
> > + *ppfeature = feat;
> > +
> > + feature_arc_list_reset(arc, list_index);
> > +
> > + return 0;
> > +}
> > +
> > +static void
> > +feature_arc_list_destroy(rte_graph_feature_list_t *list)
> > +{
> > + rte_free(list->indexed_by_features);
> Do you need to free the individual rte_graph_feature objects here that are
> allocated in feature_arc_list_init?
Nitin> It looks correct to me: feature_arc_list_destroy() frees all memory allocated in feature_arc_list_init(), i.e. it calls rte_free() for every rte_malloc() done in feature_arc_list_init(). To make this clearer, I have refactored the function and added an appropriate comment from the v2 patch set onwards.
>
> > + rte_free(list);
> > +}
> > +
> > +int
> > +rte_graph_feature_arc_create(const char *feature_arc_name, int
> > max_features, int max_indexes,
> > + struct rte_node_register *start_node,
> > rte_graph_feature_arc_t *_arc)
> > +{
> > + char name[2 * RTE_GRAPH_FEATURE_ARC_NAMELEN];
> > + rte_graph_feature_arc_main_t *dfm = NULL;
> > + struct rte_graph_feature_arc *arc = NULL;
> > + struct rte_graph_feature_data *gfd = NULL;
> > + struct rte_graph_feature *df = NULL;
> > + uint32_t iter, j, arc_index;
> > + size_t sz;
> > +
> > + if (!_arc)
> > + return -1;
> > +
> > + if (max_features < 2)
> > + return -1;
> > +
> > + if (!start_node)
> > + return -1;
> > +
> > + if (!feature_arc_name)
> > + return -1;
> > +
> > + if (max_features > RTE_GRAPH_FEATURE_MAX_PER_ARC) {
> > + graph_err("Invalid max features: %u", max_features);
> > + return -1;
> > + }
> > +
> > + /*
> > + * Application hasn't called rte_graph_feature_arc_init(). Initialize
> with
> > + * default values
> > + */
> > + if (!__feature_arc_main) {
> > + if
> > (rte_graph_feature_arc_init((int)__RTE_GRAPH_FEATURE_ARC_MAX) < 0) {
> > + graph_err("rte_graph_feature_arc_init() failed");
> > + return -1;
> > + }
> > + }
> > +
> > + dfm = __feature_arc_main;
> > +
> > + /* threshold check */
> > + if (dfm->num_feature_arcs > (dfm->max_feature_arcs - 1)) {
> > + graph_err("max threshold for num_feature_arcs: %d
> > reached",
> > + dfm->max_feature_arcs - 1);
> > + return -1;
> > + }
> > + /* Find the free slot for feature arc */
> > + for (iter = 0; iter < dfm->max_feature_arcs; iter++) {
> > + if (dfm->feature_arcs[iter] ==
> > RTE_GRAPH_FEATURE_ARC_INITIALIZER)
> > + break;
> > + }
> > + arc_index = iter;
> > +
> > + if (arc_index >= dfm->max_feature_arcs) {
> > + graph_err("No free slot found for num_feature_arc");
> > + return -1;
> > + }
> > +
> > + /* This should not happen */
> > + RTE_VERIFY(dfm->feature_arcs[arc_index] ==
> > RTE_GRAPH_FEATURE_ARC_INITIALIZER);
> > +
> > + /* size of feature arc + feature_bit_mask_by_index */
> > + sz = sizeof(*arc) + (sizeof(uint64_t) * max_indexes);
> > +
> > + arc = rte_malloc(feature_arc_name, sz, RTE_CACHE_LINE_SIZE);
> > +
> > + if (!arc) {
> > + graph_err("malloc failed for feature_arc_create()");
> > + return -1;
> > + }
> > +
> > + memset(arc, 0, sz);
> > +
> > + /* Initialize rte_graph port group fixed variables */
> > + STAILQ_INIT(&arc->all_features);
> > + strncpy(arc->feature_arc_name, feature_arc_name,
> > RTE_GRAPH_FEATURE_ARC_NAMELEN - 1);
> > + arc->feature_arc_main = (void *)dfm;
> > + arc->start_node = start_node;
> > + arc->max_features = max_features;
> > + arc->max_indexes = max_indexes;
> > +
> > + snprintf(name, sizeof(name), "%s-%s", feature_arc_name, "flist0");
> > +
> > + if (feature_arc_list_init(arc, name, &arc->feature_list[0], &arc-
> > >features[0], 0) < 0) {
> > + rte_free(arc);
> > + graph_err("feature_arc_list_init(0) failed");
> > + return -1;
> > + }
> > + snprintf(name, sizeof(name), "%s-%s", feature_arc_name, "flist1");
> > +
> > + if (feature_arc_list_init(arc, name, &arc->feature_list[1], &arc-
> > >features[1], 1) < 0) {
> > + feature_arc_list_destroy(arc->feature_list[0]);
> > + graph_err("feature_arc_list_init(1) failed");
> > + return -1;
> > + }
> > +
> > + for (iter = 0; iter < arc->max_features; iter++) {
> > + df = rte_graph_feature_get(arc, iter);
> > + for (j = 0; j < arc->max_indexes; j++) {
> > + gfd = rte_graph_feature_data_get(arc, df, j);
> > + gfd->next_enabled_feature =
> > RTE_GRAPH_FEATURE_INVALID;
> > + }
> > + }
> > + arc->feature_arc_index = arc_index;
> > + dfm->feature_arcs[arc->feature_arc_index] =
> > (rte_graph_feature_arc_t)arc;
> > + dfm->num_feature_arcs++;
> > +
> > + if (_arc)
> > + *_arc = (rte_graph_feature_arc_t)arc;
> > +
> > + return 0;
> > +}
> > +
> > +int
> > +rte_graph_feature_add(rte_graph_feature_arc_t _arc, struct
> > rte_node_register *feature_node,
> > + const char *after_feature, const char *before_feature)
> > +{
> > + struct rte_graph_feature_node_list *after_finfo = NULL, *before_finfo
> > = NULL;
> > + struct rte_graph_feature_node_list *temp = NULL, *finfo = NULL;
> > + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> > + uint32_t slot, add_flag;
> > + rte_edge_t edge = -1;
> > +
> > + RTE_VERIFY(arc->feature_arc_main == __feature_arc_main);
> > +
> > + if (feature_arc_lookup(_arc)) {
> > + graph_err("invalid feature arc: 0x%016" PRIx64,
> > (uint64_t)_arc);
> > + return -1;
> > + }
> > +
> > + if (arc->runtime_enabled_features) {
> > + graph_err("adding features after enabling any one of them is
> > not supported");
> > + return -1;
> > + }
> > +
> > + if ((after_feature != NULL) && (before_feature != NULL) &&
> > + (after_feature == before_feature)) {
> > + graph_err("after_feature and before_feature are same
> > '%s:%s]", after_feature,
> > + before_feature);
> > + return -1;
> > + }
> > +
> > + if (!feature_node) {
> > + graph_err("feature_node: %p invalid", feature_node);
> > + return -1;
> > + }
> > +
> > + arc = rte_graph_feature_arc_get(_arc);
> > +
> > + if (feature_node->id == RTE_NODE_ID_INVALID) {
> > + graph_err("Invalid node: %s", feature_node->name);
> > + return -1;
> > + }
> > +
> > + if (!feature_lookup(arc, feature_node->name, &finfo, &slot)) {
> > + graph_err("%s feature already added", feature_node->name);
> > + return -1;
> > + }
> > +
> > + if (slot >= RTE_GRAPH_FEATURE_MAX_PER_ARC) {
> > + graph_err("Max slot %u reached for feature addition", slot);
> > + return -1;
> > + }
> > +
> > + if (strstr(feature_node->name, arc->start_node->name)) {
> > + graph_err("Feature %s cannot point to itself: %s",
> > feature_node->name,
> > + arc->start_node->name);
> > + return -1;
> > + }
> > +
> > + if (connect_graph_nodes(arc->start_node, feature_node, &edge, arc-
> > >feature_arc_name)) {
> > + graph_err("unable to connect %s -> %s", arc->start_node-
> > >name, feature_node->name);
> > + return -1;
> > + }
> > +
> > + finfo = malloc(sizeof(*finfo));
> > + if (!finfo)
> > + return -1;
> > +
> > + memset(finfo, 0, sizeof(*finfo));
> > +
> > + finfo->feature_arc = (void *)arc;
> > + finfo->feature_node = feature_node;
> > + finfo->edge_to_this_feature = edge;
> > +
> > + /* Check for before and after constraints */
> > + if (before_feature) {
> > + /* before_feature sanity */
> > + if (feature_lookup(arc, before_feature, &before_finfo, NULL))
> > + SET_ERR_JMP(EINVAL, finfo_free,
> > + "Invalid before feature name: %s",
> > before_feature);
> > +
> > + if (!before_finfo)
> > + SET_ERR_JMP(EINVAL, finfo_free,
> > + "before_feature %s does not exist",
> > before_feature);
> > +
> > + /*
> > + * Starting from 0 to before_feature, continue connecting
> > edges
> > + */
> > + add_flag = 1;
> > + STAILQ_FOREACH(temp, &arc->all_features, next_feature) {
> > + /*
> > + * As soon as we see before_feature. stop adding
> > edges
> > + */
> > + if (!strncmp(temp->feature_node->name,
> > before_feature,
> > + RTE_GRAPH_NAMESIZE))
> > + if (!connect_graph_nodes(finfo-
> > >feature_node, temp->feature_node,
> > + &edge, arc-
> > >feature_arc_name))
> > + add_flag = 0;
> > +
> > + if (add_flag)
> > + connect_graph_nodes(temp->feature_node,
> > finfo->feature_node, NULL,
> > + arc->feature_arc_name);
> > + }
> > + }
> > +
> > + if (after_feature) {
> > + if (feature_lookup(arc, after_feature, &after_finfo, NULL))
> > + SET_ERR_JMP(EINVAL, finfo_free,
> > + "Invalid after feature_name %s",
> > after_feature);
> > +
> > + if (!after_finfo)
> > + SET_ERR_JMP(EINVAL, finfo_free,
> > + "after_feature %s does not exist",
> > after_feature);
> > +
> > + /* Starting from after_feature to end continue connecting
> > edges */
> > + add_flag = 0;
> > + STAILQ_FOREACH(temp, &arc->all_features, next_feature) {
> > + /* We have already seen after_feature now */
> > + if (add_flag)
> > + /* Add all features as next node to current
> > feature*/
> > + connect_graph_nodes(finfo->feature_node,
> > temp->feature_node, NULL,
> > + arc->feature_arc_name);
> > +
> > + /* as soon as we see after_feature. start adding edges
> > + * from next iteration
> > + */
> > + if (!strncmp(temp->feature_node->name,
> > after_feature, RTE_GRAPH_NAMESIZE))
> > + /* connect after_feature to this feature */
> > + if (!connect_graph_nodes(temp-
> > >feature_node, finfo->feature_node,
> > + &edge, arc-
> > >feature_arc_name))
> > + add_flag = 1;
> > + }
> > +
> > + /* add feature next to after_feature */
> > + STAILQ_INSERT_AFTER(&arc->all_features, after_finfo, finfo,
> > next_feature);
> > + } else {
> > + if (before_finfo) {
> > + after_finfo = NULL;
> > + STAILQ_FOREACH(temp, &arc->all_features,
> > next_feature) {
> > + if (before_finfo == temp) {
> > + if (after_finfo)
> > + STAILQ_INSERT_AFTER(&arc-
> > >all_features, after_finfo,
> > + finfo,
> > next_feature);
> > + else
> > + STAILQ_INSERT_HEAD(&arc-
> > >all_features, finfo,
> > +
> > next_feature);
> > +
> > + return 0;
> > + }
> > + after_finfo = temp;
> > + }
> > + } else {
> > + STAILQ_INSERT_TAIL(&arc->all_features, finfo,
> > next_feature);
> > + }
> > + }
> > +
> > + return 0;
> > +
> > +finfo_free:
> > + free(finfo);
> > +
> > + return -1;
> > +}
> > +
> > +int
> > +rte_graph_feature_lookup(rte_graph_feature_arc_t _arc, const char
> > *feature_name,
> > + rte_graph_feature_t *feat)
> > +{
> > + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> > + struct rte_graph_feature_node_list *finfo = NULL;
> > + uint32_t slot;
> > +
> > + if (!feature_lookup(arc, feature_name, &finfo, &slot)) {
> > + *feat = (rte_graph_feature_t) slot;
> > + return 0;
> > + }
> > +
> > + return -1;
> > +}
> > +
> > +int
> > +rte_graph_feature_validate(rte_graph_feature_arc_t _arc, uint32_t index,
> > const char *feature_name,
> > + int is_enable_disable)
> > +{
> > + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> > + struct rte_graph_feature_node_list *finfo = NULL;
> > + struct rte_graph_feature *gf = NULL;
> > + uint32_t slot;
> > +
> > + /* validate _arc */
> > + if (arc->feature_arc_main != __feature_arc_main) {
> > + graph_err("invalid feature arc: 0x%016" PRIx64,
> > (uint64_t)_arc);
> > + return -EINVAL;
> > + }
> > +
> > + /* validate index */
> > + if (index >= arc->max_indexes) {
> > + graph_err("%s: Invalid provided index: %u >= %u configured",
> > arc->feature_arc_name,
> > + index, arc->max_indexes);
> > + return -1;
> > + }
> > +
> > + /* validate feature_name is already added or not */
> > + if (feature_lookup(arc, feature_name, &finfo, &slot)) {
> > + graph_err("%s: No feature %s added", arc-
> > >feature_arc_name, feature_name);
> > + return -EINVAL;
> > + }
> > +
> > + if (!finfo) {
> > + graph_err("%s: No feature: %s found", arc-
> > >feature_arc_name, feature_name);
> > + return -EINVAL;
> > + }
> > +
> > + /* slot should be in valid range */
> > + if (slot >= arc->max_features) {
> > + graph_err("%s/%s: Invalid free slot %u(max=%u) for feature",
> > arc->feature_arc_name,
> > + feature_name, slot, arc->max_features);
> > + return -EINVAL;
> > + }
> > +
> > + /* slot should be in range of 0 - 63 */
> > + if (slot > (RTE_GRAPH_FEATURE_MAX_PER_ARC - 1)) {
> > + graph_err("%s/%s: Invalid slot: %u", arc->feature_arc_name,
> > + feature_name, slot);
> > + return -EINVAL;
> > + }
> > +
> > + if (finfo->node_index != slot) {
> > + graph_err("%s/%s: feature lookup slot mismatch with finfo
> > index: %u and lookup slot: %u",
> > + arc->feature_arc_name, feature_name, finfo-
> > >node_index, slot);
> > + return -1;
> > + }
> > +
> > + /* Get feature from active list */
> > + gf = __rte_graph_feature_get(arc, slot, ARC_PASSIVE_LIST(arc));
> > + if (gf->this_feature_index != slot) {
> > + graph_err("%s: %s received feature_index: %u does not
> match
> > with saved feature_index: %u",
> > + arc->feature_arc_name, feature_name, slot, gf-
> > >this_feature_index);
> > + return -1;
> > + }
> > +
> > + if (is_enable_disable && (arc->feature_bit_mask_by_index[index] &
> > + RTE_BIT64(slot))) {
> > + graph_err("%s: %s already enabled on index: %u",
> > + arc->feature_arc_name, feature_name, index);
> > + return -1;
> > + }
> > +
> > + if (!is_enable_disable && !arc->runtime_enabled_features) {
> > + graph_err("%s: No feature enabled to disable", arc-
> > >feature_arc_name);
> > + return -1;
> > + }
> > +
> > + if (!is_enable_disable && !(arc->feature_bit_mask_by_index[index] &
> > RTE_BIT64(slot))) {
> > + graph_err("%s: %s not enabled in bitmask for index: %u",
> > + arc->feature_arc_name, feature_name, index);
> > + return -1;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static void
> > +copy_fastpath_user_data(struct rte_graph_feature_arc *arc, uint16_t
> > dest_list_index,
> > + uint16_t src_list_index)
> > +{
> > + rte_graph_feature_data_t *sgfd = NULL, *dgfd = NULL;
> > + struct rte_graph_feature *sgf = NULL, *dgf = NULL;
> > + uint32_t i, j;
> > +
> > + for (i = 0; i < arc->max_features; i++) {
> > + sgf = __rte_graph_feature_get(arc, i, src_list_index);
> > + dgf = __rte_graph_feature_get(arc, i, dest_list_index);
> > + for (j = 0; j < arc->max_indexes; j++) {
> > + sgfd = rte_graph_feature_data_get(arc, sgf, j);
> > + dgfd = rte_graph_feature_data_get(arc, dgf, j);
> > + dgfd->user_data = sgfd->user_data;
> > + }
> > + }
> > +}
> > +
> > +static void
> > +refill_feature_fastpath_data(struct rte_graph_feature_arc *arc, uint16_t
> > list_index)
> > +{
> > + struct rte_graph_feature_node_list *finfo = NULL, *prev_finfo = NULL;
> > + struct rte_graph_feature_data *gfd = NULL, *prev_gfd = NULL;
> > + struct rte_graph_feature *gf = NULL, *prev_gf = NULL;
> > + rte_graph_feature_list_t *flist = NULL;
> > + uint32_t fi, di, prev_fi;
> > + uint64_t bitmask;
> > + rte_edge_t edge;
> > +
> > + flist = arc->feature_list[list_index];
> > +
> > + for (di = 0; di < arc->max_indexes; di++) {
> > + bitmask = arc->feature_bit_mask_by_index[di];
> > + prev_fi = RTE_GRAPH_FEATURE_INVALID;
> > + /* for each feature set for index, set fast path data */
> > + while (rte_bsf64_safe(bitmask, &fi)) {
> > + gf = __rte_graph_feature_get(arc, fi, list_index);
> > + gfd = rte_graph_feature_data_get(arc, gf, di);
> > + feature_arc_node_info_lookup(arc, fi, &finfo);
> > +
> > + /* If previous feature_index was valid in last loop */
> > + if (prev_fi != RTE_GRAPH_FEATURE_INVALID) {
> > + prev_gf = __rte_graph_feature_get(arc,
> > prev_fi, list_index);
> > + prev_gfd = rte_graph_feature_data_get(arc,
> > prev_gf, di);
> > + /*
> > + * Get edge of previous feature node
> > connecting to this feature node
> > + */
> > + feature_arc_node_info_lookup(arc, prev_fi,
> > &prev_finfo);
> > + if (!get_existing_edge(arc->feature_arc_name,
> > + prev_finfo->feature_node,
> > + finfo->feature_node,
> > &edge)) {
> > + feat_dbg("[%s/%s(%2u)/idx:%2u]:
> > %s[%u] = %s",
> > + arc->feature_arc_name,
> > + prev_finfo->feature_node-
> > >name, prev_fi, di,
> > + prev_finfo->feature_node-
> > >name,
> > + edge, finfo->feature_node-
> > >name);
> > + /* Copy feature index for next
> > iteration*/
> > + gfd->next_edge = edge;
> > + prev_fi = fi;
> > + /*
> > + * Fill current feature as next enabled
> > + * feature to previous one
> > + */
> > + prev_gfd->next_enabled_feature = fi;
> > + } else {
> > + /* Should not fail */
> > + RTE_VERIFY(0);
> > + }
> > + }
> > + /* On first feature edge of the node to be added */
> > + if (fi == rte_bsf64(arc-
> > >feature_bit_mask_by_index[di])) {
> > + if (!get_existing_edge(arc->feature_arc_name,
> > arc->start_node,
> > + finfo->feature_node,
> > + &edge)) {
> > + feat_dbg("[%s/%s/%2u/idx:%2u]: 1st
> > feat %s[%u] = %s",
> > + arc->feature_arc_name,
> > + arc->start_node->name, fi, di,
> > + arc->start_node->name,
> > edge,
> > + finfo->feature_node->name);
> > + /* Copy feature index for next
> > iteration*/
> > + gfd->next_edge = edge;
> > + prev_fi = fi;
> > + /* Set first feature set array for
> > index*/
> > + flist-
> > >first_enabled_feature_by_index[di] = fi;
> > + } else {
> > + /* Should not fail */
> > + RTE_VERIFY(0);
> > + }
> > + }
> > + /* Clear current feature index */
> > + bitmask &= ~RTE_BIT64(fi);
> > + }
> > + }
> > +}
> > +
> > +int
> > +rte_graph_feature_enable(rte_graph_feature_arc_t _arc, uint32_t index,
> > const
> > + char *feature_name, int32_t user_data)
> > +{
> > + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> > + struct rte_graph_feature_node_list *finfo = NULL;
> > + struct rte_graph_feature_data *gfd = NULL;
> > + rte_graph_feature_rt_list_t passive_list;
> > + struct rte_graph_feature *gf = NULL;
> > + uint64_t fp_bitmask;
> > + uint32_t slot;
> > +
> > + if (rte_graph_feature_validate(_arc, index, feature_name, 1))
> > + return -1;
> > +
> > + /** This should not fail as validate() has passed */
> > + if (feature_lookup(arc, feature_name, &finfo, &slot))
> > + RTE_VERIFY(0);
> > +
> > + if (!arc->runtime_enabled_features)
> > + prepare_feature_arc(arc);
> > +
> > + passive_list = ARC_PASSIVE_LIST(arc);
> > +
> > + gf = __rte_graph_feature_get(arc, slot, passive_list);
> > + gfd = rte_graph_feature_data_get(arc, gf, index);
> > +
> > + feat_dbg("%s/%s: Enabling feature on list: %u for index: %u at feature
> > slot %u",
> > + arc->feature_arc_name, feature_name, passive_list, index,
> > slot);
> > +
> > + /* Reset feature list */
> > + feature_arc_list_reset(arc, passive_list);
> > +
> > + /* Copy user-data */
> > + copy_fastpath_user_data(arc, passive_list, arc->active_feature_list);
> > +
> > + /* Set current user-data */
> > + gfd->user_data = user_data;
> > +
> > + /* Set bitmask in control path bitmask */
> > + rte_bit_relaxed_set64(rte_graph_uint_cast(slot), &arc-
> > >feature_bit_mask_by_index[index]);
> > + refill_feature_fastpath_data(arc, passive_list);
> > +
> > + /* Set fast path enable bitmask */
> > + fp_bitmask = __atomic_load_n(&arc-
> > >feature_enable_bitmask[passive_list], __ATOMIC_RELAXED);
> > + fp_bitmask |= RTE_BIT64(slot);
> > + __atomic_store(&arc->feature_enable_bitmask[passive_list],
> > &fp_bitmask, __ATOMIC_RELAXED);
> > +
> > + /* Slow path updates */
> > + arc->runtime_enabled_features++;
> > +
> > + /* Increase feature node info reference count */
> > + finfo->ref_count++;
> > +
> > + /* Store release semantics for active_list update */
> > + __atomic_store(&arc->active_feature_list, &passive_list,
> > __ATOMIC_RELEASE);
> > +
> > + return 0;
> > +}
> > +
> > +int
> > +rte_graph_feature_disable(rte_graph_feature_arc_t _arc, uint32_t index,
> > const char *feature_name)
> > +{
> > + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> > + struct rte_graph_feature_data *gfd = NULL;
> > + struct rte_graph_feature_node_list *finfo = NULL;
> > + rte_graph_feature_rt_list_t passive_list;
> > + struct rte_graph_feature *gf = NULL;
> > + uint32_t slot;
> > +
> > + if (rte_graph_feature_validate(_arc, index, feature_name, 0))
> > + return -1;
> > +
> > + if (feature_lookup(arc, feature_name, &finfo, &slot))
> > + return -1;
> > +
> > + passive_list = ARC_PASSIVE_LIST(arc);
> > +
> > + gf = __rte_graph_feature_get(arc, slot, passive_list);
> > + gfd = rte_graph_feature_data_get(arc, gf, index);
> > +
> > + feat_dbg("%s/%s: Disabling feature for index: %u at feature slot %u",
> > arc->feature_arc_name,
> > + feature_name, index, slot);
> > +
> > + rte_bit_relaxed_clear64(rte_graph_uint_cast(slot), &arc-
> > >feature_bit_mask_by_index[index]);
> > +
> > + /* Set fast path enable bitmask */
> > + arc->feature_enable_bitmask[passive_list] &= ~(RTE_BIT64(slot));
> > +
> > + /* Reset feature list */
> > + feature_arc_list_reset(arc, passive_list);
> > +
> > + /* Copy user-data */
> > + copy_fastpath_user_data(arc, passive_list, arc->active_feature_list);
> > +
> > + /* Reset current user-data */
> > + gfd->user_data = ~0;
> > +
> > + refill_feature_fastpath_data(arc, passive_list);
> > +
> > + finfo->ref_count--;
> > + arc->runtime_enabled_features--;
> > +
> > + /* Store release semantics for active_list update */
> > + __atomic_store(&arc->active_feature_list, &passive_list,
> > __ATOMIC_RELEASE);
> > +
> > + return 0;
> > +}
> > +
> > +int
> > +rte_graph_feature_arc_destroy(rte_graph_feature_arc_t _arc)
> > +{
> > + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> > + rte_graph_feature_arc_main_t *dm = __feature_arc_main;
> > + struct rte_graph_feature_node_list *node_info = NULL;
> > +
> > + while (!STAILQ_EMPTY(&arc->all_features)) {
> > + node_info = STAILQ_FIRST(&arc->all_features);
> > + STAILQ_REMOVE_HEAD(&arc->all_features, next_feature);
> > + free(node_info);
> > + }
> > + feature_arc_list_destroy(arc->feature_list[0]);
> > + feature_arc_list_destroy(arc->feature_list[1]);
> > + rte_free(arc->features[0]);
> > + rte_free(arc->features[1]);
> > +
> > + dm->feature_arcs[arc->feature_arc_index] =
> > RTE_GRAPH_FEATURE_ARC_INITIALIZER;
> > +
> > + rte_free(arc);
> > + return 0;
> > +}
> > +
> > +int
> > +rte_graph_feature_arc_cleanup(void)
> > +{
> > + rte_graph_feature_arc_main_t *dm = __feature_arc_main;
> > + uint32_t iter;
> > +
> > + if (!__feature_arc_main)
> > + return -1;
> > +
> > + for (iter = 0; iter < dm->max_feature_arcs; iter++) {
> > + if (dm->feature_arcs[iter] ==
> > RTE_GRAPH_FEATURE_ARC_INITIALIZER)
> > + continue;
> > +
> > + rte_graph_feature_arc_destroy((rte_graph_feature_arc_t)dm-
> > >feature_arcs[iter]);
> > + }
> > + free(dm);
> > +
> > + __feature_arc_main = NULL;
> > +
> > + return 0;
> > +}
> > +
> > +int
> > +rte_graph_feature_arc_lookup_by_name(const char *arc_name,
> > rte_graph_feature_arc_t *_arc)
> > +{
> > + rte_graph_feature_arc_main_t *dm = __feature_arc_main;
> > + struct rte_graph_feature_arc *arc = NULL;
> > + uint32_t iter;
> > +
> > + if (!__feature_arc_main)
> > + return -1;
> > +
> > + for (iter = 0; iter < dm->max_feature_arcs; iter++) {
> > + if (dm->feature_arcs[iter] ==
> > RTE_GRAPH_FEATURE_ARC_INITIALIZER)
> > + continue;
> > +
> > + arc = rte_graph_feature_arc_get(dm->feature_arcs[iter]);
> > +
> > + if (strstr(arc_name, arc->feature_arc_name)) {
> > + if (_arc)
> > + *_arc = (rte_graph_feature_arc_t)arc;
> > + return 0;
> > + }
> > + }
> > +
> > + return -1;
> > +}
> > +
> > +int
> > +rte_graph_feature_arc_num_enabled_features(rte_graph_feature_arc_t
> > _arc)
> > +{
> > + struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
> > +
> > + return arc->runtime_enabled_features;
> > +}
> > +
> > +
> > diff --git a/lib/graph/meson.build b/lib/graph/meson.build
> > index 0cb15442ab..d916176fb7 100644
> > --- a/lib/graph/meson.build
> > +++ b/lib/graph/meson.build
> > @@ -14,11 +14,13 @@ sources = files(
> > 'graph_debug.c',
> > 'graph_stats.c',
> > 'graph_populate.c',
> > + 'graph_feature_arc.c',
> > 'graph_pcap.c',
> > 'rte_graph_worker.c',
> > 'rte_graph_model_mcore_dispatch.c',
> > )
> > headers = files('rte_graph.h', 'rte_graph_worker.h')
> > +headers += files('rte_graph_feature_arc.h',
> 'rte_graph_feature_arc_worker.h')
> > indirect_headers += files(
> > 'rte_graph_model_mcore_dispatch.h',
> > 'rte_graph_model_rtc.h',
> > diff --git a/lib/graph/rte_graph_feature_arc.h
> > b/lib/graph/rte_graph_feature_arc.h
> > new file mode 100644
> > index 0000000000..e3bf4eb73d
> > --- /dev/null
> > +++ b/lib/graph/rte_graph_feature_arc.h
> > @@ -0,0 +1,373 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2024 Marvell International Ltd.
> > + */
> > +
> > +#ifndef _RTE_GRAPH_FEATURE_ARC_H_
> > +#define _RTE_GRAPH_FEATURE_ARC_H_
> > +
> > +#include <assert.h>
> > +#include <errno.h>
> > +#include <signal.h>
> > +#include <stddef.h>
> > +#include <stdint.h>
> > +#include <stdio.h>
> > +#include <stdlib.h>
> > +#include <string.h>
> > +
> > +#include <rte_common.h>
> > +#include <rte_compat.h>
> > +#include <rte_debug.h>
> > +#include <rte_graph.h>
> > +#include <rte_graph_worker.h>
> > +
> > +#ifdef __cplusplus
> > +extern "C" {
> > +#endif
> > +
> > +/**
> > + * @file
> > + *
> > + * rte_graph_feature_arc.h
> > + *
> > + * Define APIs and structures/variables with respect to feature arc
> > + *
> > + * - Feature arc(s)
> > + * - Feature(s)
> > + *
> > + * A feature arc represents an ordered list of features/protocol-nodes at a
> > + * given networking layer. A feature arc provides a high level abstraction to
> > + * connect various *rte_graph* nodes, designated as *feature nodes*, and
> > + * allows steering of packets across these feature nodes' fast path
> > + * processing in a generic manner. In a typical network stack, a protocol or
> > + * feature must often be enabled first on a given interface before any packet
> > + * is steered towards it for feature processing. For example, incoming IPv4
> > + * packets are sent to the routing sub-system only after a valid IPv4 address
> > + * is assigned to the receiving interface. In other words, packets often need
> > + * to be steered across features not based on the packet content but based on
> > + * whether a feature is enabled or disabled on a given incoming/outgoing
> > + * interface. A feature arc provides a mechanism to enable/disable feature(s)
> > + * on each interface at runtime and allows seamless packet steering across
> > + * runtime-enabled feature nodes in the fast path.
> > + *
> > + * Feature arc also provides a way to steer packets from standard nodes to
> > + * custom/user-defined *feature nodes* without any change in standard
> > node's
> > + * fast path functions
> > + *
> > + * On a given interface multiple feature(s) might be enabled in a particular
> > + * feature arc. For instance, both "ipv4-output" and "IPsec policy output"
> > + * features may be enabled on "eth0" interface in "L3-output" feature arc.
> > + * Similarly, "ipv6-output" and "ipsec-output" may be enabled on "eth1"
> > + * interface in the same "L3-output" feature arc.
> > + *
> > + * When multiple features are present in a given feature arc, it is imperative
> > + * to run each feature in a particular sequential order. For instance, in the
> > + * "L3-input" feature arc it may be required to run the "IPsec input" feature
> > + * first, for packet decryption, before "ip-lookup". So a sequential order
> > + * must be maintained among the features present in a feature arc.
> > + *
> > + * Features are enabled/disabled multiple times at runtime on some or all
> > + * available interfaces present in the system. Features can be enabled/disabled
> > + * even after @b rte_graph_create() is called. Enabling/disabling features on
> > + * one interface is independent of other interfaces.
> > + *
> > + * A given feature might consume a packet (if it is configured to consume) or
> > + * may forward it to the next enabled feature. For instance, the "IPsec input"
> > + * feature may consume/drop all packets with the "Protect" policy action while
> > + * all packets with the "Bypass" policy action may be forwarded to the next
> > + * enabled feature (within the same feature arc).
> > + *
> > + * This library enables rte_graph based applications to steer packets in the
> > + * fast path to different feature nodes within a feature arc and supports all
> > + * of the functionality described above.
> > + *
> > + * In order to use feature-arc APIs, applications need to do the following in
> > + * the control path:
> > + * - Initialize feature arc library via rte_graph_feature_arc_init()
> > + * - Create feature arc via rte_graph_feature_arc_create()
> > + * - *Before calling rte_graph_create()*, features must be added to feature-
> > arc
> > + * via rte_graph_feature_add(). rte_graph_feature_add() allows adding
> > + * features in a sequential order with "runs_after" and "runs_before"
> > + * constraints.
> > + * - Post rte_graph_create(), features can be enabled/disabled at runtime
> on
> > + * any interface via
> rte_graph_feature_enable()/rte_graph_feature_disable()
> > + * - Feature arc can be destroyed via rte_graph_feature_arc_destroy()
> > + *
> > + * In the fast path, APIs are provided to steer packets towards the feature
> > + * path from
> > + * - start_node (provided as an argument to rte_graph_feature_arc_create())
> > + * - feature nodes (which are added via rte_graph_feature_add())
> > + *
> > + * For typical steering of packets across feature nodes, the application needs
> > + * to know the "rte_edges" which are saved in the feature data object. The
> > + * feature data object is unique for every interface per feature within a
> > + * feature arc.
> > + *
> > + * When steering packets from start_node to a feature node:
> > + * - rte_graph_feature_arc_first_feature_get() provides the first enabled
> > + * feature.
> > + * - The next rte_edge from start_node to the first enabled feature can be
> > + * obtained via rte_graph_feature_arc_feature_set()
> > + *
> > + * rte_mbuf can carry [current feature, index] from the start_node of an arc
> > + * to other feature nodes
> > + *
> > + * In a feature node, the application can get the 32-bit user_data, provided
> > + * in rte_graph_feature_enable(), via rte_graph_feature_user_data_get().
> > + * User data can hold a feature specific cookie like an IPsec policy database
> > + * index (if more than one are supported)
> > + *
> > + * If a feature node is not consuming the packet, the next enabled feature and
> > + * next rte_edge can be obtained via rte_graph_feature_arc_next_feature_get()
> > + *
> > + * It is the application's responsibility to ensure that at least the *last
> > + * feature* (or sink feature) is enabled, from where the packet can exit the
> > + * feature-arc path, if *no* intermediate feature consumes the packet and it
> > + * has reached the end of the feature arc path
> > + *
> > + * Synchronization among cores
> > + * ---------------------------
> > + * Subsequent calls to rte_graph_feature_enable() are allowed while worker
> > + * cores are processing in the rte_graph_walk() loop. However, for
> > + * rte_graph_feature_disable() the application must use RCU based
> > + * synchronization
> > + */
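
To make the control-path steps above concrete, a minimal sketch follows. The
node registrations (ip4_output_node, ipsec_output_node, ip4_rewrite_node),
their names and the sizes/values used are assumptions for illustration only:

#include <rte_graph.h>
#include <rte_graph_feature_arc.h>

extern struct rte_node_register ip4_output_node;   /* arc start node */
extern struct rte_node_register ipsec_output_node; /* registered as "ipsec-output" */
extern struct rte_node_register ip4_rewrite_node;  /* registered as "ipv4-rewrite" */

static int
setup_l3_output_feature_arc(void)
{
	rte_graph_feature_arc_t arc;

	/* Initialize the feature arc library (once per process) */
	if (rte_graph_feature_arc_init(8) < 0)
		return -1;

	/* Create the arc rooted at the start node: up to 4 features,
	 * 16 interfaces */
	if (rte_graph_feature_arc_create("L3-output", 4, 16,
					 &ip4_output_node, &arc) < 0)
		return -1;

	/* Add features, in order, before rte_graph_create() */
	if (rte_graph_feature_add(arc, &ipsec_output_node, NULL, NULL) < 0)
		return -1;
	if (rte_graph_feature_add(arc, &ip4_rewrite_node,
				  "ipsec-output", NULL) < 0)
		return -1;

	/* ... rte_graph_create() is called here ... */

	/* At runtime, enable "ipsec-output" on port 0 with an application
	 * cookie (e.g. an SPD index) as user_data */
	return rte_graph_feature_enable(arc, 0, "ipsec-output", 5);
}
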
> > +
> > +/**< Initializer value for rte_graph_feature_arc_t */
> > +#define RTE_GRAPH_FEATURE_ARC_INITIALIZER
> > ((rte_graph_feature_arc_t)UINT64_MAX)
> > +
> > +/** Max number of features supported in a given feature arc */
> > +#define RTE_GRAPH_FEATURE_MAX_PER_ARC 64
> > +
> > +/** Length of feature arc name */
> > +#define RTE_GRAPH_FEATURE_ARC_NAMELEN RTE_NODE_NAMESIZE
> > +
> > +/** @internal */
> > +#define rte_graph_feature_cast(x) ((rte_graph_feature_t)x)
> > +
> > +/**< Invalid value for rte_graph_feature_t */
> > +#define RTE_GRAPH_FEATURE_INVALID
> > rte_graph_feature_cast(UINT8_MAX)
> > +
> > +/** rte_graph feature arc object */
> > +typedef uint64_t rte_graph_feature_arc_t;
> > +
> > +/** rte_graph feature object */
> > +typedef uint8_t rte_graph_feature_t;
> > +
> > +/** runtime active feature list index within a feature arc */
> > +typedef uint8_t rte_graph_feature_rt_list_t;
> > +
> > +/** per feature arc monotonically increasing counter to synchronize fast
> path
> > APIs */
> > +typedef uint16_t rte_graph_feature_counter_t;
> > +
> > +/**
> > + * Initialize feature arc subsystem
> > + *
> > + * @param max_feature_arcs
> > + * Maximum number of feature arcs required to be supported
> > + *
> > + * @return
> > + * 0: Success
> > + * <0: Failure
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_arc_init(int max_feature_arcs);
> > +
> > +/**
> > + * Create a feature arc
> > + *
> > + * @param feature_arc_name
> > + * Feature arc name with max length of @ref
> > RTE_GRAPH_FEATURE_ARC_NAMELEN
> > + * @param max_features
> > + * Maximum number of features to be supported in this feature arc
> > + * @param max_indexes
> > + * Maximum number of interfaces/ports/indexes to be supported
> > + * @param start_node
> > + * Base node where this feature arc's features are checked in fast path
> > + * @param[out] _arc
> > + * Feature arc object
> > + *
> > + * @return
> > + * 0: Success
> > + * <0: Failure
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_arc_create(const char *feature_arc_name, int
> > max_features, int max_indexes,
> > + struct rte_node_register *start_node,
> > + rte_graph_feature_arc_t *_arc);
> > +
> > +/**
> > + * Get feature arc object with name
> > + *
> > + * @param arc_name
> > + * Feature arc name provided to successful @ref
> > rte_graph_feature_arc_create
> > + * @param[out] _arc
> > + * Feature arc object returned
> > + *
> > + * @return
> > + * 0: Success
> > + * <0: Failure.
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_arc_lookup_by_name(const char *arc_name,
> > rte_graph_feature_arc_t *_arc);
> > +
> > +/**
> > + * Add a feature to an already created feature arc. For instance:
> > + *
> > + * 1. Add first feature node: "ipv4-input" to input arc
> > + * rte_graph_feature_add(ipv4_input_arc, "ipv4-input", NULL, NULL);
> > + *
> > + * 2. Add "ipsec-input" feature node after "ipv4-input" node
> > + * rte_graph_feature_add(ipv4_input_arc, "ipsec-input", "ipv4-input",
> > NULL);
> > + *
> > + * 3. Add "ipv4-pre-classify-input" node before "ipv4-input" node
> > + * rte_graph_feature_add(ipv4_input_arc, "ipv4-pre-classify-input", NULL,
> > + * "ipv4-input");
> > + *
> > + * 4. Add "acl-classify-input" node after ipv4-input but before ipsec-input
> > + * rte_graph_feature_add(ipv4_input_arc, "acl-classify-input", "ipv4-
> input",
> > "ipsec-input");
> > + *
> > + * @param _arc
> > + * Feature arc handle returned from @ref rte_graph_feature_arc_create()
> > + * @param feature_node
> > + * Graph node representing feature. On success, feature_node is
> next_node
> > of
> > + * feature_arc->start_node
> > + * @param runs_after
> > + * Add this feature_node after already added "runs_after". Creates
> > + * start_node -> runs_after -> this_feature sequence
> > + * @param runs_before
> > + * Add this feature_node before already added "runs_before". Creates
> > + * start_node -> this_feature -> runs_before sequence
> > + *
> > + * <I> Must be called before rte_graph_create() </I>
> > + * <I> rte_graph_feature_add() is not allowed after call to
> > + * rte_graph_feature_enable() so all features must be added before they
> can
> > be
> > + * enabled </I>
> > + *
> > + * @return
> > + * 0: Success
> > + * <0: Failure
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_add(rte_graph_feature_arc_t _arc, struct
> > rte_node_register *feature_node,
> > + const char *runs_after, const char *runs_before);
> > +
> > +/**
> > + * Enable feature within a feature arc
> > + *
> > + * Must be called after @b rte_graph_create().
> > + *
> > + * @param _arc
> > + * Feature arc object returned by @ref rte_graph_feature_arc_create or
> > @ref
> > + * rte_graph_feature_arc_lookup_by_name
> > + * @param index
> > + * Application specific index. Can be corresponding to
> interface_id/port_id
> > etc
> > + * @param feature_name
> > + * Name of the node which is already added via @ref
> rte_graph_feature_add
> > + * @param user_data
> > + * Application specific data which is retrieved in fast path
> > + *
> > + * @return
> > + * 0: Success
> > + * <0: Failure
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_enable(rte_graph_feature_arc_t _arc, uint32_t
> index,
> > const char *feature_name,
> > + int32_t user_data);
> > +
> > +/**
> > + * Validate whether subsequent enable/disable feature would succeed or
> not.
> > + * API is thread-safe
> > + *
> > + * @param _arc
> > + * Feature arc object returned by @ref rte_graph_feature_arc_create or
> > @ref
> > + * rte_graph_feature_arc_lookup_by_name
> > + * @param index
> > + * Application specific index. Can be corresponding to
> interface_id/port_id
> > etc
> > + * @param feature_name
> > + * Name of the node which is already added via @ref
> rte_graph_feature_add
> > + * @param is_enable_disable
> > + * If 1, validate whether subsequent @ref rte_graph_feature_enable
> would
> > pass or not
> > + * If 0, validate whether subsequent @ref rte_graph_feature_disable
> would
> > pass or not
> > + *
> > + * @return
> > + * 0: Subsequent enable/disable API would pass
> > + * <0: Subsequent enable/disable API would not pass
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_validate(rte_graph_feature_arc_t _arc, uint32_t
> index,
> > + const char *feature_name, int is_enable_disable);
> > +
> > +/**
> > + * Disable already enabled feature within a feature arc
> > + *
> > + * Must be called after @b rte_graph_create(). API is *NOT* Thread-safe
> > + *
> > + * @param _arc
> > + * Feature arc object returned by @ref rte_graph_feature_arc_create or
> > @ref
> > + * rte_graph_feature_arc_lookup_by_name
> > + * @param index
> > + * Application specific index. Can be corresponding to
> interface_id/port_id
> > etc
> > + * @param feature_name
> > + * Name of the node which is already added via @ref
> rte_graph_feature_add
> > + *
> > + * @return
> > + * 0: Success
> > + * <0: Failure
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_disable(rte_graph_feature_arc_t _arc, uint32_t
> index,
> > + const char *feature_name);
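
Since disabling is documented above as requiring RCU based synchronization,
one possible ordering is sketched here for illustration; the rte_rcu_qsbr
variable (owned by the application and reported to by the worker cores), the
feature name and the port are assumptions:

#include <rte_rcu_qsbr.h>
#include <rte_graph_feature_arc.h>

static int
l3_output_ipsec_disable(rte_graph_feature_arc_t arc, uint32_t port,
			struct rte_rcu_qsbr *qsv)
{
	if (rte_graph_feature_validate(arc, port, "ipsec-output", 0) < 0)
		return -1;

	if (rte_graph_feature_disable(arc, port, "ipsec-output") < 0)
		return -1;

	/* Wait until all workers have moved past the previously active
	 * feature list before releasing per-feature resources for this port */
	rte_rcu_qsbr_synchronize(qsv, RTE_QSBR_THRID_INVALID);

	return 0;
}
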
> > +
> > +/**
> > + * Get rte_graph_feature_t object from feature name
> > + *
> > + * @param arc
> > + * Feature arc object returned by @ref rte_graph_feature_arc_create or
> > @ref
> > + * rte_graph_feature_arc_lookup_by_name
> > + * @param feature_name
> > + * Feature name provided to @ref rte_graph_feature_add
> > + * @param[out] feature
> > + * Feature object
> > + *
> > + * @return
> > + * 0: Success
> > + * <0: Failure
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_lookup(rte_graph_feature_arc_t _arc, const char
> > *feature_name,
> > + rte_graph_feature_t *feature);
> > +
> > +/**
> > + * Delete feature_arc object
> > + *
> > + * @param _arc
> > + * Feature arc object returned by @ref rte_graph_feature_arc_create or
> > @ref
> > + * rte_graph_feature_arc_lookup_by_name
> > + *
> > + * @return
> > + * 0: Success
> > + * <0: Failure
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_arc_destroy(rte_graph_feature_arc_t _arc);
> > +
> > +/**
> > + * Cleanup all feature arcs
> > + *
> > + * @return
> > + * 0: Success
> > + * <0: Failure
> > + */
> > +__rte_experimental
> > +int rte_graph_feature_arc_cleanup(void);
> > +
> > +/**
> > + * Slow path API to know how many features are currently enabled within a
> > + * feature arc
> > + *
> > + * @param _arc
> > + * Feature arc object
> > + *
> > + * @return: Number of enabled features
> > + */
> > +__rte_experimental
> > +int
> rte_graph_feature_arc_num_enabled_features(rte_graph_feature_arc_t
> > _arc);
> > +#ifdef __cplusplus
> > +}
> > +#endif
> > +
> > +#endif
> > diff --git a/lib/graph/rte_graph_feature_arc_worker.h
> > b/lib/graph/rte_graph_feature_arc_worker.h
> > new file mode 100644
> > index 0000000000..6019d74853
> > --- /dev/null
> > +++ b/lib/graph/rte_graph_feature_arc_worker.h
> > @@ -0,0 +1,548 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2024 Marvell International Ltd.
> > + */
> > +
> > +#ifndef _RTE_GRAPH_FEATURE_ARC_WORKER_H_
> > +#define _RTE_GRAPH_FEATURE_ARC_WORKER_H_
> > +
> > +#include <stddef.h>
> > +#include <rte_graph_feature_arc.h>
> > +#include <rte_bitops.h>
> > +
> > +/**
> > + * @file
> > + *
> > + * rte_graph_feature_arc_worker.h
> > + *
> > + * Defines fast path structure
> > + */
> > +
> > +#ifdef __cplusplus
> > +extern "C" {
> > +#endif
> > +
> > +/** @internal
> > + *
> > + * Slow path feature node info list
> > + */
> > +struct rte_graph_feature_node_list {
> > + /** Next feature */
> > + STAILQ_ENTRY(rte_graph_feature_node_list) next_feature;
> > +
> > + /** node representing feature */
> > + struct rte_node_register *feature_node;
> > +
> > + /** How many indexes/interfaces using this feature */
> > + int32_t ref_count;
> > +
> > + /* node_index in list (after feature_enable())*/
> > + uint32_t node_index;
> > +
> > + /** Back pointer to feature arc */
> > + void *feature_arc;
> > +
> > + /** rte_edge_t to this feature node from feature_arc->start_node */
> > + rte_edge_t edge_to_this_feature;
> > +};
> > +
> > +/**
> > + * Fast path holding rte_edge_t and next enabled feature for a feature
> > + */
> > +typedef struct __rte_packed rte_graph_feature_data {
> > + /* next node to which current mbuf should go*/
> > + rte_edge_t next_edge;
> > +
> > + /* next enabled feature on this arc for current index */
> > + union {
> > + uint16_t reserved;
> > + struct {
> > + rte_graph_feature_t next_enabled_feature;
> > + };
> > + };
> > +
> > + /* user_data */
> > + int32_t user_data;
> > +} rte_graph_feature_data_t;
> > +
> > +/**
> > + * Fast path feature structure. Holds rte_graph_feature_data_t per index
> > + */
> > +struct __rte_cache_aligned rte_graph_feature {
> > + uint16_t this_feature_index;
> > +
> > + /* Array of size arc->feature_data_size
> > + * [data-index-0][data-index-1]...
> > + * Each index of size: sizeof(rte_graph_feature_data_t)
> > + */
> > + uint8_t feature_data_by_index[];
> > +};
> > +
> > +/**
> > + * Fast path cache-aligned feature list holding all features.
> > + * There are two feature lists: active and passive.
> > + *
> > + * Fast path APIs work on the active list while the control plane updates the
> > + * passive list. An atomic update to arc->active_feature_list is done to
> > + * switch between active and passive.
> > + */
> > +typedef struct __rte_cache_aligned rte_graph_feature_list {
> > + /**
> > + * fast path array holding per_feature data.
> > + * Duplicate entry as feature-arc also hold this pointer
> > + * arc->features[]
> > + *
> > + *<-------------feature-0 ---------><CEIL><---------feature-1 -------------->...
> > + *[index-0][index-1]...[max_index-1] [index-0][index-1] ...[max_index-1]...
> > + */
> > + struct rte_graph_feature *indexed_by_features;
> > + /*
> > + * fast path array holding first enabled feature per index
> > + * (Required in start_node. In non start_node, mbuf can hold next enabled
> > + * feature)
> > + */
> > + rte_graph_feature_t first_enabled_feature_by_index[];
> > +} rte_graph_feature_list_t;
> > +
> > +/**
> > + * rte_graph feature arc object
> > + *
> > + * A feature-arc can only hold RTE_GRAPH_FEATURE_MAX_PER_ARC features, but
> > + * there is no limit on the interface index
> > + *
> > + * Represents a feature arc holding all features which are enabled/disabled
> > + * on any interface
> > + */
> > +struct __rte_cache_aligned rte_graph_feature_arc {
> > + /* First 64B is fast path variables */
> > + RTE_MARKER fast_path_variables;
> > +
> > + /** runtime active feature list */
> > + rte_graph_feature_rt_list_t active_feature_list;
> > +
> > + /* Actual Size of feature_list0 */
> > + uint16_t feature_list_size;
> > +
> > + /**
> > + * Size each feature in fastpath.
> > + * sizeof(arc->active_list->indexed_by_feature[0])
> > + */
> > + uint16_t feature_size;
> > +
> > + /* Size of arc->max_index * sizeof(rte_graph_feature_data_t) */
> > + uint16_t feature_data_size;
> > +
> > + /**
> > + * Fast path bitmask indicating if a feature is enabled or not. Number
> > + * of bits: RTE_GRAPH_FEATURE_MAX_PER_ARC
> > + */
> > + uint64_t feature_enable_bitmask[2];
> > + rte_graph_feature_list_t *feature_list[2];
> > + struct rte_graph_feature *features[2];
> > +
> > + /** index in feature_arc_main */
> > + uint16_t feature_arc_index;
> > +
> > + uint16_t reserved[3];
> > +
> > + /** Slow path variables follows*/
> > + RTE_MARKER slow_path_variables;
> > +
> > + /** feature arc name */
> > + char feature_arc_name[RTE_GRAPH_FEATURE_ARC_NAMELEN];
> > +
> > + /** All feature lists */
> > + STAILQ_HEAD(, rte_graph_feature_node_list) all_features;
> > +
> > + uint32_t runtime_enabled_features;
> > +
> > + /** Back pointer to feature_arc_main */
> > + void *feature_arc_main;
> > +
> > + /* start_node */
> > + struct rte_node_register *start_node;
> > +
> > + /* maximum number of features supported by this arc */
> > + uint32_t max_features;
> > +
> > + /* maximum number of index supported by this arc */
> > + uint32_t max_indexes;
> > +
> > + /* Slow path bit mask per feature per index */
> > + uint64_t feature_bit_mask_by_index[];
> > +};
> > +
> > +/** Feature arc main */
> > +typedef struct feature_arc_main {
> > + /** number of feature arcs created by application */
> > + uint32_t num_feature_arcs;
> > +
> > + /** max features arcs allowed */
> > + uint32_t max_feature_arcs;
> > +
> > + /** feature arcs */
> > + rte_graph_feature_arc_t feature_arcs[];
> > +} rte_graph_feature_arc_main_t;
> > +
> > +/** @internal Get feature arc pointer from object */
> > +#define rte_graph_feature_arc_get(arc) ((struct rte_graph_feature_arc
> *)arc)
> > +
> > +extern rte_graph_feature_arc_main_t *__feature_arc_main;
> > +
> > +/**
> > + * API to know if feature is valid or not
> > + */
> > +
> > +static __rte_always_inline int
> > +rte_graph_feature_is_valid(rte_graph_feature_t feature)
> > +{
> > + return (feature != RTE_GRAPH_FEATURE_INVALID);
> > +}
> > +
> > +/**
> > + * Get rte_graph_feature object with no checks
> > + *
> > + * @param arc
> > + * Feature arc pointer
> > + * @param feature
> > + * Feature index
> > + * @param feature_list
> > + * active feature list retrieved from
> > rte_graph_feature_arc_has_any_feature()
> > + * or rte_graph_feature_arc_has_feature()
> > + *
> > + * @return
> > + * Internal feature object.
> > + */
> > +static __rte_always_inline struct rte_graph_feature *
> > +__rte_graph_feature_get(struct rte_graph_feature_arc *arc,
> > rte_graph_feature_t feature,
> > + const rte_graph_feature_rt_list_t feature_list)
> > +{
> > + return ((struct rte_graph_feature *)((uint8_t *)(arc-
> > >features[feature_list] +
> > + (feature * arc->feature_size))));
> > +}
> > +
> > +/**
> > + * Get rte_graph_feature object for a given interface/index from feature arc
> > + *
> > + * @param arc
> > + * Feature arc pointer
> > + * @param feature
> > + * Feature index
> > + *
> > + * @return
> > + * Internal feature object.
> > + */
> > +static __rte_always_inline struct rte_graph_feature *
> > +rte_graph_feature_get(struct rte_graph_feature_arc *arc,
> > rte_graph_feature_t feature)
> > +{
> > + RTE_VERIFY(feature < arc->max_features);
> > +
> > + if (likely(rte_graph_feature_is_valid(feature)))
> > + return __rte_graph_feature_get(arc, feature, arc-
> > >active_feature_list);
> > +
> > + return NULL;
> > +}
> > +
> > +static __rte_always_inline rte_graph_feature_data_t *
> > +__rte_graph_feature_data_get(struct rte_graph_feature_arc *arc, struct
> > rte_graph_feature *feature,
> > + uint8_t index)
> > +{
> > + RTE_SET_USED(arc);
> > + return ((rte_graph_feature_data_t *)(feature->feature_data_by_index
> > +
> > + (index *
> > sizeof(rte_graph_feature_data_t))));
> > +}
> > +
> > +/**
> > + * Get rte_graph feature data object for an index in a feature
> > + *
> > + * @param arc
> > + * feature arc
> > + * @param feature
> > + * Pointer to feature object
> > + * @param index
> > + * Index of feature maintained in slow path linked list
> > + *
> > + * @return
> > + * Valid feature data
> > + */
> > +static __rte_always_inline rte_graph_feature_data_t *
> > +rte_graph_feature_data_get(struct rte_graph_feature_arc *arc, struct
> > rte_graph_feature *feature,
> > + uint8_t index)
> > +{
> > + if (likely(index < arc->max_indexes))
> > + return __rte_graph_feature_data_get(arc, feature, index);
> > +
> > + RTE_VERIFY(0);
> > +}
> > +
> > +/**
> > + * Fast path API to check if any feature is enabled on a feature arc
> > + * Typically from arc->start_node process function
> > + *
> > + * @param arc
> > + * Feature arc object
> > + * @param[out] plist
> > + * Pointer to runtime active feature list which needs to be provided to
> other
> > + * fast path APIs
> > + *
> > + * @return
> > + * 0: If no feature enabled
> > + * Non-Zero: Bitmask of features enabled. plist is valid
> > + *
> > + */
> > +static __rte_always_inline uint64_t
> > +rte_graph_feature_arc_has_any_feature(struct rte_graph_feature_arc *arc,
> > + rte_graph_feature_rt_list_t *plist)
> > +{
> > + *plist = __atomic_load_n(&arc->active_feature_list,
> > __ATOMIC_RELAXED);
> > +
> > + return (__atomic_load_n(arc->feature_enable_bitmask +
> > (uint8_t)*plist,
> > + __ATOMIC_RELAXED));
> > +}
> > +
> > +/**
> > + * Fast path API to check if provided feature is enabled on any
> interface/index
> > + * or not
> > + *
> > + * @param arc
> > + * Feature arc object
> > + * @param feature
> > + * Input rte_graph_feature_t that needs to be checked
> > + * @param[out] plist
> > + * Returns active list to caller which needs to be provided to other fast
> path
> > + * APIs
> > + *
> > + * @return
> > + * 1: If feature is enabled in arc
> > + * 0: If feature is not enabled in arc
> > + */
> > +static __rte_always_inline int
> > +rte_graph_feature_arc_has_feature(struct rte_graph_feature_arc *arc,
> > + rte_graph_feature_t feature,
> > + rte_graph_feature_rt_list_t *plist)
> > +{
> > + uint64_t bitmask = RTE_BIT64(feature);
> > +
> > + *plist = __atomic_load_n(&arc->active_feature_list,
> > __ATOMIC_RELAXED);
> > +
> > + return (bitmask & __atomic_load_n(arc->feature_enable_bitmask +
> > (uint8_t)*plist,
> > + __ATOMIC_RELAXED));
> > +}
> > +
> > +/**
> > + * Prefetch feature arc fast path cache line
> > + *
> > + * @param arc
> > + * RTE_GRAPH feature arc object
> > + */
> > +static __rte_always_inline void
> > +rte_graph_feature_arc_prefetch(struct rte_graph_feature_arc *arc)
> > +{
> > + rte_prefetch0((void *)&arc->fast_path_variables);
> > +}
> > +
> > +/**
> > + * Prefetch feature related fast path cache line
> > + *
> > + * @param arc
> > + * RTE_GRAPH feature arc object
> > + * @param list
> > + * Pointer to runtime active feature list from
> > rte_graph_feature_arc_has_any_feature();
> > + * @param feature
> > + * Pointer to feature object
> > + */
> > +static __rte_always_inline void
> > +rte_graph_feature_arc_feature_prefetch(struct rte_graph_feature_arc *arc,
> > + const rte_graph_feature_rt_list_t list,
> > + rte_graph_feature_t feature)
> > +{
> > + /* feature cache line */
> > + if (likely(rte_graph_feature_is_valid(feature)))
> > + rte_prefetch0((void *)__rte_graph_feature_get(arc, feature,
> > list));
> > +}
> > +
> > +/**
> > + * Prefetch feature data upfront. Perform sanity
> > + *
> > + * @param _arc
> > + * RTE_GRAPH feature arc object
> > + * @param list
> > + * Pointer to runtime active feature list from
> > rte_graph_feature_arc_has_any_feature();
> > + * @param feature
> > + * Pointer to feature object returned from @ref
> > + * rte_graph_feature_arc_first_feature_get()
> > + * @param index
> > + * Interface/index
> > + */
> > +static __rte_always_inline void
> > +rte_graph_feature_arc_data_prefetch(struct rte_graph_feature_arc *arc,
> > + const rte_graph_feature_rt_list_t list,
> > + rte_graph_feature_t feature, uint32_t
> index)
> > +{
> > + if (likely(rte_graph_feature_is_valid(feature)))
> > + rte_prefetch0((void *)((uint8_t *)arc->features[list] +
> > + offsetof(struct rte_graph_feature,
> > feature_data_by_index) +
> > + (index * sizeof(rte_graph_feature_data_t))));
> > +}
> > +
> > +/**
> > + * Fast path API to get first enabled feature on interface index
> > + * Typically required in arc->start_node so that from returned feature,
> > + * feature-data can be retrieved to steer packets
> > + *
> > + * @param arc
> > + * Feature arc object
> > + * @param list
> > + * Pointer to runtime active feature list from
> > + * rte_graph_feature_arc_has_any_feature() or
> > + * rte_graph_feature_arc_has_feature()
> > + * @param index
> > + * Interface Index
> > + * @param[out] feature
> > + * Pointer to rte_graph_feature_t.
> > + *
> > + * @return
> > + * 0. Success. feature field is valid
> > + * 1. Failure. feature field is invalid
> > + *
> > + */
> > +static __rte_always_inline int
> > +rte_graph_feature_arc_first_feature_get(struct rte_graph_feature_arc *arc,
> > + const rte_graph_feature_rt_list_t list,
> > + uint32_t index,
> > + rte_graph_feature_t *feature)
> > +{
> > + struct rte_graph_feature_list *feature_list = arc->feature_list[list];
> > +
> > + *feature = feature_list->first_enabled_feature_by_index[index];
> > +
> > + return rte_graph_feature_is_valid(*feature);
> > +}
> > +
> > +/**
> > + * Fast path API to get next enabled feature on interface index with
> > + * provided input feature
> > + *
> > + * @param arc
> > + * Feature arc object
> > + * @param list
> > + * Pointer to runtime active feature list from
> > + * rte_graph_feature_arc_has_any_feature() or
> > + * rte_graph_feature_arc_has_feature()
> > + * @param index
> > + * Interface Index
> > + * @param[in,out] feature
> > + * Pointer to rte_graph_feature_t. Input feature is set to next enabled
> > + * feature after successful return
> > + * @param[out] next_edge
> > + * Edge from current feature to next feature. Valid only if next feature is
> > valid
> > + *
> > + * @return
> > + * 0. Success. next enabled feature is valid.
> > + * 1. Failure. next enabled feature is invalid
> > + */
> > +static __rte_always_inline int
> > +rte_graph_feature_arc_next_feature_get(struct rte_graph_feature_arc
> *arc,
> > + const rte_graph_feature_rt_list_t list,
> > + uint32_t index,
> > + rte_graph_feature_t *feature,
> > + rte_edge_t *next_edge)
> > +{
> > + rte_graph_feature_data_t *feature_data = NULL;
> > + struct rte_graph_feature *f = NULL;
> > +
> > + if (likely(rte_graph_feature_is_valid(*feature))) {
> > + f = __rte_graph_feature_get(arc, *feature, list);
> > + feature_data = rte_graph_feature_data_get(arc, f, index);
> > + *feature = feature_data->next_enabled_feature;
> > + *next_edge = feature_data->next_edge;
> > + return (*feature == RTE_GRAPH_FEATURE_INVALID);
> > + }
> > +
> > + return 1;
> > +}
> > +
> > +/**
> > + * Set fields with respect to first enabled feature in an arc and return edge
> > + * Typically the returned feature and interface index must be saved in the
> > + * rte_mbuf structure to pass this information to the next feature node
> > + *
> > + * @param arc
> > + * Feature arc object
> > + * @param list
> > + * Pointer to runtime active feature list from
> > rte_graph_feature_arc_has_any_feature();
> > + * @param index
> > + * Index (of interface)
> > + * @param[out] gf
> > + * Pointer to rte_graph_feature_t. Valid if API returns Success
> > + * @param[out] edge
> > + * Edge to steer packet from arc->start_node to first enabled feature. Valid
> > + * only if API returns Success
> > + *
> > + * @return
> > + * 0: If valid feature is set by API
> > + * 1: If valid feature is NOT set by API
> > + */
> > +static __rte_always_inline rte_graph_feature_t
> > +rte_graph_feature_arc_feature_set(struct rte_graph_feature_arc *arc,
> > + const rte_graph_feature_rt_list_t list,
> > + uint32_t index,
> > + rte_graph_feature_t *gf,
> > + rte_edge_t *edge)
> > +{
> > + struct rte_graph_feature_list *feature_list = arc->feature_list[list];
> > + struct rte_graph_feature_data *feature_data = NULL;
> > + struct rte_graph_feature *feature = NULL;
> > + rte_graph_feature_t f;
> > +
> > + /* reset */
> > + *gf = RTE_GRAPH_FEATURE_INVALID;
> > + f = feature_list->first_enabled_feature_by_index[index];
> > +
> > + if (unlikely(rte_graph_feature_is_valid(f))) {
> > + feature = __rte_graph_feature_get(arc, f, list);
> > + feature_data = rte_graph_feature_data_get(arc, feature,
> > index);
> > + *gf = f;
> > + *edge = feature_data->next_edge;
> > + return 0;
> > + }
> > +
> > + return 1;
> > +}
> > +
> > +/**
> > + * Get user data corresponding to current feature set by application in
> > + * rte_graph_feature_enable()
> > + *
> > + * @param arc
> > + * Feature arc object
> > + * @param list
> > + * Pointer to runtime active feature list from
> > rte_graph_feature_arc_has_any_feature();
> > + * @param feature
> > + * Feature index
> > + * @param index
> > + * Interface index
> > + *
> > + * @return
> > + * UINT32_MAX: Failure
> > + * Valid user data: Success
> > + */
> > +static __rte_always_inline uint32_t
> > +rte_graph_feature_user_data_get(struct rte_graph_feature_arc *arc,
> > + const rte_graph_feature_rt_list_t list,
> > + rte_graph_feature_t feature,
> > + uint32_t index)
> > +{
> > + rte_graph_feature_data_t *fdata = NULL;
> > + struct rte_graph_feature *f = NULL;
> > +
> > + if (likely(rte_graph_feature_is_valid(feature))) {
> > + f = __rte_graph_feature_get(arc, feature, list);
> > + fdata = rte_graph_feature_data_get(arc, f, index);
> > + return fdata->user_data;
> > + }
> > +
> > + return UINT32_MAX;
> > +}
> > +#ifdef __cplusplus
> > +}
> > +#endif
> > +#endif
> > diff --git a/lib/graph/version.map b/lib/graph/version.map
> > index 2c83425ddc..82b2469fba 100644
> > --- a/lib/graph/version.map
> > +++ b/lib/graph/version.map
> > @@ -52,3 +52,20 @@ DPDK_25 {
> >
> > local: *;
> > };
> > +
> > +EXPERIMENTAL {
> > + global:
> > +
> > + # added in 24.11
> > + rte_graph_feature_arc_init;
> > + rte_graph_feature_arc_create;
> > + rte_graph_feature_arc_lookup_by_name;
> > + rte_graph_feature_add;
> > + rte_graph_feature_enable;
> > + rte_graph_feature_validate;
> > + rte_graph_feature_disable;
> > + rte_graph_feature_lookup;
> > + rte_graph_feature_arc_destroy;
> > + rte_graph_feature_arc_cleanup;
> > + rte_graph_feature_arc_num_enabled_features;
> > +};
> > --
> > 2.43.0
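For readers following the fast path APIs quoted above, here is a minimal, hedged sketch of how an arc's start_node process function might steer packets. It is not part of the patch: the "out_arc" handle, the DEFAULT_NEXT_EDGE fallback and the idea of stashing [feature, port] in an mbuf private area are assumptions for illustration only.

#include <rte_graph_worker.h>
#include <rte_graph_feature_arc_worker.h>
#include <rte_mbuf.h>

/* Assumed to be looked up once at initialization time */
static rte_graph_feature_arc_t out_arc;

#define DEFAULT_NEXT_EDGE 0 /* assumed non-feature next node */

static uint16_t
start_node_process(struct rte_graph *graph, struct rte_node *node,
		   void **objs, uint16_t nb_objs)
{
	struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(out_arc);
	rte_graph_feature_rt_list_t list;
	rte_graph_feature_t feature;
	struct rte_mbuf *mbuf;
	rte_edge_t edge;
	uint16_t i;

	for (i = 0; i < nb_objs; i++) {
		mbuf = (struct rte_mbuf *)objs[i];
		edge = DEFAULT_NEXT_EDGE;

		/* If any feature is enabled on this arc, steer the packet to
		 * the first feature enabled on its port
		 */
		if (rte_graph_feature_arc_has_any_feature(arc, &list) &&
		    !rte_graph_feature_arc_feature_set(arc, list, mbuf->port,
						       &feature, &edge)) {
			/* Save [feature, mbuf->port] in the mbuf (e.g. a
			 * private area, an assumption here) so downstream
			 * feature nodes can call
			 * rte_graph_feature_arc_next_feature_get()
			 */
		}
		rte_node_enqueue_x1(graph, node, edge, mbuf);
	}

	return nb_objs;
}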
new file mode 100644
@@ -0,0 +1,959 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell International Ltd.
+ */
+
+#include "graph_private.h"
+#include <rte_graph_feature_arc_worker.h>
+#include <rte_malloc.h>
+
+#define __RTE_GRAPH_FEATURE_ARC_MAX 32
+
+#define ARC_PASSIVE_LIST(arc) (arc->active_feature_list ^ 0x1)
+
+#define rte_graph_uint_cast(x) ((unsigned int)x)
+#define feat_dbg graph_err
+
+rte_graph_feature_arc_main_t *__feature_arc_main;
+
+/* Make sure fast path cache line is compact */
+_Static_assert((offsetof(struct rte_graph_feature_arc, slow_path_variables)
+ - offsetof(struct rte_graph_feature_arc, fast_path_variables))
+ <= RTE_CACHE_LINE_SIZE);
+
+
+static int
+feature_lookup(struct rte_graph_feature_arc *arc, const char *feat_name,
+ struct rte_graph_feature_node_list **ffinfo, uint32_t *slot)
+{
+ struct rte_graph_feature_node_list *finfo = NULL;
+ const char *name;
+
+ if (!feat_name)
+ return -1;
+
+ if (slot)
+ *slot = 0;
+
+ STAILQ_FOREACH(finfo, &arc->all_features, next_feature) {
+ RTE_VERIFY(finfo->feature_arc == arc);
+ name = rte_node_id_to_name(finfo->feature_node->id);
+ if (!strncmp(name, feat_name, RTE_GRAPH_NAMESIZE)) {
+ if (ffinfo)
+ *ffinfo = finfo;
+ return 0;
+ }
+ if (slot)
+ (*slot)++;
+ }
+ return -1;
+}
+
+static int
+feature_arc_node_info_lookup(struct rte_graph_feature_arc *arc, uint32_t feature_index,
+ struct rte_graph_feature_node_list **ppfinfo)
+{
+ struct rte_graph_feature_node_list *finfo = NULL;
+ uint32_t index = 0;
+
+ if (!ppfinfo)
+ return -1;
+
+ *ppfinfo = NULL;
+ STAILQ_FOREACH(finfo, &arc->all_features, next_feature) {
+ if (index == feature_index) {
+ if (finfo->node_index == feature_index)
+ return -1;
+ *ppfinfo = finfo;
+ }
+ index++;
+ }
+ if (feature_index && (index >= feature_index))
+ return -1;
+
+ return 0;
+}
+
+static void
+prepare_feature_arc(struct rte_graph_feature_arc *arc)
+{
+ struct rte_graph_feature_node_list *finfo = NULL;
+ uint32_t index = 0;
+
+ STAILQ_FOREACH(finfo, &arc->all_features, next_feature) {
+ finfo->node_index = index;
+ index++;
+ }
+}
+
+static int
+feature_arc_lookup(rte_graph_feature_arc_t _arc)
+{
+ struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
+ rte_graph_feature_arc_main_t *dm = __feature_arc_main;
+ uint32_t iter;
+
+ if (!__feature_arc_main)
+ return -1;
+
+ for (iter = 0; iter < dm->max_feature_arcs; iter++) {
+ if (dm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
+ continue;
+
+ if (arc == (rte_graph_feature_arc_get(dm->feature_arcs[iter])))
+ return 0;
+ }
+ return -1;
+}
+
+static int
+get_existing_edge(const char *arc_name, struct rte_node_register *parent_node,
+ struct rte_node_register *child_node, rte_edge_t *_edge)
+{
+ char **next_edges = NULL;
+ uint32_t count, i;
+
+ RTE_SET_USED(arc_name);
+
+ count = rte_node_edge_get(parent_node->id, NULL);
+ next_edges = malloc(count);
+
+ if (!next_edges)
+ return -1;
+
+ count = rte_node_edge_get(parent_node->id, next_edges);
+ for (i = 0; i < count; i++) {
+ if (strstr(child_node->name, next_edges[i])) {
+ feat_dbg("%s: Edge exists [%s[%u]: \"%s\"]", arc_name,
+ parent_node->name, i, child_node->name);
+ if (_edge)
+ *_edge = (rte_edge_t)i;
+
+ free(next_edges);
+ return 0;
+ }
+ }
+ free(next_edges);
+
+ return -1;
+}
+
+static int
+connect_graph_nodes(struct rte_node_register *parent_node, struct rte_node_register *child_node,
+ rte_edge_t *_edge, char *arc_name)
+{
+ const char *next_node = NULL;
+ rte_edge_t edge;
+
+ if (!get_existing_edge(arc_name, parent_node, child_node, &edge)) {
+ feat_dbg("%s: add_feature: Edge reused [%s[%u]: \"%s\"]", arc_name,
+ parent_node->name, edge, child_node->name);
+
+ if (_edge)
+ *_edge = edge;
+
+ return 0;
+ }
+
+ /* Node to be added */
+ next_node = child_node->name;
+
+ edge = rte_node_edge_update(parent_node->id, RTE_EDGE_ID_INVALID, &next_node, 1);
+
+ if (edge == RTE_EDGE_ID_INVALID) {
+ graph_err("edge invalid");
+ return -1;
+ }
+ edge = rte_node_edge_count(parent_node->id) - 1;
+
+ feat_dbg("%s: add_feature: edge added [%s[%u]: \"%s\"]", arc_name, parent_node->name, edge,
+ child_node->name);
+
+ if (_edge)
+ *_edge = edge;
+
+ return 0;
+}
+
+static int
+feature_arc_init(rte_graph_feature_arc_main_t **pfl, uint32_t max_feature_arcs)
+{
+ rte_graph_feature_arc_main_t *pm = NULL;
+ uint32_t i;
+ size_t sz;
+
+ if (!pfl)
+ return -1;
+
+ sz = sizeof(rte_graph_feature_arc_main_t) +
+ (sizeof(pm->feature_arcs[0]) * max_feature_arcs);
+
+ pm = malloc(sz);
+ if (!pm)
+ return -1;
+
+ memset(pm, 0, sz);
+
+ for (i = 0; i < max_feature_arcs; i++)
+ pm->feature_arcs[i] = RTE_GRAPH_FEATURE_ARC_INITIALIZER;
+
+ pm->max_feature_arcs = max_feature_arcs;
+
+ *pfl = pm;
+
+ return 0;
+}
+
+int
+rte_graph_feature_arc_init(int max_feature_arcs)
+{
+ if (!max_feature_arcs)
+ return -1;
+
+ if (__feature_arc_main)
+ return -1;
+
+ return feature_arc_init(&__feature_arc_main, max_feature_arcs);
+}
+
+static void
+feature_arc_list_reset(struct rte_graph_feature_arc *arc, uint32_t list_index)
+{
+ rte_graph_feature_data_t *fdata = NULL;
+ rte_graph_feature_list_t *list = NULL;
+ struct rte_graph_feature *feat = NULL;
+ uint32_t i, j;
+
+ list = arc->feature_list[list_index];
+ feat = arc->features[list_index];
+
+	/* Initialize variables */
+ memset(feat, 0, arc->feature_size);
+ memset(list, 0, arc->feature_list_size);
+
+ /* Initialize feature and feature_data */
+ for (i = 0; i < arc->max_features; i++) {
+ feat = __rte_graph_feature_get(arc, i, list_index);
+ feat->this_feature_index = i;
+
+ for (j = 0; j < arc->max_indexes; j++) {
+ fdata = rte_graph_feature_data_get(arc, feat, j);
+ fdata->next_enabled_feature = RTE_GRAPH_FEATURE_INVALID;
+ fdata->next_edge = UINT16_MAX;
+ fdata->user_data = UINT32_MAX;
+ }
+ }
+
+ for (i = 0; i < arc->max_indexes; i++)
+ list->first_enabled_feature_by_index[i] = RTE_GRAPH_FEATURE_INVALID;
+}
+
+static int
+feature_arc_list_init(struct rte_graph_feature_arc *arc, const char *flist_name,
+ rte_graph_feature_list_t **pplist,
+ struct rte_graph_feature **ppfeature, uint32_t list_index)
+{
+ char fname[2 * RTE_GRAPH_FEATURE_ARC_NAMELEN];
+ size_t list_size, feat_size, fdata_size;
+ rte_graph_feature_list_t *list = NULL;
+ struct rte_graph_feature *feat = NULL;
+
+ list_size = sizeof(list->first_enabled_feature_by_index[0]) * arc->max_indexes;
+
+ list = rte_malloc(flist_name, list_size, RTE_CACHE_LINE_SIZE);
+ if (!list)
+ return -ENOMEM;
+
+ fdata_size = arc->max_indexes * sizeof(rte_graph_feature_data_t);
+
+ /* Let one feature capture complete cache lines */
+ feat_size = RTE_ALIGN_CEIL(sizeof(struct rte_graph_feature) + fdata_size,
+ RTE_CACHE_LINE_SIZE);
+
+ snprintf(fname, sizeof(fname), "%s-%s", arc->feature_arc_name, "feat");
+
+ feat = rte_malloc(fname, feat_size * arc->max_features, RTE_CACHE_LINE_SIZE);
+ if (!feat) {
+ rte_free(list);
+ return -ENOMEM;
+ }
+ arc->feature_size = feat_size;
+ arc->feature_data_size = fdata_size;
+ arc->feature_list_size = list_size;
+
+ /* Initialize list */
+ list->indexed_by_features = feat;
+ *pplist = list;
+ *ppfeature = feat;
+
+ feature_arc_list_reset(arc, list_index);
+
+ return 0;
+}
+
+static void
+feature_arc_list_destroy(rte_graph_feature_list_t *list)
+{
+ rte_free(list->indexed_by_features);
+ rte_free(list);
+}
+
+int
+rte_graph_feature_arc_create(const char *feature_arc_name, int max_features, int max_indexes,
+ struct rte_node_register *start_node, rte_graph_feature_arc_t *_arc)
+{
+ char name[2 * RTE_GRAPH_FEATURE_ARC_NAMELEN];
+ rte_graph_feature_arc_main_t *dfm = NULL;
+ struct rte_graph_feature_arc *arc = NULL;
+ struct rte_graph_feature_data *gfd = NULL;
+ struct rte_graph_feature *df = NULL;
+ uint32_t iter, j, arc_index;
+ size_t sz;
+
+ if (!_arc)
+ return -1;
+
+ if (max_features < 2)
+ return -1;
+
+ if (!start_node)
+ return -1;
+
+ if (!feature_arc_name)
+ return -1;
+
+ if (max_features > RTE_GRAPH_FEATURE_MAX_PER_ARC) {
+ graph_err("Invalid max features: %u", max_features);
+ return -1;
+ }
+
+ /*
+ * Application hasn't called rte_graph_feature_arc_init(). Initialize with
+ * default values
+ */
+ if (!__feature_arc_main) {
+ if (rte_graph_feature_arc_init((int)__RTE_GRAPH_FEATURE_ARC_MAX) < 0) {
+ graph_err("rte_graph_feature_arc_init() failed");
+ return -1;
+ }
+ }
+
+ dfm = __feature_arc_main;
+
+ /* threshold check */
+ if (dfm->num_feature_arcs > (dfm->max_feature_arcs - 1)) {
+ graph_err("max threshold for num_feature_arcs: %d reached",
+ dfm->max_feature_arcs - 1);
+ return -1;
+ }
+ /* Find the free slot for feature arc */
+ for (iter = 0; iter < dfm->max_feature_arcs; iter++) {
+ if (dfm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
+ break;
+ }
+ arc_index = iter;
+
+ if (arc_index >= dfm->max_feature_arcs) {
+ graph_err("No free slot found for num_feature_arc");
+ return -1;
+ }
+
+ /* This should not happen */
+ RTE_VERIFY(dfm->feature_arcs[arc_index] == RTE_GRAPH_FEATURE_ARC_INITIALIZER);
+
+ /* size of feature arc + feature_bit_mask_by_index */
+ sz = sizeof(*arc) + (sizeof(uint64_t) * max_indexes);
+
+ arc = rte_malloc(feature_arc_name, sz, RTE_CACHE_LINE_SIZE);
+
+ if (!arc) {
+ graph_err("malloc failed for feature_arc_create()");
+ return -1;
+ }
+
+ memset(arc, 0, sz);
+
+	/* Initialize feature arc fixed variables */
+ STAILQ_INIT(&arc->all_features);
+ strncpy(arc->feature_arc_name, feature_arc_name, RTE_GRAPH_FEATURE_ARC_NAMELEN - 1);
+ arc->feature_arc_main = (void *)dfm;
+ arc->start_node = start_node;
+ arc->max_features = max_features;
+ arc->max_indexes = max_indexes;
+
+ snprintf(name, sizeof(name), "%s-%s", feature_arc_name, "flist0");
+
+ if (feature_arc_list_init(arc, name, &arc->feature_list[0], &arc->features[0], 0) < 0) {
+ rte_free(arc);
+ graph_err("feature_arc_list_init(0) failed");
+ return -1;
+ }
+ snprintf(name, sizeof(name), "%s-%s", feature_arc_name, "flist1");
+
+ if (feature_arc_list_init(arc, name, &arc->feature_list[1], &arc->features[1], 1) < 0) {
+ feature_arc_list_destroy(arc->feature_list[0]);
+ graph_err("feature_arc_list_init(1) failed");
+ return -1;
+ }
+
+ for (iter = 0; iter < arc->max_features; iter++) {
+ df = rte_graph_feature_get(arc, iter);
+ for (j = 0; j < arc->max_indexes; j++) {
+ gfd = rte_graph_feature_data_get(arc, df, j);
+ gfd->next_enabled_feature = RTE_GRAPH_FEATURE_INVALID;
+ }
+ }
+ arc->feature_arc_index = arc_index;
+ dfm->feature_arcs[arc->feature_arc_index] = (rte_graph_feature_arc_t)arc;
+ dfm->num_feature_arcs++;
+
+ if (_arc)
+ *_arc = (rte_graph_feature_arc_t)arc;
+
+ return 0;
+}
+
+int
+rte_graph_feature_add(rte_graph_feature_arc_t _arc, struct rte_node_register *feature_node,
+ const char *after_feature, const char *before_feature)
+{
+ struct rte_graph_feature_node_list *after_finfo = NULL, *before_finfo = NULL;
+ struct rte_graph_feature_node_list *temp = NULL, *finfo = NULL;
+ struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
+ uint32_t slot, add_flag;
+ rte_edge_t edge = -1;
+
+ RTE_VERIFY(arc->feature_arc_main == __feature_arc_main);
+
+ if (feature_arc_lookup(_arc)) {
+ graph_err("invalid feature arc: 0x%016" PRIx64, (uint64_t)_arc);
+ return -1;
+ }
+
+ if (arc->runtime_enabled_features) {
+ graph_err("adding features after enabling any one of them is not supported");
+ return -1;
+ }
+
+ if ((after_feature != NULL) && (before_feature != NULL) &&
+ (after_feature == before_feature)) {
+		graph_err("after_feature and before_feature are the same '%s:%s'", after_feature,
+ before_feature);
+ return -1;
+ }
+
+ if (!feature_node) {
+ graph_err("feature_node: %p invalid", feature_node);
+ return -1;
+ }
+
+ arc = rte_graph_feature_arc_get(_arc);
+
+ if (feature_node->id == RTE_NODE_ID_INVALID) {
+ graph_err("Invalid node: %s", feature_node->name);
+ return -1;
+ }
+
+ if (!feature_lookup(arc, feature_node->name, &finfo, &slot)) {
+ graph_err("%s feature already added", feature_node->name);
+ return -1;
+ }
+
+ if (slot >= RTE_GRAPH_FEATURE_MAX_PER_ARC) {
+ graph_err("Max slot %u reached for feature addition", slot);
+ return -1;
+ }
+
+ if (strstr(feature_node->name, arc->start_node->name)) {
+ graph_err("Feature %s cannot point to itself: %s", feature_node->name,
+ arc->start_node->name);
+ return -1;
+ }
+
+ if (connect_graph_nodes(arc->start_node, feature_node, &edge, arc->feature_arc_name)) {
+ graph_err("unable to connect %s -> %s", arc->start_node->name, feature_node->name);
+ return -1;
+ }
+
+ finfo = malloc(sizeof(*finfo));
+ if (!finfo)
+ return -1;
+
+ memset(finfo, 0, sizeof(*finfo));
+
+ finfo->feature_arc = (void *)arc;
+ finfo->feature_node = feature_node;
+ finfo->edge_to_this_feature = edge;
+
+ /* Check for before and after constraints */
+ if (before_feature) {
+ /* before_feature sanity */
+ if (feature_lookup(arc, before_feature, &before_finfo, NULL))
+ SET_ERR_JMP(EINVAL, finfo_free,
+ "Invalid before feature name: %s", before_feature);
+
+ if (!before_finfo)
+ SET_ERR_JMP(EINVAL, finfo_free,
+ "before_feature %s does not exist", before_feature);
+
+ /*
+ * Starting from 0 to before_feature, continue connecting edges
+ */
+ add_flag = 1;
+ STAILQ_FOREACH(temp, &arc->all_features, next_feature) {
+ /*
+			 * As soon as we see before_feature, stop adding edges
+ */
+ if (!strncmp(temp->feature_node->name, before_feature,
+ RTE_GRAPH_NAMESIZE))
+ if (!connect_graph_nodes(finfo->feature_node, temp->feature_node,
+ &edge, arc->feature_arc_name))
+ add_flag = 0;
+
+ if (add_flag)
+ connect_graph_nodes(temp->feature_node, finfo->feature_node, NULL,
+ arc->feature_arc_name);
+ }
+ }
+
+ if (after_feature) {
+ if (feature_lookup(arc, after_feature, &after_finfo, NULL))
+ SET_ERR_JMP(EINVAL, finfo_free,
+ "Invalid after feature_name %s", after_feature);
+
+ if (!after_finfo)
+ SET_ERR_JMP(EINVAL, finfo_free,
+ "after_feature %s does not exist", after_feature);
+
+ /* Starting from after_feature to end continue connecting edges */
+ add_flag = 0;
+ STAILQ_FOREACH(temp, &arc->all_features, next_feature) {
+ /* We have already seen after_feature now */
+ if (add_flag)
+ /* Add all features as next node to current feature*/
+ connect_graph_nodes(finfo->feature_node, temp->feature_node, NULL,
+ arc->feature_arc_name);
+
+			/* As soon as we see after_feature, start adding edges
+			 * from the next iteration
+ */
+ if (!strncmp(temp->feature_node->name, after_feature, RTE_GRAPH_NAMESIZE))
+ /* connect after_feature to this feature */
+ if (!connect_graph_nodes(temp->feature_node, finfo->feature_node,
+ &edge, arc->feature_arc_name))
+ add_flag = 1;
+ }
+
+ /* add feature next to after_feature */
+ STAILQ_INSERT_AFTER(&arc->all_features, after_finfo, finfo, next_feature);
+ } else {
+ if (before_finfo) {
+ after_finfo = NULL;
+ STAILQ_FOREACH(temp, &arc->all_features, next_feature) {
+ if (before_finfo == temp) {
+ if (after_finfo)
+ STAILQ_INSERT_AFTER(&arc->all_features, after_finfo,
+ finfo, next_feature);
+ else
+ STAILQ_INSERT_HEAD(&arc->all_features, finfo,
+ next_feature);
+
+ return 0;
+ }
+ after_finfo = temp;
+ }
+ } else {
+ STAILQ_INSERT_TAIL(&arc->all_features, finfo, next_feature);
+ }
+ }
+
+ return 0;
+
+finfo_free:
+ free(finfo);
+
+ return -1;
+}
+
+int
+rte_graph_feature_lookup(rte_graph_feature_arc_t _arc, const char *feature_name,
+ rte_graph_feature_t *feat)
+{
+ struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
+ struct rte_graph_feature_node_list *finfo = NULL;
+ uint32_t slot;
+
+ if (!feature_lookup(arc, feature_name, &finfo, &slot)) {
+ *feat = (rte_graph_feature_t) slot;
+ return 0;
+ }
+
+ return -1;
+}
+
+int
+rte_graph_feature_validate(rte_graph_feature_arc_t _arc, uint32_t index, const char *feature_name,
+ int is_enable_disable)
+{
+ struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
+ struct rte_graph_feature_node_list *finfo = NULL;
+ struct rte_graph_feature *gf = NULL;
+ uint32_t slot;
+
+ /* validate _arc */
+ if (arc->feature_arc_main != __feature_arc_main) {
+ graph_err("invalid feature arc: 0x%016" PRIx64, (uint64_t)_arc);
+ return -EINVAL;
+ }
+
+ /* validate index */
+ if (index >= arc->max_indexes) {
+ graph_err("%s: Invalid provided index: %u >= %u configured", arc->feature_arc_name,
+ index, arc->max_indexes);
+ return -1;
+ }
+
+ /* validate feature_name is already added or not */
+ if (feature_lookup(arc, feature_name, &finfo, &slot)) {
+ graph_err("%s: No feature %s added", arc->feature_arc_name, feature_name);
+ return -EINVAL;
+ }
+
+ if (!finfo) {
+ graph_err("%s: No feature: %s found", arc->feature_arc_name, feature_name);
+ return -EINVAL;
+ }
+
+ /* slot should be in valid range */
+ if (slot >= arc->max_features) {
+ graph_err("%s/%s: Invalid free slot %u(max=%u) for feature", arc->feature_arc_name,
+ feature_name, slot, arc->max_features);
+ return -EINVAL;
+ }
+
+ /* slot should be in range of 0 - 63 */
+ if (slot > (RTE_GRAPH_FEATURE_MAX_PER_ARC - 1)) {
+ graph_err("%s/%s: Invalid slot: %u", arc->feature_arc_name,
+ feature_name, slot);
+ return -EINVAL;
+ }
+
+ if (finfo->node_index != slot) {
+ graph_err("%s/%s: feature lookup slot mismatch with finfo index: %u and lookup slot: %u",
+ arc->feature_arc_name, feature_name, finfo->node_index, slot);
+ return -1;
+ }
+
+ /* Get feature from active list */
+ gf = __rte_graph_feature_get(arc, slot, ARC_PASSIVE_LIST(arc));
+ if (gf->this_feature_index != slot) {
+ graph_err("%s: %s received feature_index: %u does not match with saved feature_index: %u",
+ arc->feature_arc_name, feature_name, slot, gf->this_feature_index);
+ return -1;
+ }
+
+ if (is_enable_disable && (arc->feature_bit_mask_by_index[index] &
+ RTE_BIT64(slot))) {
+ graph_err("%s: %s already enabled on index: %u",
+ arc->feature_arc_name, feature_name, index);
+ return -1;
+ }
+
+ if (!is_enable_disable && !arc->runtime_enabled_features) {
+ graph_err("%s: No feature enabled to disable", arc->feature_arc_name);
+ return -1;
+ }
+
+ if (!is_enable_disable && !(arc->feature_bit_mask_by_index[index] & RTE_BIT64(slot))) {
+ graph_err("%s: %s not enabled in bitmask for index: %u",
+ arc->feature_arc_name, feature_name, index);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+copy_fastpath_user_data(struct rte_graph_feature_arc *arc, uint16_t dest_list_index,
+ uint16_t src_list_index)
+{
+ rte_graph_feature_data_t *sgfd = NULL, *dgfd = NULL;
+ struct rte_graph_feature *sgf = NULL, *dgf = NULL;
+ uint32_t i, j;
+
+ for (i = 0; i < arc->max_features; i++) {
+ sgf = __rte_graph_feature_get(arc, i, src_list_index);
+ dgf = __rte_graph_feature_get(arc, i, dest_list_index);
+ for (j = 0; j < arc->max_indexes; j++) {
+ sgfd = rte_graph_feature_data_get(arc, sgf, j);
+ dgfd = rte_graph_feature_data_get(arc, dgf, j);
+ dgfd->user_data = sgfd->user_data;
+ }
+ }
+}
+
+static void
+refill_feature_fastpath_data(struct rte_graph_feature_arc *arc, uint16_t list_index)
+{
+ struct rte_graph_feature_node_list *finfo = NULL, *prev_finfo = NULL;
+ struct rte_graph_feature_data *gfd = NULL, *prev_gfd = NULL;
+ struct rte_graph_feature *gf = NULL, *prev_gf = NULL;
+ rte_graph_feature_list_t *flist = NULL;
+ uint32_t fi, di, prev_fi;
+ uint64_t bitmask;
+ rte_edge_t edge;
+
+ flist = arc->feature_list[list_index];
+
+ for (di = 0; di < arc->max_indexes; di++) {
+ bitmask = arc->feature_bit_mask_by_index[di];
+ prev_fi = RTE_GRAPH_FEATURE_INVALID;
+ /* for each feature set for index, set fast path data */
+ while (rte_bsf64_safe(bitmask, &fi)) {
+ gf = __rte_graph_feature_get(arc, fi, list_index);
+ gfd = rte_graph_feature_data_get(arc, gf, di);
+ feature_arc_node_info_lookup(arc, fi, &finfo);
+
+ /* If previous feature_index was valid in last loop */
+ if (prev_fi != RTE_GRAPH_FEATURE_INVALID) {
+ prev_gf = __rte_graph_feature_get(arc, prev_fi, list_index);
+ prev_gfd = rte_graph_feature_data_get(arc, prev_gf, di);
+ /*
+ * Get edge of previous feature node connecting to this feature node
+ */
+ feature_arc_node_info_lookup(arc, prev_fi, &prev_finfo);
+ if (!get_existing_edge(arc->feature_arc_name,
+ prev_finfo->feature_node,
+ finfo->feature_node, &edge)) {
+ feat_dbg("[%s/%s(%2u)/idx:%2u]: %s[%u] = %s",
+ arc->feature_arc_name,
+ prev_finfo->feature_node->name, prev_fi, di,
+ prev_finfo->feature_node->name,
+ edge, finfo->feature_node->name);
+ /* Copy feature index for next iteration*/
+ gfd->next_edge = edge;
+ prev_fi = fi;
+ /*
+ * Fill current feature as next enabled
+ * feature to previous one
+ */
+ prev_gfd->next_enabled_feature = fi;
+ } else {
+ /* Should not fail */
+ RTE_VERIFY(0);
+ }
+ }
+ /* On first feature edge of the node to be added */
+ if (fi == rte_bsf64(arc->feature_bit_mask_by_index[di])) {
+ if (!get_existing_edge(arc->feature_arc_name, arc->start_node,
+ finfo->feature_node,
+ &edge)) {
+ feat_dbg("[%s/%s/%2u/idx:%2u]: 1st feat %s[%u] = %s",
+ arc->feature_arc_name,
+ arc->start_node->name, fi, di,
+ arc->start_node->name, edge,
+ finfo->feature_node->name);
+ /* Copy feature index for next iteration*/
+ gfd->next_edge = edge;
+ prev_fi = fi;
+ /* Set first feature set array for index*/
+ flist->first_enabled_feature_by_index[di] = fi;
+ } else {
+ /* Should not fail */
+ RTE_VERIFY(0);
+ }
+ }
+ /* Clear current feature index */
+ bitmask &= ~RTE_BIT64(fi);
+ }
+ }
+}
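As an aside for reviewers: the loop above leans on the rte_bsf64_safe()/RTE_BIT64() bit-scan idiom. A tiny standalone sketch of just that idiom (not part of the patch) is:

#include <stdio.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_bitops.h>

/* Visit every enabled feature slot of an index in ascending order */
static void
walk_enabled_slots(uint64_t bitmask)
{
	uint32_t slot;

	while (rte_bsf64_safe(bitmask, &slot)) {
		/* slot is the lowest set bit, i.e. the next enabled feature */
		printf("feature slot %u is enabled\n", slot);
		bitmask &= ~RTE_BIT64(slot); /* clear it and continue */
	}
}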
+
+int
+rte_graph_feature_enable(rte_graph_feature_arc_t _arc, uint32_t index, const
+ char *feature_name, int32_t user_data)
+{
+ struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
+ struct rte_graph_feature_node_list *finfo = NULL;
+ struct rte_graph_feature_data *gfd = NULL;
+ rte_graph_feature_rt_list_t passive_list;
+ struct rte_graph_feature *gf = NULL;
+ uint64_t fp_bitmask;
+ uint32_t slot;
+
+ if (rte_graph_feature_validate(_arc, index, feature_name, 1))
+ return -1;
+
+ /** This should not fail as validate() has passed */
+ if (feature_lookup(arc, feature_name, &finfo, &slot))
+ RTE_VERIFY(0);
+
+ if (!arc->runtime_enabled_features)
+ prepare_feature_arc(arc);
+
+ passive_list = ARC_PASSIVE_LIST(arc);
+
+ gf = __rte_graph_feature_get(arc, slot, passive_list);
+ gfd = rte_graph_feature_data_get(arc, gf, index);
+
+ feat_dbg("%s/%s: Enabling feature on list: %u for index: %u at feature slot %u",
+ arc->feature_arc_name, feature_name, passive_list, index, slot);
+
+ /* Reset feature list */
+ feature_arc_list_reset(arc, passive_list);
+
+ /* Copy user-data */
+ copy_fastpath_user_data(arc, passive_list, arc->active_feature_list);
+
+ /* Set current user-data */
+ gfd->user_data = user_data;
+
+ /* Set bitmask in control path bitmask */
+ rte_bit_relaxed_set64(rte_graph_uint_cast(slot), &arc->feature_bit_mask_by_index[index]);
+ refill_feature_fastpath_data(arc, passive_list);
+
+ /* Set fast path enable bitmask */
+ fp_bitmask = __atomic_load_n(&arc->feature_enable_bitmask[passive_list], __ATOMIC_RELAXED);
+ fp_bitmask |= RTE_BIT64(slot);
+ __atomic_store(&arc->feature_enable_bitmask[passive_list], &fp_bitmask, __ATOMIC_RELAXED);
+
+ /* Slow path updates */
+ arc->runtime_enabled_features++;
+
+ /* Increase feature node info reference count */
+ finfo->ref_count++;
+
+ /* Store release semantics for active_list update */
+ __atomic_store(&arc->active_feature_list, &passive_list, __ATOMIC_RELEASE);
+
+ return 0;
+}
+
+int
+rte_graph_feature_disable(rte_graph_feature_arc_t _arc, uint32_t index, const char *feature_name)
+{
+ struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
+ struct rte_graph_feature_data *gfd = NULL;
+ struct rte_graph_feature_node_list *finfo = NULL;
+ rte_graph_feature_rt_list_t passive_list;
+ struct rte_graph_feature *gf = NULL;
+ uint32_t slot;
+
+ if (rte_graph_feature_validate(_arc, index, feature_name, 0))
+ return -1;
+
+ if (feature_lookup(arc, feature_name, &finfo, &slot))
+ return -1;
+
+ passive_list = ARC_PASSIVE_LIST(arc);
+
+ gf = __rte_graph_feature_get(arc, slot, passive_list);
+ gfd = rte_graph_feature_data_get(arc, gf, index);
+
+ feat_dbg("%s/%s: Disabling feature for index: %u at feature slot %u", arc->feature_arc_name,
+ feature_name, index, slot);
+
+ rte_bit_relaxed_clear64(rte_graph_uint_cast(slot), &arc->feature_bit_mask_by_index[index]);
+
+ /* Set fast path enable bitmask */
+ arc->feature_enable_bitmask[passive_list] &= ~(RTE_BIT64(slot));
+
+ /* Reset feature list */
+ feature_arc_list_reset(arc, passive_list);
+
+ /* Copy user-data */
+ copy_fastpath_user_data(arc, passive_list, arc->active_feature_list);
+
+ /* Reset current user-data */
+ gfd->user_data = ~0;
+
+ refill_feature_fastpath_data(arc, passive_list);
+
+ finfo->ref_count--;
+ arc->runtime_enabled_features--;
+
+ /* Store release semantics for active_list update */
+ __atomic_store(&arc->active_feature_list, &passive_list, __ATOMIC_RELEASE);
+
+ return 0;
+}
+
+int
+rte_graph_feature_arc_destroy(rte_graph_feature_arc_t _arc)
+{
+ struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
+ rte_graph_feature_arc_main_t *dm = __feature_arc_main;
+ struct rte_graph_feature_node_list *node_info = NULL;
+
+ while (!STAILQ_EMPTY(&arc->all_features)) {
+ node_info = STAILQ_FIRST(&arc->all_features);
+ STAILQ_REMOVE_HEAD(&arc->all_features, next_feature);
+ free(node_info);
+ }
+ feature_arc_list_destroy(arc->feature_list[0]);
+ feature_arc_list_destroy(arc->feature_list[1]);
+ rte_free(arc->features[0]);
+ rte_free(arc->features[1]);
+
+ dm->feature_arcs[arc->feature_arc_index] = RTE_GRAPH_FEATURE_ARC_INITIALIZER;
+
+ rte_free(arc);
+ return 0;
+}
+
+int
+rte_graph_feature_arc_cleanup(void)
+{
+ rte_graph_feature_arc_main_t *dm = __feature_arc_main;
+ uint32_t iter;
+
+ if (!__feature_arc_main)
+ return -1;
+
+ for (iter = 0; iter < dm->max_feature_arcs; iter++) {
+ if (dm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
+ continue;
+
+ rte_graph_feature_arc_destroy((rte_graph_feature_arc_t)dm->feature_arcs[iter]);
+ }
+ free(dm);
+
+ __feature_arc_main = NULL;
+
+ return 0;
+}
+
+int
+rte_graph_feature_arc_lookup_by_name(const char *arc_name, rte_graph_feature_arc_t *_arc)
+{
+ rte_graph_feature_arc_main_t *dm = __feature_arc_main;
+ struct rte_graph_feature_arc *arc = NULL;
+ uint32_t iter;
+
+ if (!__feature_arc_main)
+ return -1;
+
+ for (iter = 0; iter < dm->max_feature_arcs; iter++) {
+ if (dm->feature_arcs[iter] == RTE_GRAPH_FEATURE_ARC_INITIALIZER)
+ continue;
+
+ arc = rte_graph_feature_arc_get(dm->feature_arcs[iter]);
+
+ if (strstr(arc_name, arc->feature_arc_name)) {
+ if (_arc)
+ *_arc = (rte_graph_feature_arc_t)arc;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+int
+rte_graph_feature_arc_num_enabled_features(rte_graph_feature_arc_t _arc)
+{
+ struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(_arc);
+
+ return arc->runtime_enabled_features;
+}
+
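A note on the enable/disable implementation above: it follows a ping-pong publication scheme, where the control plane rebuilds the passive copy and then flips active_feature_list for readers. A minimal standalone sketch of that scheme follows; it is an illustration, not part of the patch (the patch pairs the release store with relaxed loads in its fast path helpers).

#include <stdint.h>

struct cfg {
	int value;
};

static struct cfg cfg_copies[2]; /* [0] and [1] alternate as active/passive */
static uint8_t active_copy;

/* Control plane: rebuild the passive copy, then publish it atomically */
static void
update_cfg(int new_value)
{
	uint8_t passive = active_copy ^ 0x1;

	cfg_copies[passive].value = new_value;
	__atomic_store_n(&active_copy, passive, __ATOMIC_RELEASE);
}

/* Data plane: snapshot the active index once, then read through it */
static int
read_cfg(void)
{
	uint8_t act = __atomic_load_n(&active_copy, __ATOMIC_ACQUIRE);

	return cfg_copies[act].value;
}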
+
@@ -14,11 +14,13 @@ sources = files(
'graph_debug.c',
'graph_stats.c',
'graph_populate.c',
+ 'graph_feature_arc.c',
'graph_pcap.c',
'rte_graph_worker.c',
'rte_graph_model_mcore_dispatch.c',
)
headers = files('rte_graph.h', 'rte_graph_worker.h')
+headers += files('rte_graph_feature_arc.h', 'rte_graph_feature_arc_worker.h')
indirect_headers += files(
'rte_graph_model_mcore_dispatch.h',
'rte_graph_model_rtc.h',
new file mode 100644
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell International Ltd.
+ */
+
+#ifndef _RTE_GRAPH_FEATURE_ARC_H_
+#define _RTE_GRAPH_FEATURE_ARC_H_
+
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_debug.h>
+#include <rte_graph.h>
+#include <rte_graph_worker.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ *
+ * rte_graph_feature_arc.h
+ *
+ * Define APIs and structures/variables with respect to feature arc
+ *
+ * - Feature arc(s)
+ * - Feature(s)
+ *
+ * A feature arc represents an ordered list of features/protocol-nodes at a
+ * given networking layer. A feature arc provides a high level abstraction to
+ * connect various *rte_graph* nodes, designated as *feature nodes*, and to
+ * steer packets across these feature nodes in fast path processing in a
+ * generic manner. In a typical network stack, a protocol or feature must
+ * often be enabled on a given interface before any packet is steered towards
+ * it for feature processing. For example, incoming IPv4 packets are sent to
+ * the routing sub-system only after a valid IPv4 address is assigned to the
+ * receiving interface. In other words, packets often need to be steered
+ * across features not based on the packet content but based on whether a
+ * feature is enabled or disabled on a given incoming/outgoing interface. A
+ * feature arc provides a mechanism to enable/disable feature(s) on each
+ * interface at runtime and allows seamless packet steering across runtime
+ * enabled feature nodes in fast path.
+ *
+ * A feature arc also provides a way to steer packets from standard nodes to
+ * custom/user-defined *feature nodes* without any change in the standard
+ * node's fast path functions.
+ *
+ * On a given interface multiple feature(s) might be enabled in a particular
+ * feature arc. For instance, both "ipv4-output" and "IPsec policy output"
+ * features may be enabled on "eth0" interface in "L3-output" feature arc.
+ * Similarly, "ipv6-output" and "ipsec-output" may be enabled on "eth1"
+ * interface in same "L3-output" feature arc.
+ *
+ * When multiple features are present in a given feature arc, it is imperative
+ * to allow each feature to be processed in a particular sequential order. For
+ * instance, in the "L3-input" feature arc it may be required to run the "IPsec
+ * input" feature first, for packet decryption, before "ip-lookup". So a
+ * sequential order must be maintained among the features present in a feature
+ * arc.
+ *
+ * Features can be enabled/disabled multiple times at runtime on some or all
+ * of the available interfaces present in the system. Features can be
+ * enabled/disabled even after @b rte_graph_create() is called. Enabling or
+ * disabling features on one interface is independent of other interfaces.
+ *
+ * A given feature might consume a packet (if it is configured to consume) or
+ * may forward it to the next enabled feature. For instance, the "IPsec input"
+ * feature may consume/drop all packets with the "Protect" policy action, while
+ * all packets with the "Bypass" policy action may be forwarded to the next
+ * enabled feature (within the same feature arc).
+ *
+ * This library enables rte_graph based applications to steer packets in fast
+ * path to different feature nodes within a feature arc and supports all the
+ * functionalities described above.
+ *
+ * In order to use feature-arc APIs, an application needs to do the following
+ * in control path (a usage sketch is shown after this header):
+ * - Initialize feature arc library via rte_graph_feature_arc_init()
+ * - Create feature arc via rte_graph_feature_arc_create()
+ * - *Before calling rte_graph_create()*, features must be added to feature-arc
+ * via rte_graph_feature_add(). rte_graph_feature_add() allows adding
+ * features in a sequential order with "runs_after" and "runs_before"
+ * constraints.
+ * - Post rte_graph_create(), features can be enabled/disabled at runtime on
+ * any interface via rte_graph_feature_enable()/rte_graph_feature_disable()
+ * - Feature arc can be destroyed via rte_graph_feature_arc_destroy()
+ *
+ * In fast path, APIs are provided to steer packets towards feature path from
+ * - start_node (provided as an argument to rte_graph_feature_arc_create())
+ * - feature nodes (which are added via rte_graph_feature_add())
+ *
+ * For typical steering of packets across feature nodes, the application is
+ * required to know the "rte_edges" which are saved in the feature data object.
+ * The feature data object is unique for every interface per feature within a
+ * feature arc.
+ *
+ * When steering packets from start_node to feature node:
+ * - rte_graph_feature_arc_first_feature_get() provides first enabled feature.
+ * - Next rte_edge from start_node to first enabled feature can be obtained via
+ * rte_graph_feature_arc_feature_set()
+ *
+ * rte_mbuf can carry [current feature, index] from start_node of an arc to other
+ * feature nodes
+ *
+ * In a feature node, the application can get the 32-bit user_data
+ * via rte_graph_feature_user_data_get(), which is provided in
+ * rte_graph_feature_enable(). User data can hold a feature specific cookie
+ * like an IPsec policy database index (if more than one is supported)
+ *
+ * If a feature node is not consuming the packet, the next enabled feature and
+ * the next rte_edge can be obtained via rte_graph_feature_arc_next_feature_get()
+ *
+ * It is the application's responsibility to ensure that at least the *last
+ * feature* (or sink feature) is enabled, from where the packet can exit the
+ * feature-arc path, if *NO* intermediate feature consumes the packet and it
+ * has reached the end of the feature arc path
+ *
+ * Synchronization among cores
+ * ---------------------------
+ * Subsequent calls to rte_graph_feature_enable() are allowed while worker
+ * cores are processing in the rte_graph_walk() loop. However, for
+ * rte_graph_feature_disable() the application must use RCU based
+ * synchronization
+ */
+
+/**< Initializer value for rte_graph_feature_arc_t */
+#define RTE_GRAPH_FEATURE_ARC_INITIALIZER ((rte_graph_feature_arc_t)UINT64_MAX)
+
+/** Max number of features supported in a given feature arc */
+#define RTE_GRAPH_FEATURE_MAX_PER_ARC 64
+
+/** Length of feature arc name */
+#define RTE_GRAPH_FEATURE_ARC_NAMELEN RTE_NODE_NAMESIZE
+
+/** @internal */
+#define rte_graph_feature_cast(x) ((rte_graph_feature_t)x)
+
+/**< Invalid value for rte_graph_feature_t */
+#define RTE_GRAPH_FEATURE_INVALID rte_graph_feature_cast(UINT8_MAX)
+
+/** rte_graph feature arc object */
+typedef uint64_t rte_graph_feature_arc_t;
+
+/** rte_graph feature object */
+typedef uint8_t rte_graph_feature_t;
+
+/** Runtime active feature list index within a feature arc */
+typedef uint8_t rte_graph_feature_rt_list_t;
+
+/** per feature arc monotonically increasing counter to synchronize fast path APIs */
+typedef uint16_t rte_graph_feature_counter_t;
+
+/**
+ * Initialize feature arc subsystem
+ *
+ * @param max_feature_arcs
+ * Maximum number of feature arcs required to be supported
+ *
+ * @return
+ * 0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_arc_init(int max_feature_arcs);
+
+/**
+ * Create a feature arc
+ *
+ * @param feature_arc_name
+ * Feature arc name with max length of @ref RTE_GRAPH_FEATURE_ARC_NAMELEN
+ * @param max_features
+ * Maximum number of features to be supported in this feature arc
+ * @param max_indexes
+ * Maximum number of interfaces/ports/indexes to be supported
+ * @param start_node
+ * Base node where this feature arc's features are checked in fast path
+ * @param[out] _arc
+ * Feature arc object
+ *
+ * @return
+ * 0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_arc_create(const char *feature_arc_name, int max_features, int max_indexes,
+ struct rte_node_register *start_node,
+ rte_graph_feature_arc_t *_arc);
+
+/**
+ * Get feature arc object with name
+ *
+ * @param arc_name
+ * Feature arc name provided to successful @ref rte_graph_feature_arc_create
+ * @param[out] _arc
+ * Feature arc object returned
+ *
+ * @return
+ * 0: Success
+ * <0: Failure.
+ */
+__rte_experimental
+int rte_graph_feature_arc_lookup_by_name(const char *arc_name, rte_graph_feature_arc_t *_arc);
+
+/**
+ * Add a feature to an already created feature arc. For instance:
+ *
+ * 1. Add first feature node: "ipv4-input" to input arc
+ * rte_graph_feature_add(ipv4_input_arc, "ipv4-input", NULL, NULL);
+ *
+ * 2. Add "ipsec-input" feature node after "ipv4-input" node
+ * rte_graph_feature_add(ipv4_input_arc, "ipsec-input", "ipv4-input", NULL);
+ *
+ * 3. Add "ipv4-pre-classify-input" node before "ipv4-input" node
+ * rte_graph_feature_add(ipv4_input_arc, "ipv4-pre-classify-input"", NULL, "ipv4-input");
+ *
+ * 4. Add "acl-classify-input" node after ipv4-input but before ipsec-input
+ * rte_graph_feature_add(ipv4_input_arc, "acl-classify-input", "ipv4-input", "ipsec-input");
+ *
+ * @param _arc
+ * Feature arc handle returned from @ref rte_graph_feature_arc_create()
+ * @param feature_node
+ * Graph node representing feature. On success, feature_node is next_node of
+ * feature_arc->start_node
+ * @param runs_after
+ * Add this feature_node after already added "runs_after". Creates
+ * start_node -> runs_after -> this_feature sequence
+ * @param runs_before
+ * Add this feature_node before already added "runs_before". Creates
+ * start_node -> this_feature -> runs_before sequence
+ *
+ * <I> Must be called before rte_graph_create() </I>
+ * <I> rte_graph_feature_add() is not allowed after call to
+ * rte_graph_feature_enable() so all features must be added before they can be
+ * enabled </I>
+ *
+ * @return
+ * 0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_add(rte_graph_feature_arc_t _arc, struct rte_node_register *feature_node,
+ const char *runs_after, const char *runs_before);
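A hedged sketch of how the ordering examples in the comment above map onto the actual signature; ipv4_input_node and ipsec_input_node are assumed, already registered rte_node_register objects whose names are "ipv4-input" and "ipsec-input", and l3_input_arc an arc created earlier (none of these are part of the patch):

#include <rte_graph_feature_arc.h>

extern struct rte_node_register ipv4_input_node;  /* assumed feature node */
extern struct rte_node_register ipsec_input_node; /* assumed feature node */

static int
add_l3_input_features(rte_graph_feature_arc_t l3_input_arc)
{
	/* First feature: no ordering constraints */
	if (rte_graph_feature_add(l3_input_arc, &ipv4_input_node, NULL, NULL))
		return -1;

	/* Run "ipsec-input" after "ipv4-input" */
	return rte_graph_feature_add(l3_input_arc, &ipsec_input_node,
				     "ipv4-input", NULL);
}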
+
+/**
+ * Enable feature within a feature arc
+ *
+ * Must be called after @b rte_graph_create().
+ *
+ * @param _arc
+ * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ * rte_graph_feature_arc_lookup_by_name
+ * @param index
+ * Application specific index. Can correspond to interface_id/port_id etc.
+ * @param feature_name
+ * Name of the node which is already added via @ref rte_graph_feature_add
+ * @param user_data
+ * Application specific data which is retrieved in fast path
+ *
+ * @return
+ * 0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_enable(rte_graph_feature_arc_t _arc, uint32_t index, const char *feature_name,
+ int32_t user_data);
+
+/**
+ * Validate whether subsequent enable/disable feature would succeed or not.
+ * API is thread-safe
+ *
+ * @param _arc
+ * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ * rte_graph_feature_arc_lookup_by_name
+ * @param index
+ * Application specific index. Can correspond to interface_id/port_id etc.
+ * @param feature_name
+ * Name of the node which is already added via @ref rte_graph_feature_add
+ * @param is_enable_disable
+ * If 1, validate whether subsequent @ref rte_graph_feature_enable would pass or not
+ * If 0, validate whether subsequent @ref rte_graph_feature_disable would pass or not
+ *
+ * @return
+ * 0: Subsequent enable/disable API would pass
+ * <0: Subsequent enable/disable API would not pass
+ */
+__rte_experimental
+int rte_graph_feature_validate(rte_graph_feature_arc_t _arc, uint32_t index,
+ const char *feature_name, int is_enable_disable);
+
+/**
+ * Disable already enabled feature within a feature arc
+ *
+ * Must be called after @b rte_graph_create(). API is *NOT* Thread-safe
+ *
+ * @param _arc
+ * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ * rte_graph_feature_arc_lookup_by_name
+ * @param index
+ * Application specific index. Can correspond to interface_id/port_id etc.
+ * @param feature_name
+ * Name of the node which is already added via @ref rte_graph_feature_add
+ *
+ * @return
+ * 0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_disable(rte_graph_feature_arc_t _arc, uint32_t index,
+ const char *feature_name);
+
+/**
+ * Get rte_graph_feature_t object from feature name
+ *
+ * @param arc
+ * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ * rte_graph_feature_arc_lookup_by_name
+ * @param feature_name
+ * Feature name provided to @ref rte_graph_feature_add
+ * @param[out] feature
+ * Feature object
+ *
+ * @return
+ * 0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_lookup(rte_graph_feature_arc_t _arc, const char *feature_name,
+ rte_graph_feature_t *feature);
+
+/**
+ * Delete feature_arc object
+ *
+ * @param _arc
+ * Feature arc object returned by @ref rte_graph_feature_arc_create or @ref
+ * rte_graph_feature_arc_lookup_by_name
+ *
+ * @return
+ * 0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_arc_destroy(rte_graph_feature_arc_t _arc);
+
+/**
+ * Cleanup all feature arcs
+ *
+ * @return
+ * 0: Success
+ * <0: Failure
+ */
+__rte_experimental
+int rte_graph_feature_arc_cleanup(void);
+
+/**
+ * Slow path API to know how many features are currently enabled within a feature arc
+ *
+ * @param _arc
+ * Feature arc object
+ *
+ * @return
+ *   Number of enabled features
+ */
+__rte_experimental
+int rte_graph_feature_arc_num_enabled_features(rte_graph_feature_arc_t _arc);
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/graph/rte_graph_feature_arc_worker.h b/lib/graph/rte_graph_feature_arc_worker.h
new file mode 100644
--- /dev/null
+++ b/lib/graph/rte_graph_feature_arc_worker.h
@@ -0,0 +1,548 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell International Ltd.
+ */
+
+#ifndef _RTE_GRAPH_FEATURE_ARC_WORKER_H_
+#define _RTE_GRAPH_FEATURE_ARC_WORKER_H_
+
+#include <stddef.h>
+#include <rte_graph_feature_arc.h>
+#include <rte_bitops.h>
+
+/**
+ * @file
+ *
+ * rte_graph_feature_arc_worker.h
+ *
+ * Defines fast path structures and inline functions
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @internal
+ *
+ * Slow path feature node info list
+ */
+struct rte_graph_feature_node_list {
+ /** Next feature */
+ STAILQ_ENTRY(rte_graph_feature_node_list) next_feature;
+
+ /** node representing feature */
+ struct rte_node_register *feature_node;
+
+	/** How many indexes/interfaces are using this feature */
+ int32_t ref_count;
+
+	/** node_index in list (after feature_enable()) */
+ uint32_t node_index;
+
+ /** Back pointer to feature arc */
+ void *feature_arc;
+
+ /** rte_edge_t to this feature node from feature_arc->start_node */
+ rte_edge_t edge_to_this_feature;
+};
+
+/**
+ * Fast path data holding rte_edge_t and the next enabled feature for a feature
+ */
+typedef struct __rte_packed rte_graph_feature_data {
+	/* next node to which the current mbuf should go */
+ rte_edge_t next_edge;
+
+ /* next enabled feature on this arc for current index */
+ union {
+ uint16_t reserved;
+ struct {
+ rte_graph_feature_t next_enabled_feature;
+ };
+ };
+
+ /* user_data */
+ int32_t user_data;
+} rte_graph_feature_data_t;
+
+/**
+ * Fast path feature structure. Holds rte_graph_feature_data_t per index
+ */
+struct __rte_cache_aligned rte_graph_feature {
+ uint16_t this_feature_index;
+
+ /* Array of size arc->feature_data_size
+ * [data-index-0][data-index-1]...
+ * Each index of size: sizeof(rte_graph_feature_data_t)
+ */
+ uint8_t feature_data_by_index[];
+};
+
+/**
+ * Fast path cache aligned feature list holding all features.
+ * There are two feature lists: active and passive.
+ *
+ * Fast path APIs work on the active list while the control plane updates the
+ * passive list. An atomic update to arc->active_feature_list switches between
+ * the active and passive lists.
+ */
+typedef struct __rte_cache_aligned rte_graph_feature_list {
+ /**
+ * fast path array holding per_feature data.
+	 * Duplicate entry, as the feature arc also holds this pointer in
+ * arc->features[]
+ *
+ *<-------------feature-0 ---------><CEIL><---------feature-1 -------------->...
+ *[index-0][index-1]...[max_index-1] [index-0][index-1] ...[max_index-1]...
+ */
+ struct rte_graph_feature *indexed_by_features;
+ /*
+	 * fast path array holding the first enabled feature per index
+	 * (Required in start_node. In non-start_node nodes, the mbuf can hold
+	 * the next enabled feature)
+ */
+ rte_graph_feature_t first_enabled_feature_by_index[];
+} rte_graph_feature_list_t;
+
+/**
+ * rte_graph feature arc object
+ *
+ * A feature arc can hold at most RTE_GRAPH_FEATURE_MAX_PER_ARC features, but
+ * there is no limit on the interface index
+ *
+ * Represents a feature arc holding all features which are enabled/disabled
+ * on any interface
+ */
+struct __rte_cache_aligned rte_graph_feature_arc {
+	/* First 64B holds the fast path variables */
+ RTE_MARKER fast_path_variables;
+
+ /** runtime active feature list */
+ rte_graph_feature_rt_list_t active_feature_list;
+
+	/* Actual size of feature_list[0] */
+ uint16_t feature_list_size;
+
+	/**
+	 * Size of each feature in the fast path.
+	 * sizeof(arc->active_list->indexed_by_features[0])
+	 */
+ uint16_t feature_size;
+
+	/* Size of arc->max_indexes * sizeof(rte_graph_feature_data_t) */
+ uint16_t feature_data_size;
+
+ /**
+	 * Fast path bitmask indicating if a feature is enabled or not. Number
+ * of bits: RTE_GRAPH_FEATURE_MAX_PER_ARC
+ */
+ uint64_t feature_enable_bitmask[2];
+ rte_graph_feature_list_t *feature_list[2];
+ struct rte_graph_feature *features[2];
+
+ /** index in feature_arc_main */
+ uint16_t feature_arc_index;
+
+ uint16_t reserved[3];
+
+	/** Slow path variables follow */
+ RTE_MARKER slow_path_variables;
+
+ /** feature arc name */
+ char feature_arc_name[RTE_GRAPH_FEATURE_ARC_NAMELEN];
+
+	/** List of all features added to this arc */
+ STAILQ_HEAD(, rte_graph_feature_node_list) all_features;
+
+ uint32_t runtime_enabled_features;
+
+ /** Back pointer to feature_arc_main */
+ void *feature_arc_main;
+
+ /* start_node */
+ struct rte_node_register *start_node;
+
+ /* maximum number of features supported by this arc */
+ uint32_t max_features;
+
+	/* maximum number of indexes supported by this arc */
+ uint32_t max_indexes;
+
+ /* Slow path bit mask per feature per index */
+ uint64_t feature_bit_mask_by_index[];
+};
+
+/** Feature arc main */
+typedef struct feature_arc_main {
+ /** number of feature arcs created by application */
+ uint32_t num_feature_arcs;
+
+ /** max features arcs allowed */
+ uint32_t max_feature_arcs;
+
+ /** feature arcs */
+ rte_graph_feature_arc_t feature_arcs[];
+} rte_graph_feature_arc_main_t;
+
+/** @internal Get feature arc pointer from object */
+#define rte_graph_feature_arc_get(arc) ((struct rte_graph_feature_arc *)arc)
+
+extern rte_graph_feature_arc_main_t *__feature_arc_main;
+
+/**
+ * API to know if a feature is valid or not
+ */
+
+static __rte_always_inline int
+rte_graph_feature_is_valid(rte_graph_feature_t feature)
+{
+ return (feature != RTE_GRAPH_FEATURE_INVALID);
+}
+
+/**
+ * Get rte_graph_feature object with no checks
+ *
+ * @param arc
+ * Feature arc pointer
+ * @param feature
+ * Feature index
+ * @param feature_list
+ * active feature list retrieved from rte_graph_feature_arc_has_any_feature()
+ * or rte_graph_feature_arc_has_feature()
+ *
+ * @return
+ * Internal feature object.
+ */
+static __rte_always_inline struct rte_graph_feature *
+__rte_graph_feature_get(struct rte_graph_feature_arc *arc, rte_graph_feature_t feature,
+ const rte_graph_feature_rt_list_t feature_list)
+{
+	return ((struct rte_graph_feature *)(((uint8_t *)arc->features[feature_list]) +
+				(feature * arc->feature_size)));
+}
+
+/**
+ * Get rte_graph_feature object for a given feature index from the arc's active list
+ *
+ * @param arc
+ * Feature arc pointer
+ * @param feature
+ * Feature index
+ *
+ * @return
+ * Internal feature object.
+ */
+static __rte_always_inline struct rte_graph_feature *
+rte_graph_feature_get(struct rte_graph_feature_arc *arc, rte_graph_feature_t feature)
+{
+	if (likely(rte_graph_feature_is_valid(feature))) {
+		RTE_VERIFY(feature < arc->max_features);
+		return __rte_graph_feature_get(arc, feature, arc->active_feature_list);
+	}
+
+	return NULL;
+}
+
+static __rte_always_inline rte_graph_feature_data_t *
+__rte_graph_feature_data_get(struct rte_graph_feature_arc *arc, struct rte_graph_feature *feature,
+ uint8_t index)
+{
+ RTE_SET_USED(arc);
+ return ((rte_graph_feature_data_t *)(feature->feature_data_by_index +
+ (index * sizeof(rte_graph_feature_data_t))));
+}
+
+/**
+ * Get rte_graph feature data object for an index within a feature
+ *
+ * @param arc
+ * feature arc
+ * @param feature
+ * Pointer to feature object
+ * @param index
+ *   Interface/application index within the feature
+ *
+ * @return
+ * Valid feature data
+ */
+static __rte_always_inline rte_graph_feature_data_t *
+rte_graph_feature_data_get(struct rte_graph_feature_arc *arc, struct rte_graph_feature *feature,
+ uint8_t index)
+{
+	RTE_VERIFY(index < arc->max_indexes);
+
+	return __rte_graph_feature_data_get(arc, feature, index);
+}
+
+/**
+ * Fast path API to check if any feature is enabled on a feature arc.
+ * Typically called from the arc->start_node process function
+ *
+ * @param arc
+ * Feature arc object
+ * @param[out] plist
+ * Pointer to runtime active feature list which needs to be provided to other
+ * fast path APIs
+ *
+ * @return
+ *   0: If no feature is enabled
+ *   Non-zero: Bitmask of enabled features; plist is valid
+ *
+ */
+static __rte_always_inline uint64_t
+rte_graph_feature_arc_has_any_feature(struct rte_graph_feature_arc *arc,
+ rte_graph_feature_rt_list_t *plist)
+{
+ *plist = __atomic_load_n(&arc->active_feature_list, __ATOMIC_RELAXED);
+
+ return (__atomic_load_n(arc->feature_enable_bitmask + (uint8_t)*plist,
+ __ATOMIC_RELAXED));
+}
+
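+/*
+ * Illustrative usage sketch (not a definitive part of this API): typical guard
+ * in the arc->start_node process() callback. If no feature is enabled, all
+ * mbufs can be forwarded on the node's default edge without per-packet
+ * lookups. "arc_handle" and "DEFAULT_NEXT_EDGE" are assumptions.
+ *
+ *    struct rte_graph_feature_arc *arc = rte_graph_feature_arc_get(arc_handle);
+ *    rte_graph_feature_rt_list_t list;
+ *
+ *    if (!rte_graph_feature_arc_has_any_feature(arc, &list)) {
+ *        rte_node_enqueue(graph, node, DEFAULT_NEXT_EDGE, objs, nb_objs);
+ *        return nb_objs;
+ *    }
+ *    // otherwise steer per-mbuf using rte_graph_feature_arc_feature_set()
+ */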
+/**
+ * Fast path API to check if provided feature is enabled on any interface/index
+ * or not
+ *
+ * @param arc
+ * Feature arc object
+ * @param feature
+ * Input rte_graph_feature_t that needs to be checked
+ * @param[out] plist
+ * Returns active list to caller which needs to be provided to other fast path
+ * APIs
+ *
+ * @return
+ * 1: If feature is enabled in arc
+ * 0: If feature is not enabled in arc
+ */
+static __rte_always_inline int
+rte_graph_feature_arc_has_feature(struct rte_graph_feature_arc *arc,
+ rte_graph_feature_t feature,
+ rte_graph_feature_rt_list_t *plist)
+{
+ uint64_t bitmask = RTE_BIT64(feature);
+
+ *plist = __atomic_load_n(&arc->active_feature_list, __ATOMIC_RELAXED);
+
+	return ((bitmask & __atomic_load_n(arc->feature_enable_bitmask + (uint8_t)*plist,
+					   __ATOMIC_RELAXED)) != 0);
+}
+
+/**
+ * Prefetch feature arc fast path cache line
+ *
+ * @param arc
+ * RTE_GRAPH feature arc object
+ */
+static __rte_always_inline void
+rte_graph_feature_arc_prefetch(struct rte_graph_feature_arc *arc)
+{
+ rte_prefetch0((void *)&arc->fast_path_variables);
+}
+
+/**
+ * Prefetch feature related fast path cache line
+ *
+ * @param arc
+ * RTE_GRAPH feature arc object
+ * @param list
+ *   Runtime active feature list retrieved from rte_graph_feature_arc_has_any_feature()
+ * @param feature
+ *   Feature (rte_graph_feature_t) whose cache line is prefetched
+ */
+static __rte_always_inline void
+rte_graph_feature_arc_feature_prefetch(struct rte_graph_feature_arc *arc,
+ const rte_graph_feature_rt_list_t list,
+ rte_graph_feature_t feature)
+{
+ /* feature cache line */
+ if (likely(rte_graph_feature_is_valid(feature)))
+ rte_prefetch0((void *)__rte_graph_feature_get(arc, feature, list));
+}
+
+/**
+ * Prefetch feature data cache line upfront. Validates the feature before
+ * prefetching
+ *
+ * @param arc
+ * RTE_GRAPH feature arc object
+ * @param list
+ * Pointer to runtime active feature list from rte_graph_feature_arc_has_any_feature();
+ * @param feature
+ *   Feature returned from @ref
+ * rte_graph_feature_arc_first_feature_get()
+ * @param index
+ * Interface/index
+ */
+static __rte_always_inline void
+rte_graph_feature_arc_data_prefetch(struct rte_graph_feature_arc *arc,
+ const rte_graph_feature_rt_list_t list,
+ rte_graph_feature_t feature, uint32_t index)
+{
+ if (likely(rte_graph_feature_is_valid(feature)))
+		rte_prefetch0((void *)((uint8_t *)__rte_graph_feature_get(arc, feature, list) +
+				       offsetof(struct rte_graph_feature, feature_data_by_index) +
+				       (index * sizeof(rte_graph_feature_data_t))));
+}
+
+/**
+ * Fast path API to get the first enabled feature on an interface index.
+ * Typically required in arc->start_node so that, from the returned feature,
+ * feature data can be retrieved to steer packets
+ *
+ * @param arc
+ * Feature arc object
+ * @param list
+ *   Runtime active feature list retrieved from
+ * rte_graph_feature_arc_has_any_feature() or
+ * rte_graph_feature_arc_has_feature()
+ * @param index
+ * Interface Index
+ * @param[out] feature
+ * Pointer to rte_graph_feature_t.
+ *
+ * @return
+ *   0: Success. feature field is valid
+ *   1: Failure. feature field is invalid
+ *
+ */
+static __rte_always_inline int
+rte_graph_feature_arc_first_feature_get(struct rte_graph_feature_arc *arc,
+ const rte_graph_feature_rt_list_t list,
+ uint32_t index,
+ rte_graph_feature_t *feature)
+{
+ struct rte_graph_feature_list *feature_list = arc->feature_list[list];
+
+ *feature = feature_list->first_enabled_feature_by_index[index];
+
+	/* 0 on success (valid feature), non-zero otherwise, as documented */
+	return !rte_graph_feature_is_valid(*feature);
+}
+
+/**
+ * Fast path API to get the next enabled feature on an interface index, given
+ * the current input feature
+ *
+ * @param arc
+ * Feature arc object
+ * @param list
+ *   Runtime active feature list retrieved from
+ *   rte_graph_feature_arc_has_any_feature() or
+ *   rte_graph_feature_arc_has_feature()
+ * @param index
+ * Interface Index
+ * @param[in,out] feature
+ *   Pointer to rte_graph_feature_t. The input feature is set to the next
+ *   enabled feature on successful return
+ * @param[out] next_edge
+ * Edge from current feature to next feature. Valid only if next feature is valid
+ *
+ * @return
+ *   0: Success. Next enabled feature is valid
+ *   1: Failure. Next enabled feature is invalid
+ */
+static __rte_always_inline int
+rte_graph_feature_arc_next_feature_get(struct rte_graph_feature_arc *arc,
+ const rte_graph_feature_rt_list_t list,
+ uint32_t index,
+ rte_graph_feature_t *feature,
+ rte_edge_t *next_edge)
+{
+ rte_graph_feature_data_t *feature_data = NULL;
+ struct rte_graph_feature *f = NULL;
+
+ if (likely(rte_graph_feature_is_valid(*feature))) {
+ f = __rte_graph_feature_get(arc, *feature, list);
+ feature_data = rte_graph_feature_data_get(arc, f, index);
+ *feature = feature_data->next_enabled_feature;
+ *next_edge = feature_data->next_edge;
+ return (*feature == RTE_GRAPH_FEATURE_INVALID);
+ }
+
+ return 1;
+}
+
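+/*
+ * Illustrative usage sketch (not a definitive part of this API): inside a
+ * feature node, fetch the next enabled feature and its edge for the current
+ * mbuf. The helpers get_feature_from_mbuf()/get_index_from_mbuf()/
+ * set_feature_in_mbuf() and FEATURE_NODE_DEFAULT_EDGE are hypothetical; the
+ * feature and index are expected to have been stashed in the mbuf by the
+ * start node.
+ *
+ *    rte_graph_feature_rt_list_t list;
+ *    rte_graph_feature_t feat = get_feature_from_mbuf(mbuf);
+ *    uint32_t index = get_index_from_mbuf(mbuf);
+ *    rte_edge_t next_edge;
+ *
+ *    (void)rte_graph_feature_arc_has_any_feature(arc, &list);
+ *    if (rte_graph_feature_arc_next_feature_get(arc, list, index,
+ *                                               &feat, &next_edge) == 0) {
+ *        // another feature follows: remember it and steer to its node
+ *        set_feature_in_mbuf(mbuf, feat);
+ *        rte_node_enqueue_x1(graph, node, next_edge, mbuf);
+ *    } else {
+ *        // no further feature enabled: take this node's normal edge
+ *        rte_node_enqueue_x1(graph, node, FEATURE_NODE_DEFAULT_EDGE, mbuf);
+ *    }
+ */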
+/**
+ * Set fields with respect to the first enabled feature for an index in the
+ * arc and return its edge.
+ * Typically the returned feature and interface index must be saved in the
+ * rte_mbuf structure to pass this information to the next feature node
+ *
+ * @param arc
+ * Feature arc object
+ * @param list
+ *   Runtime active feature list retrieved from rte_graph_feature_arc_has_any_feature()
+ * @param index
+ * Index (of interface)
+ * @param[out] gf
+ * Pointer to rte_graph_feature_t. Valid if API returns Success
+ * @param[out] edge
+ * Edge to steer packet from arc->start_node to first enabled feature. Valid
+ * only if API returns Success
+ *
+ * @return
+ * 0: If valid feature is set by API
+ * 1: If valid feature is NOT set by API
+ */
+static __rte_always_inline int
+rte_graph_feature_arc_feature_set(struct rte_graph_feature_arc *arc,
+ const rte_graph_feature_rt_list_t list,
+ uint32_t index,
+ rte_graph_feature_t *gf,
+ rte_edge_t *edge)
+{
+ struct rte_graph_feature_list *feature_list = arc->feature_list[list];
+ struct rte_graph_feature_data *feature_data = NULL;
+ struct rte_graph_feature *feature = NULL;
+ rte_graph_feature_t f;
+
+ /* reset */
+ *gf = RTE_GRAPH_FEATURE_INVALID;
+ f = feature_list->first_enabled_feature_by_index[index];
+
+ if (unlikely(rte_graph_feature_is_valid(f))) {
+ feature = __rte_graph_feature_get(arc, f, list);
+ feature_data = rte_graph_feature_data_get(arc, feature, index);
+ *gf = f;
+ *edge = feature_data->next_edge;
+ return 0;
+ }
+
+ return 1;
+}
+
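+/*
+ * Illustrative usage sketch (not a definitive part of this API): per-mbuf
+ * steering from arc->start_node once rte_graph_feature_arc_has_any_feature()
+ * has reported enabled features and returned "list". Deriving the index from
+ * mbuf->port, set_feature_in_mbuf() and DEFAULT_NEXT_EDGE are assumptions.
+ *
+ *    rte_graph_feature_t feat;
+ *    rte_edge_t edge;
+ *    uint32_t index = mbuf->port;   // hypothetical: arc indexed by rx port
+ *
+ *    if (rte_graph_feature_arc_feature_set(arc, list, index, &feat, &edge) == 0) {
+ *        // a feature is enabled on this index: save it and steer the packet
+ *        set_feature_in_mbuf(mbuf, feat);
+ *        rte_node_enqueue_x1(graph, node, edge, mbuf);
+ *    } else {
+ *        // nothing enabled on this index: use the start node's default edge
+ *        rte_node_enqueue_x1(graph, node, DEFAULT_NEXT_EDGE, mbuf);
+ *    }
+ */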
+/**
+ * Get the user data corresponding to the current feature, as set by the
+ * application in rte_graph_feature_enable()
+ *
+ * @param arc
+ * Feature arc object
+ * @param list
+ *   Runtime active feature list retrieved from rte_graph_feature_arc_has_any_feature()
+ * @param feature
+ * Feature index
+ * @param index
+ * Interface index
+ *
+ * @return
+ * UINT32_MAX: Failure
+ * Valid user data: Success
+ */
+static __rte_always_inline uint32_t
+rte_graph_feature_user_data_get(struct rte_graph_feature_arc *arc,
+ const rte_graph_feature_rt_list_t list,
+ rte_graph_feature_t feature,
+ uint32_t index)
+{
+ rte_graph_feature_data_t *fdata = NULL;
+ struct rte_graph_feature *f = NULL;
+
+ if (likely(rte_graph_feature_is_valid(feature))) {
+ f = __rte_graph_feature_get(arc, feature, list);
+ fdata = rte_graph_feature_data_get(arc, f, index);
+ return fdata->user_data;
+ }
+
+ return UINT32_MAX;
+}
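+
+/*
+ * Illustrative usage sketch (not a definitive part of this API): retrieving
+ * the application data registered via rte_graph_feature_enable() from within
+ * a feature node. Interpreting user_data as a table id and table_by_id() are
+ * assumptions.
+ *
+ *    uint32_t user_data = rte_graph_feature_user_data_get(arc, list, feat, index);
+ *
+ *    if (user_data != UINT32_MAX)
+ *        table = table_by_id(user_data);   // hypothetical application lookup
+ */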
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/lib/graph/version.map b/lib/graph/version.map
--- a/lib/graph/version.map
+++ b/lib/graph/version.map
@@ -52,3 +52,20 @@ DPDK_25 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ # added in 24.11
+ rte_graph_feature_arc_init;
+ rte_graph_feature_arc_create;
+ rte_graph_feature_arc_lookup_by_name;
+ rte_graph_feature_add;
+ rte_graph_feature_enable;
+ rte_graph_feature_validate;
+ rte_graph_feature_disable;
+ rte_graph_feature_lookup;
+ rte_graph_feature_arc_destroy;
+ rte_graph_feature_arc_cleanup;
+ rte_graph_feature_arc_num_enabled_features;
+};