get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request body are changed.

put:
Update a patch; the supplied representation replaces the existing writable fields.

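For example, a minimal Python sketch of reading this resource (assuming the
third-party "requests" package; nothing below is part of the Patchwork API
itself beyond the URL and the field names visible in the response further down):

    import requests

    # Fetch the patch as plain JSON. The "?format=api" query string in the
    # trace below selects the browsable HTML rendering; JSON clients can
    # simply omit it.
    resp = requests.get("https://patches.dpdk.org/api/patches/66906/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["state"])  # "superseded" in the response shown below
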
GET /api/patches/66906/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 66906,
    "url": "https://patches.dpdk.org/api/patches/66906/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20200318213551.3489504-13-jerinj@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200318213551.3489504-13-jerinj@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200318213551.3489504-13-jerinj@marvell.com",
    "date": "2020-03-18T21:35:37",
    "name": "[v1,12/26] graph: implement fastpath API routines",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "45a2c06e905c08e8ff36a81e7d643e27ab559279",
    "submitter": {
        "id": 1188,
        "url": "https://patches.dpdk.org/api/people/1188/?format=api",
        "name": "Jerin Jacob Kollanukkaran",
        "email": "jerinj@marvell.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20200318213551.3489504-13-jerinj@marvell.com/mbox/",
    "series": [
        {
            "id": 8974,
            "url": "https://patches.dpdk.org/api/series/8974/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=8974",
            "date": "2020-03-18T21:35:25",
            "name": "graph: introduce graph subsystem",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/8974/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/66906/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/66906/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 35167A057D;\n\tWed, 18 Mar 2020 22:37:35 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id A2BB41BF30;\n\tWed, 18 Mar 2020 22:36:12 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174]) by dpdk.org (Postfix) with ESMTP id CDCFA1C10C\n for <dev@dpdk.org>; Wed, 18 Mar 2020 22:36:10 +0100 (CET)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n 02ILUsFQ003319; Wed, 18 Mar 2020 14:36:08 -0700",
            "from sc-exch01.marvell.com ([199.233.58.181])\n by mx0a-0016f401.pphosted.com with ESMTP id 2yu8pqmrbr-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Wed, 18 Mar 2020 14:36:07 -0700",
            "from SC-EXCH01.marvell.com (10.93.176.81) by SC-EXCH01.marvell.com\n (10.93.176.81) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 18 Mar\n 2020 14:36:06 -0700",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH01.marvell.com\n (10.93.176.81) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Wed, 18 Mar 2020 14:36:06 -0700",
            "from jerin-lab.marvell.com (jerin-lab.marvell.com [10.28.34.14])\n by maili.marvell.com (Postfix) with ESMTP id 6A7173F7041;\n Wed, 18 Mar 2020 14:36:03 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0818;\n bh=pp6jdSFAMf3SAdy/Xt8Ba4Iow8bktXBUxAEwOfkD8Yo=;\n b=xa5Bip77sb6PzlWmkuisGhoeTAd8vZff8xNwmzdEtdwqFIMu+zRKQ867hAOPORokdV6h\n lS31fPhc1vrT2gGiVG61yb5WMG1E9MHhRgy0QqACgCygqF5qUhvyKi8oyAsVBw8ZDuHo\n 67z7IIUNQhLw/zbBBVKLJBjm7KogVZ8vH2tsS43sg8x+FphwZp9RVh/peY4MCB5LmvO/\n O5UCIGwOHL1J+R4XHSlPmczYz5/E/oGJytuNArEaflheE+1zq23VpU4ABhr0JPCHgR+s\n wdJQ+W6nOQ8bHB8cUEchrIhsOF7tGmzprlayBz+/803Zy/mRJsPjIzdiVhEF+4OXGdl6 +w==",
        "From": "<jerinj@marvell.com>",
        "To": "John McNamara <john.mcnamara@intel.com>, Marko Kovacevic\n <marko.kovacevic@intel.com>, Jerin Jacob <jerinj@marvell.com>,\n Kiran Kumar K <kirankumark@marvell.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <david.marchand@redhat.com>,\n <mdr@ashroe.eu>, <mattias.ronnblom@ericsson.com>,\n <pbhagavatula@marvell.com>, <ndabilpuram@marvell.com>",
        "Date": "Thu, 19 Mar 2020 03:05:37 +0530",
        "Message-ID": "<20200318213551.3489504-13-jerinj@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20200318213551.3489504-1-jerinj@marvell.com>",
        "References": "<20200318213551.3489504-1-jerinj@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.138, 18.0.645\n definitions=2020-03-18_07:2020-03-18,\n 2020-03-18 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v1 12/26] graph: implement fastpath API routines",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Jerin Jacob <jerinj@marvell.com>\n\nAdding implementation for rte_graph_walk() API. This will perform a walk\non the circular buffer and call the process function of each node\nand collect the stats if stats collection is enabled.\n\nSigned-off-by: Jerin Jacob <jerinj@marvell.com>\nSigned-off-by: Kiran Kumar K <kirankumark@marvell.com>\nSigned-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>\nSigned-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>\n---\n doc/api/doxy-api-index.md              |   1 +\n lib/librte_graph/graph.c               |  16 +\n lib/librte_graph/rte_graph_version.map |  10 +\n lib/librte_graph/rte_graph_worker.h    | 434 +++++++++++++++++++++++++\n 4 files changed, 461 insertions(+)",
    "diff": "diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md\nindex 5cc50f750..fd2ff64d7 100644\n--- a/doc/api/doxy-api-index.md\n+++ b/doc/api/doxy-api-index.md\n@@ -160,6 +160,7 @@ The public API headers are grouped by topics:\n     [port_in_action]   (@ref rte_port_in_action.h)\n     [table_action]     (@ref rte_table_action.h)\n   * [graph]            (@ref rte_graph.h):\n+    [graph_worker]     (@ref rte_graph_worker.h)\n \n - **basic**:\n   [approx fraction]    (@ref rte_approx.h),\ndiff --git a/lib/librte_graph/graph.c b/lib/librte_graph/graph.c\nindex cc1e523d9..78bc83c4e 100644\n--- a/lib/librte_graph/graph.c\n+++ b/lib/librte_graph/graph.c\n@@ -475,6 +475,22 @@ __rte_node_stream_alloc(struct rte_graph *graph, struct rte_node *node)\n \tnode->realloc_count++;\n }\n \n+void __rte_noinline\n+__rte_node_stream_alloc_size(struct rte_graph *graph, struct rte_node *node,\n+\t\t\t     uint16_t req_size)\n+{\n+\tuint16_t size = node->size;\n+\n+\tRTE_VERIFY(size != UINT16_MAX);\n+\t/* Allocate double amount of size to avoid immediate realloc */\n+\tsize = RTE_MIN(UINT16_MAX, RTE_MAX(RTE_GRAPH_BURST_SIZE, req_size * 2));\n+\tnode->objs = rte_realloc_socket(node->objs, size * sizeof(void *),\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE, graph->socket);\n+\tRTE_VERIFY(node->objs);\n+\tnode->size = size;\n+\tnode->realloc_count++;\n+}\n+\n static int\n graph_to_dot(FILE *f, struct graph *graph)\n {\ndiff --git a/lib/librte_graph/rte_graph_version.map b/lib/librte_graph/rte_graph_version.map\nindex adf55d406..13b838752 100644\n--- a/lib/librte_graph/rte_graph_version.map\n+++ b/lib/librte_graph/rte_graph_version.map\n@@ -3,6 +3,7 @@ EXPERIMENTAL {\n \n \t__rte_node_register;\n \t__rte_node_stream_alloc;\n+\t__rte_node_stream_alloc_size;\n \n \trte_graph_create;\n \trte_graph_destroy;\n@@ -16,6 +17,7 @@ EXPERIMENTAL {\n \trte_graph_node_get;\n \trte_graph_node_get_by_name;\n \trte_graph_obj_dump;\n+\trte_graph_walk;\n \n \trte_graph_cluster_stats_create;\n \trte_graph_cluster_stats_destroy;\n@@ -28,10 +30,18 @@ EXPERIMENTAL {\n \trte_node_edge_get;\n \trte_node_edge_shrink;\n \trte_node_edge_update;\n+\trte_node_enqueue;\n+\trte_node_enqueue_x1;\n+\trte_node_enqueue_x2;\n+\trte_node_enqueue_x4;\n+\trte_node_enqueue_next;\n \trte_node_from_name;\n \trte_node_id_to_name;\n \trte_node_list_dump;\n \trte_node_max_count;\n+\trte_node_next_stream_get;\n+\trte_node_next_stream_put;\n+\trte_node_next_stream_move;\n \n \tlocal: *;\n };\ndiff --git a/lib/librte_graph/rte_graph_worker.h b/lib/librte_graph/rte_graph_worker.h\nindex a7c780d4d..8e067e673 100644\n--- a/lib/librte_graph/rte_graph_worker.h\n+++ b/lib/librte_graph/rte_graph_worker.h\n@@ -100,6 +100,440 @@ struct rte_node {\n __rte_experimental\n void __rte_node_stream_alloc(struct rte_graph *graph, struct rte_node *node);\n \n+/**\n+ * @internal\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Allocate a stream with requested number of objects.\n+ *\n+ * If stream already exists then re-allocate it to a larger size.\n+ *\n+ * @param graph\n+ *   Pointer to the graph object.\n+ * @param node\n+ *   Pointer to the node object.\n+ * @param req_size\n+ *   Number of objects to be allocated.\n+ */\n+__rte_experimental\n+void __rte_node_stream_alloc_size(struct rte_graph *graph,\n+\t\t\t\t  struct rte_node *node, uint16_t req_size);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Perform graph walk on the circular buffer and invoke the process function\n+ * of 
the nodes and collect the stats.\n+ *\n+ * @param graph\n+ *   Graph pointer returned from rte_graph_lookup function.\n+ *\n+ * @see rte_graph_lookup()\n+ */\n+__rte_experimental\n+static inline void\n+rte_graph_walk(struct rte_graph *graph)\n+{\n+\tconst rte_graph_off_t *cir_start = graph->cir_start;\n+\tconst rte_node_t mask = graph->cir_mask;\n+\tuint32_t head = graph->head;\n+\tstruct rte_node *node;\n+\tuint64_t start;\n+\tuint16_t rc;\n+\tvoid **objs;\n+\n+\t/*\n+\t * Walk on the source node(s) ((cir_start - head) -> cir_start) and then\n+\t * on the pending streams (cir_start -> (cir_start + mask) -> cir_start)\n+\t * in a circular buffer fashion.\n+\t *\n+\t *\t+-----+ <= cir_start - head [number of source nodes]\n+\t *\t|     |\n+\t *\t| ... | <= source nodes\n+\t *\t|     |\n+\t *\t+-----+ <= cir_start [head = 0] [tail = 0]\n+\t *\t|     |\n+\t *\t| ... | <= pending streams\n+\t *\t|     |\n+\t *\t+-----+ <= cir_start + mask\n+\t */\n+\twhile (likely(head != graph->tail)) {\n+\t\tnode = RTE_PTR_ADD(graph, cir_start[(int32_t)head++]);\n+\t\tRTE_ASSERT(node->fence == RTE_GRAPH_FENCE);\n+\t\tobjs = node->objs;\n+\t\trte_prefetch0(objs);\n+\n+\t\tif (rte_graph_has_stats_feature()) {\n+\t\t\tstart = rte_rdtsc();\n+\t\t\trc = node->process(graph, node, objs, node->idx);\n+\t\t\tnode->total_cycles += rte_rdtsc() - start;\n+\t\t\tnode->total_calls++;\n+\t\t\tnode->total_objs += rc;\n+\t\t} else {\n+\t\t\tnode->process(graph, node, objs, node->idx);\n+\t\t}\n+\t\tnode->idx = 0;\n+\t\thead = likely((int32_t)head > 0) ? head & mask : head;\n+\t}\n+\tgraph->tail = 0;\n+}\n+\n+/* Fast path helper functions */\n+\n+/**\n+ * @internal\n+ *\n+ * Enqueue a given node to the tail of the graph reel.\n+ *\n+ * @param graph\n+ *   Pointer Graph object.\n+ * @param node\n+ *   Pointer to node object to be enqueued.\n+ */\n+static __rte_always_inline void\n+__rte_node_enqueue_tail_update(struct rte_graph *graph, struct rte_node *node)\n+{\n+\tuint32_t tail;\n+\n+\ttail = graph->tail;\n+\tgraph->cir_start[tail++] = node->off;\n+\tgraph->tail = tail & graph->cir_mask;\n+}\n+\n+/**\n+ * @internal\n+ *\n+ * Enqueue sequence prologue function.\n+ *\n+ * Updates the node to tail of graph reel and resizes the number of objects\n+ * available in the stream as needed.\n+ *\n+ * @param graph\n+ *   Pointer to the graph object.\n+ * @param node\n+ *   Pointer to the node object.\n+ * @param idx\n+ *   Index at which the object enqueue starts from.\n+ * @param space\n+ *   Space required for the object enqueue.\n+ */\n+static __rte_always_inline void\n+__rte_node_enqueue_prologue(struct rte_graph *graph, struct rte_node *node,\n+\t\t\t    const uint16_t idx, const uint16_t space)\n+{\n+\n+\t/* Add to the pending stream list if the node is new */\n+\tif (idx == 0)\n+\t\t__rte_node_enqueue_tail_update(graph, node);\n+\n+\tif (unlikely(node->size < (idx + space)))\n+\t\t__rte_node_stream_alloc(graph, node);\n+}\n+\n+/**\n+ * @internal\n+ *\n+ * Get the node pointer from current node edge id.\n+ *\n+ * @param node\n+ *   Current node pointer.\n+ * @param next\n+ *   Edge id of the required node.\n+ *\n+ * @return\n+ *   Pointer to the node denoted by the edge id.\n+ */\n+static __rte_always_inline struct rte_node *\n+__rte_node_next_node_get(struct rte_node *node, rte_edge_t next)\n+{\n+\tRTE_ASSERT(next < node->nb_edges);\n+\tRTE_ASSERT(node->fence == RTE_GRAPH_FENCE);\n+\tnode = node->nodes[next];\n+\tRTE_ASSERT(node->fence == RTE_GRAPH_FENCE);\n+\n+\treturn node;\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: 
this API may change without prior notice\n+ *\n+ * Enqueue the objs to next node for further processing and set\n+ * the next node to pending state in the circular buffer.\n+ *\n+ * @param graph\n+ *   Graph pointer returned from rte_graph_lookup().\n+ * @param node\n+ *   Current node pointer.\n+ * @param next\n+ *   Relative next node index to enqueue objs.\n+ * @param objs\n+ *   Objs to enqueue.\n+ * @param nb_objs\n+ *   Number of objs to enqueue.\n+ */\n+__rte_experimental\n+static inline void\n+rte_node_enqueue(struct rte_graph *graph, struct rte_node *node,\n+\t\t rte_edge_t next, void **objs, uint16_t nb_objs)\n+{\n+\tnode = __rte_node_next_node_get(node, next);\n+\tconst uint16_t idx = node->idx;\n+\n+\t__rte_node_enqueue_prologue(graph, node, idx, nb_objs);\n+\n+\trte_memcpy(&node->objs[idx], objs, nb_objs * sizeof(void *));\n+\tnode->idx = idx + nb_objs;\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Enqueue only one obj to next node for further processing and\n+ * set the next node to pending state in the circular buffer.\n+ *\n+ * @param graph\n+ *   Graph pointer returned from rte_graph_lookup().\n+ * @param node\n+ *   Current node pointer.\n+ * @param next\n+ *   Relative next node index to enqueue objs.\n+ * @param obj\n+ *   Obj to enqueue.\n+ */\n+__rte_experimental\n+static inline void\n+rte_node_enqueue_x1(struct rte_graph *graph, struct rte_node *node,\n+\t\t    rte_edge_t next, void *obj)\n+{\n+\tnode = __rte_node_next_node_get(node, next);\n+\tuint16_t idx = node->idx;\n+\n+\t__rte_node_enqueue_prologue(graph, node, idx, 1);\n+\n+\tnode->objs[idx++] = obj;\n+\tnode->idx = idx;\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Enqueue only two objs to next node for further processing and\n+ * set the next node to pending state in the circular buffer.\n+ * Same as rte_node_enqueue_x1 but enqueue two objs.\n+ *\n+ * @param graph\n+ *   Graph pointer returned from rte_graph_lookup().\n+ * @param node\n+ *   Current node pointer.\n+ * @param next\n+ *   Relative next node index to enqueue objs.\n+ * @param obj0\n+ *   Obj to enqueue.\n+ * @param obj1\n+ *   Obj to enqueue.\n+ */\n+__rte_experimental\n+static inline void\n+rte_node_enqueue_x2(struct rte_graph *graph, struct rte_node *node,\n+\t\t    rte_edge_t next, void *obj0, void *obj1)\n+{\n+\tnode = __rte_node_next_node_get(node, next);\n+\tuint16_t idx = node->idx;\n+\n+\t__rte_node_enqueue_prologue(graph, node, idx, 2);\n+\n+\tnode->objs[idx++] = obj0;\n+\tnode->objs[idx++] = obj1;\n+\tnode->idx = idx;\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Enqueue only four objs to next node for further processing and\n+ * set the next node to pending state in the circular buffer.\n+ * Same as rte_node_enqueue_x1 but enqueue four objs.\n+ *\n+ * @param graph\n+ *   Graph pointer returned from rte_graph_lookup().\n+ * @param node\n+ *   Current node pointer.\n+ * @param next\n+ *   Relative next node index to enqueue objs.\n+ * @param obj0\n+ *   1st obj to enqueue.\n+ * @param obj1\n+ *   2nd obj to enqueue.\n+ * @param obj2\n+ *   3rd obj to enqueue.\n+ * @param obj3\n+ *   4th obj to enqueue.\n+ */\n+__rte_experimental\n+static inline void\n+rte_node_enqueue_x4(struct rte_graph *graph, struct rte_node *node,\n+\t\t    rte_edge_t next, void *obj0, void *obj1, void *obj2,\n+\t\t    void *obj3)\n+{\n+\tnode = __rte_node_next_node_get(node, next);\n+\tuint16_t idx = 
node->idx;\n+\n+\t__rte_node_enqueue_prologue(graph, node, idx, 4);\n+\n+\tnode->objs[idx++] = obj0;\n+\tnode->objs[idx++] = obj1;\n+\tnode->objs[idx++] = obj2;\n+\tnode->objs[idx++] = obj3;\n+\tnode->idx = idx;\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Enqueue objs to multiple next nodes for further processing and\n+ * set the next nodes to pending state in the circular buffer.\n+ * objs[i] will be enqueued to nexts[i].\n+ *\n+ * @param graph\n+ *   Graph pointer returned from rte_graph_lookup().\n+ * @param node\n+ *   Current node pointer.\n+ * @param nexts\n+ *   List of relative next node indices to enqueue objs.\n+ * @param objs\n+ *   List of objs to enqueue.\n+ * @param nb_objs\n+ *   Number of objs to enqueue.\n+ */\n+__rte_experimental\n+static inline void\n+rte_node_enqueue_next(struct rte_graph *graph, struct rte_node *node,\n+\t\t      rte_edge_t *nexts, void **objs, uint16_t nb_objs)\n+{\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < nb_objs; i++)\n+\t\trte_node_enqueue_x1(graph, node, nexts[i], objs[i]);\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Get the stream of next node to enqueue the objs.\n+ * Once done with the updating the objs, needs to call\n+ * rte_node_next_stream_put to put the next node to pending state.\n+ *\n+ * @param graph\n+ *   Graph pointer returned from rte_graph_lookup().\n+ * @param node\n+ *   Current node pointer.\n+ * @param next\n+ *   Relative next node index to get stream.\n+ * @param nb_objs\n+ *   Requested free size of the next stream.\n+ *\n+ * @return\n+ *   Valid next stream on success.\n+ *\n+ * @see rte_node_next_stream_put().\n+ */\n+__rte_experimental\n+static inline void **\n+rte_node_next_stream_get(struct rte_graph *graph, struct rte_node *node,\n+\t\t\t rte_edge_t next, uint16_t nb_objs)\n+{\n+\tnode = __rte_node_next_node_get(node, next);\n+\tconst uint16_t idx = node->idx;\n+\tuint16_t free_space = node->size - idx;\n+\n+\tif (unlikely(free_space < nb_objs))\n+\t\t__rte_node_stream_alloc_size(graph, node, nb_objs);\n+\n+\treturn &node->objs[idx];\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Put the next stream to pending state in the circular buffer\n+ * for further processing. 
Should be invoked followed by\n+ * rte_node_next_stream_get().\n+ *\n+ * @param graph\n+ *   Graph pointer returned from rte_graph_lookup().\n+ * @param node\n+ *   Current node pointer.\n+ * @param next\n+ *   Relative next node index..\n+ * @param idx\n+ *   Number of objs updated in the stream after getting the stream using\n+ *   rte_node_next_stream_get.\n+ *\n+ * @see rte_node_next_stream_get().\n+ */\n+__rte_experimental\n+static inline void\n+rte_node_next_stream_put(struct rte_graph *graph, struct rte_node *node,\n+\t\t\t rte_edge_t next, uint16_t idx)\n+{\n+\tif (unlikely(!idx))\n+\t\treturn;\n+\n+\tnode = __rte_node_next_node_get(node, next);\n+\tif (node->idx == 0)\n+\t\t__rte_node_enqueue_tail_update(graph, node);\n+\n+\tnode->idx += idx;\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ *\n+ * Home run scenario, Enqueue all the objs of current node to next\n+ * node in optimized way by swapping the streams of both nodes.\n+ * Performs good when next node is already not in pending state.\n+ * If next node is already in pending state then normal enqueue\n+ * will be used.\n+ *\n+ * @param graph\n+ *   Graph pointer returned from rte_graph_lookup().\n+ * @param src\n+ *   Current node pointer.\n+ * @param next\n+ *   Relative next node index.\n+ */\n+__rte_experimental\n+static inline void\n+rte_node_next_stream_move(struct rte_graph *graph, struct rte_node *src,\n+\t\t\t  rte_edge_t next)\n+{\n+\tstruct rte_node *dst = __rte_node_next_node_get(src, next);\n+\n+\t/* Let swap the pointers if dst don't have valid objs */\n+\tif (likely(dst->idx == 0)) {\n+\t\tvoid **dobjs = dst->objs;\n+\t\tuint16_t dsz = dst->size;\n+\t\tdst->objs = src->objs;\n+\t\tdst->size = src->size;\n+\t\tsrc->objs = dobjs;\n+\t\tsrc->size = dsz;\n+\t\tdst->idx = src->idx;\n+\t\t__rte_node_enqueue_tail_update(graph, dst);\n+\t} else { /* Move the objects from src node to dst node */\n+\t\trte_node_enqueue(graph, src, next, src->objs, src->idx);\n+\t}\n+}\n+\n #ifdef __cplusplus\n }\n #endif\n",
    "prefixes": [
        "v1",
        "12/26"
    ]
}
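
The Allow header above also advertises PUT and PATCH. A hedged sketch of a
partial update via PATCH, assuming an authenticated user with maintainer or
delegate rights on the project (the token value and the choice of writable
field are illustrative assumptions, not taken from this response):

    import requests

    API_TOKEN = "hypothetical-token"  # issued per user by the Patchwork instance

    resp = requests.patch(
        "https://patches.dpdk.org/api/patches/66906/",
        headers={"Authorization": "Token " + API_TOKEN},
        # "state" is one of the fields a maintainer can typically change;
        # unauthenticated or unauthorized requests are rejected.
        json={"state": "accepted"},
    )
    resp.raise_for_status()
    print(resp.json()["state"])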