@@ -401,6 +401,14 @@ On LPM lookup failure, objects are redirected to pkt_drop node.
To achieve home run, node use ``rte_node_stream_move()`` as mentioned in above
sections.
+ip6_rewrite
+~~~~~~~~~~~
+This node gets packets from the ``ip6_lookup`` node, with the next-hop id
+for each packet embedded in ``node_mbuf_priv1(mbuf)->nh``. This id is used
+to determine the L2 header to be written to the packet before sending
+the packet out to a particular ethdev_tx node.
+``rte_node_ip6_rewrite_add()`` is the control path API used to add next-hop
+rewrite information.
+
null
~~~~
This node ignores the set of objects passed to it and reports that all are
@@ -12,6 +12,7 @@
#include "ethdev_rx_priv.h"
#include "ethdev_tx_priv.h"
#include "ip4_rewrite_priv.h"
+#include "ip6_rewrite_priv.h"
#include "node_private.h"
static struct ethdev_ctrl {
@@ -23,6 +24,7 @@ rte_node_eth_config(struct rte_node_ethdev_config *conf, uint16_t nb_confs,
uint16_t nb_graphs)
{
struct rte_node_register *ip4_rewrite_node;
+ struct rte_node_register *ip6_rewrite_node;
struct ethdev_tx_node_main *tx_node_data;
uint16_t tx_q_used, rx_q_used, port_id;
struct rte_node_register *tx_node;
@@ -33,6 +35,7 @@ rte_node_eth_config(struct rte_node_ethdev_config *conf, uint16_t nb_confs,
uint32_t id;
ip4_rewrite_node = ip4_rewrite_node_get();
+ ip6_rewrite_node = ip6_rewrite_node_get();
tx_node_data = ethdev_tx_node_data_get();
tx_node = ethdev_tx_node_get();
for (i = 0; i < nb_confs; i++) {
@@ -110,6 +113,16 @@ rte_node_eth_config(struct rte_node_ethdev_config *conf, uint16_t nb_confs,
port_id, rte_node_edge_count(ip4_rewrite_node->id) - 1);
if (rc < 0)
return rc;
+
+ /* Add this tx port node as next to ip6_rewrite_node */
+ rte_node_edge_update(ip6_rewrite_node->id, RTE_EDGE_ID_INVALID,
+ &next_nodes, 1);
+ /* Assuming edge id is the last one alloc'ed */
+ rc = ip6_rewrite_set_next(
+ port_id, rte_node_edge_count(ip6_rewrite_node->id) - 1);
+ if (rc < 0)
+ return rc;
+
}
ctrl.nb_graphs = nb_graphs;
new file mode 100644
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_graph.h>
+#include <rte_graph_worker.h>
+#include <rte_ip.h>
+#include <rte_malloc.h>
+#include <rte_vect.h>
+
+#include "rte_node_ip6_api.h"
+
+#include "ip6_rewrite_priv.h"
+#include "node_private.h"
+
+/* Per-node context stored in node->ctx (size-checked in node init). */
+struct ip6_rewrite_node_ctx {
+	/* Dynamic offset to mbuf priv1 */
+	int mbuf_priv1_off;
+	/* Cached next index */
+	uint16_t next_index;
+};
+
+/* Lazily allocated singleton holding next-hop and per-port edge tables. */
+static struct ip6_rewrite_node_main *ip6_rewrite_nm;
+
+/* Accessor for the cached (speculated) next edge in node->ctx. */
+#define IP6_REWRITE_NODE_LAST_NEXT(ctx) \
+	(((struct ip6_rewrite_node_ctx *)ctx)->next_index)
+
+/* Accessor for the mbuf priv1 dynfield offset in node->ctx. */
+#define IP6_REWRITE_NODE_PRIV1_OFF(ctx) \
+	(((struct ip6_rewrite_node_ctx *)ctx)->mbuf_priv1_off)
+
+/**
+ * Process callback of the ip6_rewrite node.
+ *
+ * For each mbuf, looks up the next-hop entry selected by ip6_lookup
+ * (id carried in the mbuf priv1 dynfield), prepends the pre-built L2
+ * rewrite data at the mbuf data start, decrements the IPv6 hop limit,
+ * and enqueues the packet to the per-port ethdev_tx edge. Uses the
+ * standard graph speculation pattern: packets are written directly into
+ * the stream of the last-used edge and fixed up only on mispredict.
+ */
+static uint16_t
+ip6_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
+			 void **objs, uint16_t nb_objs)
+{
+	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
+	struct ip6_rewrite_nh_header *nh = ip6_rewrite_nm->nh;
+	const int dyn = IP6_REWRITE_NODE_PRIV1_OFF(node->ctx);
+	uint16_t next0, next1, next2, next3, next_index;
+	uint16_t n_left_from, held = 0, last_spec = 0;
+	struct rte_ipv6_hdr *ip0, *ip1, *ip2, *ip3;
+	void *d0, *d1, *d2, *d3;
+	void **to_next, **from;
+	rte_xmm_t priv01;
+	rte_xmm_t priv23;
+	int i;
+
+	/* Speculative next as last next */
+	next_index = IP6_REWRITE_NODE_LAST_NEXT(node->ctx);
+	rte_prefetch0(nh);
+
+	pkts = (struct rte_mbuf **)objs;
+	from = objs;
+	n_left_from = nb_objs;
+
+	for (i = 0; i < 4 && i < n_left_from; i++)
+		rte_prefetch0(pkts[i]);
+
+	/* Get stream for the speculated next node */
+	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
+	/* Update Ethernet header of pkts, four at a time */
+	while (n_left_from >= 4) {
+		if (likely(n_left_from > 7)) {
+			/* Prefetch only next-mbuf struct and priv area.
+			 * Data need not be prefetched as we only write.
+			 */
+			rte_prefetch0(pkts[4]);
+			rte_prefetch0(pkts[5]);
+			rte_prefetch0(pkts[6]);
+			rte_prefetch0(pkts[7]);
+		}
+
+		mbuf0 = pkts[0];
+		mbuf1 = pkts[1];
+		mbuf2 = pkts[2];
+		mbuf3 = pkts[3];
+
+		pkts += 4;
+		n_left_from -= 4;
+		/* Load priv1 of two mbufs per xmm register.
+		 * Lane layout (see scalar path below): u16[0]/u16[4] hold the
+		 * next-hop id, u16[1]/u16[5] hold the current hop limit.
+		 */
+		priv01.u64[0] = node_mbuf_priv1(mbuf0, dyn)->u;
+		priv01.u64[1] = node_mbuf_priv1(mbuf1, dyn)->u;
+		priv23.u64[0] = node_mbuf_priv1(mbuf2, dyn)->u;
+		priv23.u64[1] = node_mbuf_priv1(mbuf3, dyn)->u;
+
+		/* Update next_hop rewrite ethernet hdr on mbuf0 */
+		d0 = rte_pktmbuf_mtod(mbuf0, void *);
+		rte_memcpy(d0, nh[priv01.u16[0]].rewrite_data,
+			   nh[priv01.u16[0]].rewrite_len);
+
+		next0 = nh[priv01.u16[0]].tx_node;
+		ip0 = (struct rte_ipv6_hdr *)((uint8_t *)d0 +
+					      sizeof(struct rte_ether_hdr));
+		/* Decrement hop limit while writing it back */
+		ip0->hop_limits = priv01.u16[1] - 1;
+
+		/* Update next_hop rewrite ethernet hdr on mbuf1 */
+		d1 = rte_pktmbuf_mtod(mbuf1, void *);
+		rte_memcpy(d1, nh[priv01.u16[4]].rewrite_data,
+			   nh[priv01.u16[4]].rewrite_len);
+
+		next1 = nh[priv01.u16[4]].tx_node;
+		ip1 = (struct rte_ipv6_hdr *)((uint8_t *)d1 +
+					      sizeof(struct rte_ether_hdr));
+		ip1->hop_limits = priv01.u16[5] - 1;
+
+		/* Update next_hop rewrite ethernet hdr on mbuf2 */
+		d2 = rte_pktmbuf_mtod(mbuf2, void *);
+		rte_memcpy(d2, nh[priv23.u16[0]].rewrite_data,
+			   nh[priv23.u16[0]].rewrite_len);
+		next2 = nh[priv23.u16[0]].tx_node;
+		ip2 = (struct rte_ipv6_hdr *)((uint8_t *)d2 +
+					      sizeof(struct rte_ether_hdr));
+		ip2->hop_limits = priv23.u16[1] - 1;
+
+		/* Update next_hop rewrite ethernet hdr on mbuf3 */
+		d3 = rte_pktmbuf_mtod(mbuf3, void *);
+		rte_memcpy(d3, nh[priv23.u16[4]].rewrite_data,
+			   nh[priv23.u16[4]].rewrite_len);
+
+		next3 = nh[priv23.u16[4]].tx_node;
+		ip3 = (struct rte_ipv6_hdr *)((uint8_t *)d3 +
+					      sizeof(struct rte_ether_hdr));
+		ip3->hop_limits = priv23.u16[5] - 1;
+
+		/* Enqueue four packets to next node.
+		 * fix_spec is non-zero only when all four edges match the
+		 * speculated edge; otherwise fall into the slow fix-up path.
+		 */
+		rte_edge_t fix_spec =
+			((next_index == next0) && (next0 == next1) &&
+			 (next1 == next2) && (next2 == next3));
+
+		if (unlikely(fix_spec == 0)) {
+			/* Copy things successfully speculated till now */
+			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
+			from += last_spec;
+			to_next += last_spec;
+			held += last_spec;
+			last_spec = 0;
+
+			/* next0 */
+			if (next_index == next0) {
+				to_next[0] = from[0];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node, next0,
+						    from[0]);
+			}
+
+			/* next1 */
+			if (next_index == next1) {
+				to_next[0] = from[1];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node, next1,
+						    from[1]);
+			}
+
+			/* next2 */
+			if (next_index == next2) {
+				to_next[0] = from[2];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node, next2,
+						    from[2]);
+			}
+
+			/* next3 */
+			if (next_index == next3) {
+				to_next[0] = from[3];
+				to_next++;
+				held++;
+			} else {
+				rte_node_enqueue_x1(graph, node, next3,
+						    from[3]);
+			}
+
+			from += 4;
+
+			/* Change speculation if last two are same */
+			if ((next_index != next3) && (next2 == next3)) {
+				/* Put the current speculated node */
+				rte_node_next_stream_put(graph, node,
+							 next_index, held);
+				held = 0;
+
+				/* Get next speculated stream */
+				next_index = next3;
+				to_next = rte_node_next_stream_get(
+					graph, node, next_index, nb_objs);
+			}
+		} else {
+			last_spec += 4;
+		}
+	}
+
+	/* Handle the remaining (0-3) packets one at a time */
+	while (n_left_from > 0) {
+		mbuf0 = pkts[0];
+
+		pkts += 1;
+		n_left_from -= 1;
+
+		d0 = rte_pktmbuf_mtod(mbuf0, void *);
+		rte_memcpy(d0, nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_data,
+			   nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_len);
+
+		next0 = nh[node_mbuf_priv1(mbuf0, dyn)->nh].tx_node;
+		ip0 = (struct rte_ipv6_hdr *)((uint8_t *)d0 +
+					      sizeof(struct rte_ether_hdr));
+		/* priv1 ttl field carries the IPv6 hop limit here */
+		ip0->hop_limits = node_mbuf_priv1(mbuf0, dyn)->ttl - 1;
+
+		if (unlikely(next_index ^ next0)) {
+			/* Copy things successfully speculated till now */
+			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
+			from += last_spec;
+			to_next += last_spec;
+			held += last_spec;
+			last_spec = 0;
+
+			rte_node_enqueue_x1(graph, node, next0, from[0]);
+			from += 1;
+		} else {
+			last_spec += 1;
+		}
+	}
+
+	/* !!! Home run !!! All packets took the speculated edge; move the
+	 * whole stream in one shot without copying object pointers.
+	 */
+	if (likely(last_spec == nb_objs)) {
+		rte_node_next_stream_move(graph, node, next_index);
+		return nb_objs;
+	}
+
+	held += last_spec;
+	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
+	rte_node_next_stream_put(graph, node, next_index, held);
+	/* Save the last next used for the next invocation's speculation */
+	IP6_REWRITE_NODE_LAST_NEXT(node->ctx) = next_index;
+
+	return nb_objs;
+}
+
+/**
+ * Init callback of the ip6_rewrite node.
+ *
+ * Registers the mbuf priv1 dynamic field once per process and caches its
+ * offset in this node's context for use in the process callback.
+ *
+ * @return 0 on success, negative rte_errno if dynfield registration fails.
+ */
+static int
+ip6_rewrite_node_init(const struct rte_graph *graph, struct rte_node *node)
+{
+	/* Dynfield registration must happen only once across all graphs */
+	static bool init_once;
+
+	RTE_SET_USED(graph);
+	/* Context must fit in the fixed-size node->ctx area */
+	RTE_BUILD_BUG_ON(sizeof(struct ip6_rewrite_node_ctx) > RTE_NODE_CTX_SZ);
+
+	if (!init_once) {
+		ip6_node_mbuf_priv1_dynfield_offset = rte_mbuf_dynfield_register(
+			&node_mbuf_priv1_dynfield_desc);
+		if (ip6_node_mbuf_priv1_dynfield_offset < 0)
+			return -rte_errno;
+		init_once = true;
+	}
+	IP6_REWRITE_NODE_PRIV1_OFF(node->ctx) = ip6_node_mbuf_priv1_dynfield_offset;
+
+	node_dbg("ip6_rewrite", "Initialized ip6_rewrite node");
+
+	return 0;
+}
+
+/**
+ * Record the ip6_rewrite edge index leading to a port's ethdev_tx node.
+ *
+ * Lazily allocates the node main structure on first use.
+ *
+ * @param port_id
+ *   Ethernet port identifier; must be < RTE_MAX_ETHPORTS.
+ * @param next_index
+ *   Edge index of the given Tx node.
+ *
+ * @return 0 on success, -EINVAL on bad port, -ENOMEM on allocation failure.
+ */
+int
+ip6_rewrite_set_next(uint16_t port_id, uint16_t next_index)
+{
+	/* next_index[] has exactly RTE_MAX_ETHPORTS slots; bound-check the
+	 * index before writing to avoid out-of-bounds access.
+	 */
+	if (port_id >= RTE_MAX_ETHPORTS)
+		return -EINVAL;
+
+	if (ip6_rewrite_nm == NULL) {
+		ip6_rewrite_nm = rte_zmalloc(
+			"ip6_rewrite", sizeof(struct ip6_rewrite_node_main),
+			RTE_CACHE_LINE_SIZE);
+		if (ip6_rewrite_nm == NULL)
+			return -ENOMEM;
+	}
+	ip6_rewrite_nm->next_index[port_id] = next_index;
+
+	return 0;
+}
+
+/**
+ * Add next-hop rewrite data for the ip6_rewrite node.
+ *
+ * @param next_hop
+ *   Next-hop identifier; must be < RTE_GRAPH_IP6_REWRITE_MAX_NH.
+ * @param rewrite_data
+ *   L2 header bytes to prepend to matching packets; must not be NULL.
+ * @param rewrite_len
+ *   Length of rewrite_data; must be <= RTE_GRAPH_IP6_REWRITE_MAX_LEN.
+ * @param dst_port
+ *   Destination ethdev port; must be < RTE_MAX_ETHPORTS and previously
+ *   registered via ip6_rewrite_set_next().
+ *
+ * @return 0 on success, -EINVAL on invalid arguments, -ENOMEM on
+ *   allocation failure.
+ */
+int
+rte_node_ip6_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
+			 uint8_t rewrite_len, uint16_t dst_port)
+{
+	struct ip6_rewrite_nh_header *nh;
+
+	if (next_hop >= RTE_GRAPH_IP6_REWRITE_MAX_NH)
+		return -EINVAL;
+
+	if (rewrite_len > RTE_GRAPH_IP6_REWRITE_MAX_LEN)
+		return -EINVAL;
+
+	/* Reject NULL data and out-of-range ports before any indexing:
+	 * next_index[] is sized RTE_MAX_ETHPORTS and rewrite_data is
+	 * dereferenced by memcpy() below.
+	 */
+	if (rewrite_data == NULL || dst_port >= RTE_MAX_ETHPORTS)
+		return -EINVAL;
+
+	if (ip6_rewrite_nm == NULL) {
+		ip6_rewrite_nm = rte_zmalloc(
+			"ip6_rewrite", sizeof(struct ip6_rewrite_node_main),
+			RTE_CACHE_LINE_SIZE);
+		if (ip6_rewrite_nm == NULL)
+			return -ENOMEM;
+	}
+
+	/* Check if dst port doesn't exist as edge (edge 0 is pkt_drop, so a
+	 * zero next_index means the port was never registered).
+	 */
+	if (!ip6_rewrite_nm->next_index[dst_port])
+		return -EINVAL;
+
+	/* Update next hop */
+	nh = &ip6_rewrite_nm->nh[next_hop];
+
+	memcpy(nh->rewrite_data, rewrite_data, rewrite_len);
+	nh->tx_node = ip6_rewrite_nm->next_index[dst_port];
+	nh->rewrite_len = rewrite_len;
+	nh->enabled = true;
+
+	return 0;
+}
+
+/* Node registration: starts with a single edge to pkt_drop; per-port
+ * ethdev_tx edges are appended at runtime by rte_node_eth_config().
+ */
+static struct rte_node_register ip6_rewrite_node = {
+	.process = ip6_rewrite_node_process,
+	.name = "ip6_rewrite",
+	/* Default edge i.e '0' is pkt drop */
+	.nb_edges = 1,
+	.next_nodes = {
+		[0] = "pkt_drop",
+	},
+	.init = ip6_rewrite_node_init,
+};
+
+/* Expose the node registration so control path code can append edges. */
+struct rte_node_register *
+ip6_rewrite_node_get(void)
+{
+	return &ip6_rewrite_node;
+}
+
+RTE_NODE_REGISTER(ip6_rewrite_node);
new file mode 100644
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+#ifndef __INCLUDE_IP6_REWRITE_PRIV_H__
+#define __INCLUDE_IP6_REWRITE_PRIV_H__
+
+#include <rte_common.h>
+
+/* Maximum number of next-hop entries. */
+#define RTE_GRAPH_IP6_REWRITE_MAX_NH 64
+/* Maximum L2 rewrite data length in bytes. */
+#define RTE_GRAPH_IP6_REWRITE_MAX_LEN 56
+
+/**
+ * @internal
+ *
+ * Ipv6 rewrite next hop header data structure. Used to store port specific
+ * rewrite data.
+ */
+struct ip6_rewrite_nh_header {
+	uint16_t rewrite_len; /**< Header rewrite length. */
+	uint16_t tx_node;     /**< Tx node next index identifier. */
+	uint16_t enabled;     /**< NH enable flag */
+	uint16_t rsvd;        /**< Reserved, currently unused. */
+	union {
+		struct {
+			struct rte_ether_addr dst;
+			/**< Destination mac address. */
+			struct rte_ether_addr src;
+			/**< Source mac address. */
+		};
+		uint8_t rewrite_data[RTE_GRAPH_IP6_REWRITE_MAX_LEN];
+		/**< Generic rewrite data */
+	};
+};
+
+/**
+ * @internal
+ *
+ * Ipv6 node main data structure.
+ */
+struct ip6_rewrite_node_main {
+	struct ip6_rewrite_nh_header nh[RTE_GRAPH_IP6_REWRITE_MAX_NH];
+	/**< Array of next hop header data */
+	uint16_t next_index[RTE_MAX_ETHPORTS];
+	/**< Next index of each configured port. */
+};
+
+/**
+ * @internal
+ *
+ * Get the ipv6 rewrite node.
+ *
+ * @return
+ *   Pointer to the ipv6 rewrite node.
+ */
+struct rte_node_register *ip6_rewrite_node_get(void);
+
+/**
+ * @internal
+ *
+ * Set the Edge index of a given port_id.
+ *
+ * @param port_id
+ *   Ethernet port identifier.
+ * @param next_index
+ *   Edge index of the Given Tx node.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int ip6_rewrite_set_next(uint16_t port_id, uint16_t next_index);
+
+#endif /* __INCLUDE_IP6_REWRITE_PRIV_H__ */
@@ -14,6 +14,7 @@ sources = files(
'ip4_lookup.c',
'ip6_lookup.c',
'ip4_rewrite.c',
+ 'ip6_rewrite.c',
'log.c',
'null.c',
'pkt_cls.c',