@@ -5,6 +5,8 @@
#ifndef COMMON_H
#define COMMON_H
+#define NR_QUEUES 4
+
extern bool enable_promiscuous_mode;
extern bool enable_flow_isolation;
extern struct rte_flow_attr flow_attr;
@@ -59,7 +59,7 @@ generate_flow_skeleton(uint16_t port_id, struct rte_flow_error *error, int use_t
struct rte_flow_action actions[MAX_ACTION_NUM] = {0};
struct rte_flow_item patterns[MAX_PATTERN_NUM] = {0};
- snippet_skeleton_flow_create_actions(actions);
+ snippet_skeleton_flow_create_actions(port_id, actions);
snippet_skeleton_flow_create_patterns(patterns);
/* >8 End of setting the common action and pattern structures. */
@@ -40,7 +40,6 @@ static int use_template_api = 1;
static volatile bool force_quit;
static uint16_t port_id;
-static uint16_t nr_queues = 5;
struct rte_mempool *mbuf_pool;
struct rte_flow *flow;
@@ -67,7 +66,7 @@ main_loop(void)
/* Reading the packets from all queues. */
while (!force_quit) {
- for (i = 0; i < nr_queues; i++) {
+ for (i = 0; i < NR_QUEUES; i++) {
nb_rx = rte_eth_rx_burst(port_id,
i, mbufs, 32);
if (nb_rx) {
@@ -179,7 +178,7 @@ init_port(void)
port_conf.txmode.offloads &= dev_info.tx_offload_capa;
printf(":: initializing port: %d\n", port_id);
ret = rte_eth_dev_configure(port_id,
- nr_queues, nr_queues, &port_conf);
+ NR_QUEUES, NR_QUEUES, &port_conf);
if (ret < 0) {
rte_exit(EXIT_FAILURE,
":: cannot configure device: err=%d, port=%u\n",
@@ -190,7 +189,7 @@ init_port(void)
rxq_conf.offloads = port_conf.rxmode.offloads;
/* Configuring number of RX and TX queues connected to single port. */
- for (i = 0; i < nr_queues; i++) {
+ for (i = 0; i < NR_QUEUES; i++) {
ret = rte_eth_rx_queue_setup(port_id, i, 512,
rte_eth_dev_socket_id(port_id),
&rxq_conf,
@@ -205,7 +204,7 @@ init_port(void)
txq_conf = dev_info.default_txconf;
txq_conf.offloads = port_conf.txmode.offloads;
- for (i = 0; i < nr_queues; i++) {
+ for (i = 0; i < NR_QUEUES; i++) {
ret = rte_eth_tx_queue_setup(port_id, i, 512,
rte_eth_dev_socket_id(port_id),
&txq_conf);
@@ -15,6 +15,7 @@ sources = files(
'snippets/snippet_match_ipv4.c',
'snippets/snippet_match_gre.c',
'snippets/snippet_match_mpls.c',
+ 'snippets/snippet_match_port_affinity.c',
'snippets/snippet_re_route_to_kernel.c',
)
@@ -15,7 +15,7 @@ snippet_init_gre(void)
}
static void
-snippet_match_gre_create_actions(struct rte_flow_action *action)
+snippet_match_gre_create_actions(__rte_unused uint16_t port_id, struct rte_flow_action *action)
{
/* Create one action that moves the packet to the selected queue. */
struct rte_flow_action_queue *queue = calloc(1, sizeof(struct rte_flow_action_queue));
@@ -19,7 +19,7 @@ snippet_init_gre(void);
#define snippet_init snippet_init_gre
static void
-snippet_match_gre_create_actions(struct rte_flow_action *action);
+snippet_match_gre_create_actions(uint16_t port_id, struct rte_flow_action *action);
#define snippet_skeleton_flow_create_actions snippet_match_gre_create_actions
static void
@@ -7,8 +7,8 @@
#include <rte_errno.h>
#include <rte_flow.h>
-#include "snippet_match_ipv4.h"
#include "../common.h"
+#include "snippet_match_ipv4.h"
void
snippet_init_ipv4(void)
@@ -17,7 +17,7 @@ snippet_init_ipv4(void)
}
void
-snippet_ipv4_flow_create_actions(struct rte_flow_action *action)
+snippet_ipv4_flow_create_actions(__rte_unused uint16_t port_id, struct rte_flow_action *action)
{
/*
* create the action sequence.
@@ -17,7 +17,7 @@ snippet_init_ipv4(void);
#define snippet_init snippet_init_ipv4
void
-snippet_ipv4_flow_create_actions(struct rte_flow_action *action);
+snippet_ipv4_flow_create_actions(uint16_t port_id, struct rte_flow_action *action);
#define snippet_skeleton_flow_create_actions snippet_ipv4_flow_create_actions
void
@@ -19,7 +19,7 @@ snippet_init_mpls(void)
}
static void
-snippet_mpls_create_actions(struct rte_flow_action *actions)
+snippet_mpls_create_actions(__rte_unused uint16_t port_id, struct rte_flow_action *actions)
{
/* Create one action that moves the packet to the selected queue. */
struct rte_flow_action_queue *queue;
@@ -19,7 +19,7 @@ snippet_init_mpls(void);
#define snippet_init snippet_init_mpls
static void
-snippet_mpls_create_actions(struct rte_flow_action *actions);
+snippet_mpls_create_actions(uint16_t port_id, struct rte_flow_action *actions);
#define snippet_skeleton_flow_create_actions snippet_mpls_create_actions
static void
new file mode 100644
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+
+#include "../common.h"
+#include "snippet_match_port_affinity.h"
+
+void
+snippet_init_match_port_affinity(void)
+{
+ init_default_snippet();
+}
+
+static void
+map_tx_queue_to_aggregated_port(uint16_t port_id)
+{
+ int ret;
+ uint16_t queues[] = {0, 1, 2, 3};
+ uint16_t affinities[] = {1, 1, 2, 2};
+ int i;
+
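+ /* The Tx affinity mapping is expected to be configured while the port is
+  * stopped (before rte_eth_dev_start()), so stop the port here and restart
+  * it once the mapping has been applied.
+  */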
+ ret = rte_eth_dev_stop(port_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_stop:err=%d, port=%u\n",
+ ret, port_id);
+
+ ret = rte_eth_dev_count_aggr_ports(port_id);
+ if (ret < 0) {
+ printf("Failed to count the aggregated ports: (%s)\n",
+ strerror(-ret));
+ return;
+ }
+
+ /* Configure TxQ index 0,1 with tx affinity 1 and TxQ index 2,3 with tx affinity 2 */
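+ /* Affinity values are 1-based, ranging up to the aggregated port count
+  * returned above; 0 means no affinity.
+  */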
+ for (i = 0; i < NR_QUEUES; i++) {
+ ret = rte_eth_dev_map_aggr_tx_affinity(port_id, queues[i], affinities[i]);
+ if (ret != 0) {
+ printf("Failed to map tx queue with an aggregated port: %s\n",
+ rte_strerror(-ret));
+ return;
+ }
+ printf(":: tx queue %d mapped to aggregated port %d with affinity %d\n",
+ queues[i], port_id, affinities[i]);
+ }
+
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_start:err=%d, port=%u\n",
+ ret, port_id);
+}
+
+void
+snippet_match_port_affinity_create_actions(uint16_t port_id, struct rte_flow_action *action)
+{
+ /* Configure affinity in TxQ */
+ map_tx_queue_to_aggregated_port(port_id);
+
+ struct rte_flow_action_queue *queue = calloc(1, sizeof(struct rte_flow_action_queue));
+ if (queue == NULL) {
+ printf("Failed to allocate memory for queue\n");
+ return;
+ }
+ queue->index = 1;
+ /* Create the Queue action. */
+ action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+ action[0].conf = queue;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+}
+
+void
+snippet_match_port_affinity_create_patterns(struct rte_flow_item *pattern)
+{
+ struct rte_flow_item_aggr_affinity *affinity_spec =
+ calloc(1, sizeof(struct rte_flow_item_aggr_affinity));
+ if (affinity_spec == NULL) {
+ fprintf(stderr, "Failed to allocate memory for affinity_spec\n");
+ return;
+ }
+
+ struct rte_flow_item_aggr_affinity *affinity_mask =
+ calloc(1, sizeof(struct rte_flow_item_aggr_affinity));
+ if (affinity_mask == NULL) {
+ fprintf(stderr, "Failed to allocate memory for affinity_mask\n");
+ free(affinity_spec);
+ return;
+ }
+
+ affinity_spec->affinity = 2; /* Set the request affinity value. */
+ affinity_mask->affinity = 0xff; /* Set the mask for the affinity value. */
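+ /* The full 0xff mask compares every bit of the affinity field, so only
+  * packets received on aggregated port 2 match this rule.
+  */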
+
+ /* Create a rule that matches the port affinity values */
+ /* spec and mask are left NULL: match any Ethernet header. */
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_AGGR_AFFINITY;
+ pattern[1].spec = affinity_spec;
+ pattern[1].mask = affinity_mask;
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
+}
+
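+/* For reference, the rule built above is roughly equivalent to the following
+ * testpmd command (assumed syntax, which may vary between DPDK versions):
+ *   flow create 0 ingress pattern eth / aggr_affinity affinity is 2 / end
+ *        actions queue index 1 / end
+ */
+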
+struct rte_flow_template_table *
+snippet_match_port_affinity_create_table(__rte_unused uint16_t port_id,
+ __rte_unused struct rte_flow_error *error)
+{
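+ /* This snippet uses the non-template flow API only; returning NULL
+  * indicates that no template table is created. */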
+ return NULL;
+}
new file mode 100644
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef SNIPPET_MATCH_PORT_AFFINITY_H
+#define SNIPPET_MATCH_PORT_AFFINITY_H
+
+/* Port Affinity Match
+ * indicates at the DPDK level which physical port a received packet belongs to.
+ * This capability is exposed through a pattern item type for aggregated port
+ * affinity, whose value reflects the physical port affinity of received packets.
+ * Additionally, a Tx affinity can be configured per queue by calling
+ * rte_eth_dev_map_aggr_tx_affinity(); its value reflects the physical port
+ * the packets will be sent on.
+ * This capability enables the application to identify the ingress port of a
+ * packet and send the ACK out on the same port when dual-port devices are
+ * configured as a bond in Linux.
+ * This feature is used in conjunction with link aggregation, also known as
+ * port bonding, where multiple physical ports are combined into a single
+ * logical interface.
+ */
+
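+/* The flow_filtering skeleton invokes these callbacks through the #define
+ * aliases below: generate_flow_skeleton() builds the actions first (which in
+ * this snippet also programs the TxQ affinity mapping), then the pattern
+ * items, and finally creates the flow rule from them.
+ */
+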
+#define MAX_PATTERN_NUM 3 /* Maximal number of patterns for this example. */
+#define MAX_ACTION_NUM 2 /* Maximal number of actions for this example. */
+
+void
+snippet_init_match_port_affinity(void);
+#define snippet_init snippet_init_match_port_affinity
+
+void
+snippet_match_port_affinity_create_actions(uint16_t port_id, struct rte_flow_action *action);
+#define snippet_skeleton_flow_create_actions snippet_match_port_affinity_create_actions
+
+void
+snippet_match_port_affinity_create_patterns(struct rte_flow_item *pattern);
+#define snippet_skeleton_flow_create_patterns snippet_match_port_affinity_create_patterns
+
+struct rte_flow_template_table *
+snippet_match_port_affinity_create_table(uint16_t port_id, struct rte_flow_error *error);
+#define snippet_skeleton_flow_create_table snippet_match_port_affinity_create_table
+
+#endif /* SNIPPET_MATCH_PORT_AFFINITY_H */
@@ -19,7 +19,8 @@ snippet_init_re_route_to_kernel(void)
}
void
-snippet_re_route_to_kernel_create_actions(struct rte_flow_action *action)
+snippet_re_route_to_kernel_create_actions(__rte_unused uint16_t port_id,
+ struct rte_flow_action *action)
{
action[0].type = RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL;
action[1].type = RTE_FLOW_ACTION_TYPE_END;
@@ -20,7 +20,7 @@ snippet_init_re_route_to_kernel(void);
#define snippet_init snippet_init_re_route_to_kernel
static void
-snippet_re_route_to_kernel_create_actions(struct rte_flow_action *action);
+snippet_re_route_to_kernel_create_actions(uint16_t port_id, struct rte_flow_action *action);
#define snippet_skeleton_flow_create_actions snippet_re_route_to_kernel_create_actions
static void