@@ -94,6 +94,8 @@ struct pmd_internals {
int flower_vlan_support; /* 1 if kernel supports, else 0 */
int rss_enabled; /* 1 if RSS is enabled, else 0 */
/* implicit rules set when RSS is enabled */
+ int map_fd; /* BPF RSS map fd */
+ int bpf_fd[RTE_PMD_TAP_MAX_QUEUES];/* List of bpf fds per queue */
LIST_HEAD(tap_rss_flows, rte_flow) rss_flows;
LIST_HEAD(tap_flows, rte_flow) flows; /* rte_flow rules */
/* implicit rte_flow rules set when a remote device is active */
@@ -134,7 +134,7 @@ int bpf_load_cls_q_insns(__u32 queue_idx)
return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
(struct bpf_insn *)cls_q_insns,
CLS_Q_INSNS_CNT,
- "GPL", 0);
+ "Dual BSD/GPL", 0);
}
/*
@@ -174,7 +174,7 @@ int bpf_load_tailing_insns(void)
return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
(struct bpf_insn *)tailing_insns,
TAILING_INSNS_CNT,
- "GPL", 0);
+ "Dual BSD/GPL", 0);
}
/*
@@ -220,7 +220,7 @@ int bpf_load_hash_appending_insns(void)
return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
(struct bpf_insn *)hash_appending_insns,
HASH_APPENDING_INSNS_CNT,
- "GPL", 0);
+ "Dual BSD/GPL", 0);
}
/*
@@ -290,14 +290,14 @@ static struct bpf_insn queue_setting_insns[] = {
* @return
* -1 if the BPF program couldn't be loaded. An fd (int) otherwise.
*/
-int bpf_load_queue_setting_insns(__u32 queue_idx)
+int bpf_load_queue_setting_insns(__u32 key_idx)
{
- queue_setting_insns[1].imm = queue_idx;
+ queue_setting_insns[1].imm = key_idx;
return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
(struct bpf_insn *)queue_setting_insns,
QUEUE_SETTING_INSNS_CNT,
- "GPL", 0);
+ "Dual BSD/GPL", 0);
}
/*
@@ -918,7 +918,7 @@ int bpf_load_l2_src_hash_insns(__u32 key_idx)
return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
(struct bpf_insn *)l2_src_hash_insns,
L2_SRC_HASH_INSNS_CNT,
- "GPL", 0);
+ "Dual BSD/GPL", 0);
}
/*
@@ -1525,7 +1525,7 @@ int bpf_load_l2_dst_hash_insns(__u32 key_idx)
return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
(struct bpf_insn *)l2_dst_hash_insns,
L2_DST_HASH_INSNS_CNT,
- "GPL", 0);
+ "Dual BSD/GPL", 0);
}
/*
@@ -3285,7 +3285,7 @@ int bpf_load_l3_dst_hash_insns(__u32 key_idx)
return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
(struct bpf_insn *)l3_dst_hash_insns,
L3_DST_HASH_INSNS_CNT,
- "GPL", 0);
+ "Dual BSD/GPL", 0);
}
/*
@@ -5034,7 +5034,7 @@ int bpf_load_l3_src_hash_insns(__u32 key_idx)
return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
(struct bpf_insn *)l3_src_hash_insns,
L3_SRC_HASH_INSNS_CNT,
- "GPL", 0);
+ "Dual BSD/GPL", 0);
}
/*
@@ -5376,7 +5376,7 @@ int bpf_load_l4_src_hash_insns(__u32 key_idx)
return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
(struct bpf_insn *)l4_src_hash_insns,
L4_SRC_HASH_INSNS_CNT,
- "GPL", 0);
+ "Dual BSD/GPL", 0);
}
#ifndef __NR_bpf
@@ -5425,3 +5425,36 @@ int bpf_load(enum bpf_prog_type type,
return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
+
+
+int bpf_map_create(enum bpf_map_type map_type,
+ unsigned int key_size,
+ unsigned int value_size,
+ unsigned int max_entries)
+{
+ union bpf_attr attr;
+
+	memset(&attr, 0, sizeof(attr));
+ attr.map_type = map_type;
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.max_entries = max_entries;
+
+ return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+}
+
+int bpf_map_update_elem(enum bpf_map_type map_type,
+ int fd, void *key, void *value, __u64 flags)
+{
+ union bpf_attr attr;
+
+	memset(&attr, 0, sizeof(attr));
+
+ attr.map_type = map_type;
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+ attr.flags = flags;
+
+ return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
+}
@@ -57,15 +57,15 @@
*/
#define QUEUE_OFFSET 1
-#define INV_KEY_IDX 0xdeadbeef
+#define KEY_IDX 0xdeadbeef
struct vlan_hdr {
__be16 proto;
__be16 tci;
};
-__section("maps")
-struct bpf_elf_map map_keys = {
+struct bpf_elf_map __section("maps")
+map_keys = {
.type = BPF_MAP_TYPE_HASH,
.id = BPF_MAP_ID_KEY,
.size_key = sizeof(__u32),
@@ -78,10 +78,10 @@ __section("cls_q") int
match_q(struct __sk_buff *skb)
{
__u32 queue = skb->cb[1];
- volatile __u32 q = INV_KEY_IDX;
+ volatile __u32 q = KEY_IDX;
__u32 match_queue = QUEUE_OFFSET + q;
- /* printt("match_q$i() queue = %d\n", queue); */
+ /* bpf_printk("match_q$i() queue = %d\n", queue); */
if (queue != match_queue)
return TC_ACT_OK;
@@ -120,7 +120,7 @@ hash(struct __sk_buff *skb, enum hash_field f)
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
__u16 proto = (__u16)skb->protocol;
- __u32 key_idx = INV_KEY_IDX;
+ __u32 key_idx = KEY_IDX;
__u32 hash = skb->cb[0];
struct rss_key *rsskey;
__u64 off = ETH_HLEN;
@@ -303,7 +303,7 @@ append_hash(struct __sk_buff *skb)
__section("queue_setting") int
set_queue(struct __sk_buff *skb)
{
- __u32 key_idx = INV_KEY_IDX;
+ __u32 key_idx = KEY_IDX;
__u64 hash = skb->cb[0];
struct rss_key *rsskey;
__u32 queue = 0;
@@ -11,6 +11,16 @@ enum hash_field {
HASH_FIELD_L4_SRC,
};
+enum bpf_fd_index {
+ BPF_L2_DST = HASH_FIELD_L2_DST,
+ BPF_L2_SRC = HASH_FIELD_L2_SRC,
+ BPF_L3_SRC = HASH_FIELD_L3_SRC,
+ BPF_L3_DST = HASH_FIELD_L3_DST,
+ BPF_L4_SRC = HASH_FIELD_L4_SRC,
+ BPF_SET_Q,
+ BPF_MAX,
+};
+
enum {
BPF_MAP_ID_KEY,
BPF_MAP_ID_SIMPLE,
@@ -27,7 +37,7 @@ struct rss_key {
int bpf_load_cls_q_insns(__u32 queue_idx);
int bpf_load_tailing_insns(void);
int bpf_load_hash_appending_insns(void);
-int bpf_load_queue_setting_insns(__u32 queue_idx);
+int bpf_load_queue_setting_insns(__u32 key_idx);
int bpf_load_l2_src_hash_insns(__u32 key_idx);
int bpf_load_l2_dst_hash_insns(__u32 key_idx);
int bpf_load_l3_dst_hash_insns(__u32 key_idx);
@@ -37,4 +47,10 @@ int bpf_load_l4_src_hash_insns(__u32 key_idx);
int bpf_load(enum bpf_prog_type type, const struct bpf_insn *insns,
size_t insns_cnt, const char *license, __u32 kern_version);
+int bpf_map_create(enum bpf_map_type map_type, unsigned int key_size,
+ unsigned int value_size, unsigned int max_entries);
+
+int bpf_map_update_elem(enum bpf_map_type map_type, int fd, void *key,
+ void *value, __u64 flags);
+
#endif /* __BPF_SHARED__ */
@@ -34,6 +34,7 @@
#include <errno.h>
#include <string.h>
#include <sys/queue.h>
+#include <sys/mount.h>
#include <rte_byteorder.h>
#include <rte_jhash.h>
@@ -87,11 +88,38 @@ enum {
};
#endif
+enum bpf_rss_key_e {
+ KEY_CMD_GET = 1,
+ KEY_CMD_RELEASE,
+ KEY_CMD_INIT,
+ KEY_CMD_DEINIT,
+};
+
+enum key_status_e {
+ KEY_STAT_UNSPEC,
+ KEY_STAT_USED,
+ KEY_STAT_AVAILABLE,
+};
+
#define ISOLATE_HANDLE 1
+#define MAX_RSS_KEYS 256
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+#define SEC_NAME_CLS_Q "cls_q"
+static const char *sec_name[BPF_MAX] = {
+ [BPF_L2_DST] = "L2_DST",
+ [BPF_L2_SRC] = "L2_SRC",
+ [BPF_L3_DST] = "L3_DST",
+ [BPF_L3_SRC] = "L3_SRC",
+ [BPF_L4_SRC] = "L4_SRC",
+ [BPF_SET_Q] = "queue_setting",
+};
+
struct rte_flow {
LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
struct rte_flow *remote_flow; /* associated remote flow */
+	int bpf_fd[BPF_MAX];	/* List of bpf fds */
struct nlmsg msg;
};
@@ -120,8 +148,9 @@ struct action_data {
uint16_t queue;
} skbedit;
struct bpf {
+ struct tc_act_bpf bpf;
int bpf_fd;
- char *annotation;
+ const char *annotation;
} bpf;
};
};
@@ -157,6 +186,9 @@ tap_flow_isolate(struct rte_eth_dev *dev,
struct rte_flow_error *error);
static int rss_enable(struct pmd_internals *pmd);
+static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx);
+static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
+ const struct rte_flow_action_rss *rss);
static const struct rte_flow_ops tap_flow_ops = {
.validate = tap_flow_validate,
@@ -874,6 +906,9 @@ add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
nlattr_add(&msg->nh, TCA_ACT_BPF_NAME,
strlen(adata->bpf.annotation),
adata->bpf.annotation);
+ nlattr_add(&msg->nh, TCA_ACT_BPF_PARMS,
+ sizeof(adata->bpf.bpf),
+ &adata->bpf.bpf);
} else {
return -1;
}
@@ -898,7 +933,7 @@ add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data,
for (i = 0; i < nb_actions; i++)
if (add_action(flow, &act_index, data + i) < 0)
return -1;
- nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ nlattr_nested_finish(msg); /* nested TCA_[FLOWER|BPF|...]_ACT */
return 0;
}
@@ -1122,9 +1157,13 @@ priv_flow_process(struct pmd_internals *pmd,
if (action++)
goto exit_action_not_supported;
- if (!pmd->rss_enabled)
+ if (!pmd->rss_enabled) {
err = rss_enable(pmd);
- (void)rss;
+ if (err)
+ goto exit_action_not_supported;
+ }
+ if (rss)
+ err = rss_add_actions(flow, pmd, rss);
} else {
goto exit_action_not_supported;
}
@@ -1645,39 +1684,59 @@ tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
return 0;
}
-#define BPF_PROGRAM "tap_bpf_program.o"
-
/**
* Enable RSS on tap: create leading TC rules for queuing.
+ *
+ * @param[in, out] pmd
+ * Pointer to private structure.
+ *
+ * @return -1 if couldn't create map or add rules, 0 otherwise.
*/
static int rss_enable(struct pmd_internals *pmd)
{
struct rte_flow *rss_flow = NULL;
- char section[64];
struct nlmsg *msg = NULL;
/* 4096 is the maximum number of instructions for a BPF program */
- char annotation[256];
- int bpf_fd;
+ char annotation[64];
int i;
+ int err = 0;
+
+ /*
+ * At this point file system /sys/fs/bpf must be mounted
+	 * Check for it or mount it programmatically
+ */
+
+ /*
+ * Create RSS MAP
+ */
+
+ err = bpf_rss_key(KEY_CMD_INIT, NULL);
+ if (err < 0) {
+		RTE_LOG(ERR, PMD, "Failed to initialize BPF RSS keys\n");
+ return -1;
+ }
+
+ pmd->map_fd = bpf_map_create(BPF_MAP_TYPE_HASH,
+ sizeof(__u32), /* key size */
+ sizeof(struct rss_key),
+ MAX_RSS_KEYS);
+ if (pmd->map_fd < 0) {
+		RTE_LOG(ERR, PMD, "Failed to create eBPF map\n");
+ return -1;
+ }
/*
* Add a rule per queue to match reclassified packets and direct them to
* the correct queue.
*/
for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
- struct action_data adata = {
- .id = "skbedit",
- .skbedit = {
- .skbedit = {
- .action = TC_ACT_PIPE,
- },
- .queue = i,
- },
- };
-
- bpf_fd = bpf_load_cls_q_insns(i);
- if (bpf_fd == -1)
+ pmd->bpf_fd[i] = bpf_load_cls_q_insns(i);
+ if (pmd->bpf_fd[i] < 0) {
+ RTE_LOG(ERR, PMD,
+				"Failed to load eBPF section %s for queue %d\n",
+ SEC_NAME_CLS_Q, i);
return -1;
+ }
rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
if (!rss_flow) {
@@ -1692,84 +1751,291 @@ static int rss_enable(struct pmd_internals *pmd)
htons(ETH_P_ALL));
msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
tap_flow_set_handle(rss_flow);
+
nlattr_add(&msg->nh, TCA_KIND, sizeof("bpf"), "bpf");
if (nlattr_nested_start(msg, TCA_OPTIONS) < 0)
return -1;
- nlattr_add32(&msg->nh, TCA_BPF_FD, bpf_fd);
- snprintf(annotation, sizeof(annotation), "%s:[%s]",
- BPF_PROGRAM, section);
+ nlattr_add32(&msg->nh, TCA_BPF_FD, pmd->bpf_fd[i]);
+ snprintf(annotation, sizeof(annotation), "[%s%d]",
+ SEC_NAME_CLS_Q, i);
nlattr_add(&msg->nh, TCA_BPF_NAME, strlen(annotation),
annotation);
+ /* Actions */
+ {
+ struct action_data adata = {
+ .id = "skbedit",
+ .skbedit = {
+ .skbedit = {
+ .action = TC_ACT_PIPE,
+ },
+ .queue = i,
+ },
+ };
+ if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
+ return -1;
+ }
+ nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
- if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
- return -1;
- nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
/* Netlink message is now ready to be sent */
if (nl_send(pmd->nlsk_fd, &msg->nh) < 0)
return -1;
- if (nl_recv_ack(pmd->nlsk_fd) < 0)
- return -1;
+ err = nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule creation (%d): %s\n",
+ errno, strerror(errno));
+ return err;
+ }
LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
}
- /* Add a rule that adds 4 bytes at packet's end to hold the hash */
- bpf_fd = bpf_load_tailing_insns();
- if (bpf_fd == -1)
+ pmd->rss_enabled = 1;
+ return err;
+}
+
+/**
+ * Manage bpf RSS keys repository with operations: init, get, release
+ *
+ * @param[in] cmd
+ * Command on RSS keys: init, get, release
+ *
+ * @param[in, out] key_idx
+ * Pointer to RSS Key index (out for get command, in for release command)
+ *
+ * @return -1 if couldn't get, release or init the RSS keys, 0 otherwise.
+ */
+static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
+{
+ __u32 i;
+ int err = -1;
+ static __u32 num_used_keys;
+ static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
+ static __u32 rss_keys_initialized;
+
+ switch (cmd) {
+ case KEY_CMD_GET:
+ if (!rss_keys_initialized)
+ break;
+
+ if (num_used_keys == ARRAY_SIZE(rss_keys))
+ break;
+
+ *key_idx = num_used_keys % ARRAY_SIZE(rss_keys);
+ while (rss_keys[*key_idx] == KEY_STAT_USED)
+ *key_idx = (*key_idx + 1) % ARRAY_SIZE(rss_keys);
+ rss_keys[*key_idx] = KEY_STAT_USED;
+ num_used_keys++;
+ err = 0;
+ break;
+
+ case KEY_CMD_RELEASE:
+ if (!rss_keys_initialized)
+ break;
+
+ if (rss_keys[*key_idx] == KEY_STAT_USED) {
+ rss_keys[*key_idx] = KEY_STAT_AVAILABLE;
+ num_used_keys--;
+ err = 0;
+ }
+ break;
+
+ case KEY_CMD_INIT:
+ for (i = 0; i < ARRAY_SIZE(rss_keys); i++)
+ rss_keys[i] = KEY_STAT_AVAILABLE;
+ rss_keys_initialized = 1;
+ num_used_keys = 0;
+ err = 0;
+ break;
+
+ case KEY_CMD_DEINIT:
+ for (i = 0; i < ARRAY_SIZE(rss_keys); i++)
+ rss_keys[i] = KEY_STAT_UNSPEC;
+ rss_keys_initialized = 0;
+ num_used_keys = 0;
+ err = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * Add RSS hash calculations and queue selection
+ *
+ * @param[in, out] pmd
+ *   Pointer to internal Tap device structure
+ *
+ * @param[in] rss
+ * Pointer to RSS flow actions
+ *
+ * @return -1 if couldn't add any action, 0 otherwise.
+ */
+static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
+ const struct rte_flow_action_rss *rss)
+{
+ /* 4096 is the maximum number of instructions for a BPF program */
+ int i;
+ __u32 key_idx;
+ int err;
+
+ struct rss_key rss_entry = {
+ .key = {
+ 0x98badcfe, 0x10325476, 0x98badcfe, 0x10325476,
+ 0x98badcfe, 0x10325476, 0x98badcfe, 0x10325476,
+ 0x98badcfe, 0x10325476, 0x98badcfe, 0x10325476,
+ 0x98badcfe, 0x10325476, 0x98badcfe, 0x10325476,
+ },
+ .key_size = 16,
+ .hash_fields =
+ 1 << HASH_FIELD_L2_SRC |
+ 1 << HASH_FIELD_L2_DST |
+ 1 << HASH_FIELD_L3_SRC |
+ 1 << HASH_FIELD_L3_DST |
+ 1 << HASH_FIELD_L4_SRC,
+ .queues = { 1, 2 },
+ .nb_queues = 2,
+ };
+ /* Get a new map key for a new RSS rule */
+ err = bpf_rss_key(KEY_CMD_GET, &key_idx);
+ if (err)
+ return -1;
+
+	/* Update RSS map entry with queues; FIXME: rss->num is not bounded against the size of rss_entry.queues[] */
+ rss_entry.nb_queues = rss->num;
+ for (i = 0; i < rss->num; i++)
+ rss_entry.queues[i] = rss->queue[i];
+
+ /* Add this RSS entry to map */
+ err = bpf_map_update_elem(BPF_MAP_TYPE_HASH, pmd->map_fd,
+ &key_idx, &rss_entry, BPF_ANY);
+
+ if (err) {
+		RTE_LOG(ERR, PMD, "RSS map entry #%u failed: %s\n", key_idx,
+			strerror(errno));
+ return -1;
+ }
+
+
+ /*
+ * Load bpf rules to calculate hash for this key_idx
+ */
+
+	/* TODO: consider putting the repetitive pattern below within a macro */
+ flow->bpf_fd[BPF_L2_DST] = bpf_load_l2_dst_hash_insns(key_idx);
+ if (flow->bpf_fd[BPF_L2_DST] < 0) {
+ RTE_LOG(ERR, PMD,
+			"Failed to load eBPF section %s\n", sec_name[BPF_L2_DST]);
return -1;
+ }
- snprintf(annotation, sizeof(annotation), "%s:[%s]", BPF_PROGRAM,
- section);
- rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
- if (!rss_flow) {
+ flow->bpf_fd[BPF_L2_SRC] = bpf_load_l2_src_hash_insns(key_idx);
+ if (flow->bpf_fd[BPF_L2_SRC] < 0) {
RTE_LOG(ERR, PMD,
- "Cannot allocate memory for rte_flow");
+			"Failed to load eBPF section %s\n", sec_name[BPF_L2_SRC]);
return -1;
}
- msg = &rss_flow->msg;
- tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
- NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
- msg->t.tcm_info =
- TC_H_MAKE((RTE_PMD_TAP_MAX_QUEUES + PRIORITY_OFFSET) << 16,
- htons(ETH_P_ALL));
- msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
- tap_flow_set_handle(rss_flow);
- nlattr_add(&msg->nh, TCA_KIND, sizeof("flower"), "flower");
- if (nlattr_nested_start(msg, TCA_OPTIONS) < 0)
+
+ flow->bpf_fd[BPF_L3_DST] = bpf_load_l3_dst_hash_insns(key_idx);
+ if (flow->bpf_fd[BPF_L3_DST] < 0) {
+ RTE_LOG(ERR, PMD,
+			"Failed to load eBPF section %s\n", sec_name[BPF_L3_DST]);
+ return -1;
+ }
+
+ flow->bpf_fd[BPF_L3_SRC] = bpf_load_l3_src_hash_insns(key_idx);
+ if (flow->bpf_fd[BPF_L3_SRC] < 0) {
+ RTE_LOG(ERR, PMD,
+			"Failed to load eBPF section %s\n", sec_name[BPF_L3_SRC]);
+ return -1;
+ }
+
+ flow->bpf_fd[BPF_L4_SRC] = bpf_load_l4_src_hash_insns(key_idx);
+ if (flow->bpf_fd[BPF_L4_SRC] < 0) {
+ RTE_LOG(ERR, PMD,
+			"Failed to load eBPF section %s\n", sec_name[BPF_L4_SRC]);
return -1;
+ }
- /* no fields for matching: all packets must match */
+ flow->bpf_fd[BPF_SET_Q] = bpf_load_queue_setting_insns(key_idx);
+ if (flow->bpf_fd[BPF_SET_Q] < 0) {
+ RTE_LOG(ERR, PMD,
+			"Failed to load eBPF section %s\n", sec_name[BPF_SET_Q]);
+ return -1;
+ }
+
+ /* Actions */
{
- /* Actions */
- struct action_data data[2] = {
- [0] = {
+ struct action_data adata[] = {
+ {
+ .id = "bpf",
+ .bpf = {
+ .bpf_fd = flow->bpf_fd[BPF_L2_DST],
+ .annotation = sec_name[BPF_L2_DST],
+ .bpf = {
+ .action = TC_ACT_PIPE,
+ },
+ },
+ },
+ {
+ .id = "bpf",
+ .bpf = {
+ .bpf_fd = flow->bpf_fd[BPF_L2_SRC],
+ .annotation = sec_name[BPF_L2_SRC],
+ .bpf = {
+ .action = TC_ACT_PIPE,
+ },
+ },
+ },
+ {
.id = "bpf",
.bpf = {
- .bpf_fd = bpf_fd,
- .annotation = annotation,
+ .bpf_fd = flow->bpf_fd[BPF_L3_SRC],
+ .annotation = sec_name[BPF_L3_SRC],
+ .bpf = {
+ .action = TC_ACT_PIPE,
+ },
},
},
- [1] = {
- .id = "gact",
- .gact = {
- /* continue */
- .action = TC_ACT_UNSPEC,
+ {
+ .id = "bpf",
+ .bpf = {
+ .bpf_fd = flow->bpf_fd[BPF_L3_DST],
+ .annotation = sec_name[BPF_L3_DST],
+ .bpf = {
+ .action = TC_ACT_PIPE,
+ },
+ },
+ },
+ {
+ .id = "bpf",
+ .bpf = {
+ .bpf_fd = flow->bpf_fd[BPF_L4_SRC],
+ .annotation = sec_name[BPF_L4_SRC],
+ .bpf = {
+ .action = TC_ACT_PIPE,
+ },
+ },
+ },
+ {
+ .id = "bpf",
+ .bpf = {
+ .bpf_fd = flow->bpf_fd[BPF_SET_Q],
+ .annotation = sec_name[BPF_SET_Q],
+ .bpf = {
+ .action = TC_ACT_RECLASSIFY,
+ },
},
},
};
- if (add_actions(rss_flow, 2, data, TCA_FLOWER_ACT) < 0)
+ if (add_actions(flow, ARRAY_SIZE(adata), adata,
+ TCA_FLOWER_ACT) < 0)
return -1;
}
- nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
- nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
- /* Netlink message is now ready to be sent */
- if (nl_send(pmd->nlsk_fd, &msg->nh) < 0)
- return -1;
- if (nl_recv_ack(pmd->nlsk_fd) < 0)
- return -1;
- LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
- pmd->rss_enabled = 1;
return 0;
}