Add offload support for the very basic rte_flow actions:
mark, RSS, count, drop and output (port_id).
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
doc/guides/nics/features/nfp.ini | 6 ++
doc/guides/rel_notes/release_22_11.rst | 6 ++
drivers/net/nfp/flower/nfp_flower_cmsg.h | 11 +++
drivers/net/nfp/nfp_flow.c | 112 +++++++++++++++++++++++++++++++
drivers/net/nfp/nfp_flow.h | 37 ++++++++++
5 files changed, 172 insertions(+)
@@ -29,3 +29,10 @@ Usage doc = Y
[rte_flow items]
eth = Y
port_id = Y
+
+[rte_flow actions]
+count = Y
+drop = Y
+mark = Y
+port_id = Y
+rss = Y
@@ -109,6 +109,14 @@ New Features
* Ethernet
+  Add support for the following rte_flow actions:
+
+  * Count
+  * Drop
+  * Mark
+  * Port ID
+  * RSS
+
* **Updated NXP dpaa2 driver.**
* Added support for flow action REPRESENTED_PORT.
@@ -253,6 +253,17 @@ struct nfp_flower_mac_mpls {
rte_be32_t mpls_lse;
};
+/* Header shared by every flower action TLV: the firmware opcode
+ * (NFP_FL_ACTION_OPCODE_*) and the TLV length in long words.
+ */
+struct nfp_fl_act_head {
+ uint8_t jump_id;
+ uint8_t len_lw;
+};
+
+/* OUTPUT action TLV: forward the packet to firmware port `port`.
+ * `flags` carries NFP_FL_OUT_FLAGS_LAST on the final output action.
+ * Fields are big-endian as expected by the firmware.
+ */
+struct nfp_fl_act_output {
+ struct nfp_fl_act_head head;
+ rte_be16_t flags;
+ rte_be32_t port;
+};
+
int nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower);
int nfp_flower_cmsg_repr_reify(struct nfp_app_fw_flower *app_fw_flower,
struct nfp_flower_representor *repr);
@@ -558,6 +558,22 @@ struct nfp_mask_id_entry {
case RTE_FLOW_ACTION_TYPE_VOID:
PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_VOID detected");
break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_MARK detected");
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_DROP detected");
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_COUNT detected");
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_RSS detected");
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_PORT_ID detected");
+ key_ls->act_size += sizeof(struct nfp_fl_act_output);
+ break;
default:
PMD_DRV_LOG(ERR, "Action type %d not supported.", action->type);
return -ENOTSUP;
@@ -785,6 +801,96 @@ struct nfp_mask_id_entry {
return 0;
}
+/*
+ * Encode a RTE_FLOW_ACTION_TYPE_PORT_ID action as a firmware OUTPUT
+ * action TLV at @act_data, and record the destination port in the rule
+ * metadata `shortcut` field so the firmware can fast-path the rule.
+ *
+ * Return: 0 on success, -ERANGE if the action carries no conf or the
+ * port id is out of range.
+ *
+ * NOTE(review): the ethdev looked up from port_id->id is cast to an
+ * nfp_flower_representor without verifying the port belongs to this
+ * driver -- confirm callers guarantee this, or add a check.
+ */
+static int
+nfp_flow_action_output(char *act_data,
+ const struct rte_flow_action *action,
+ struct nfp_fl_rule_metadata *nfp_flow_meta)
+{
+ size_t act_size;
+ struct rte_eth_dev *ethdev;
+ struct nfp_fl_act_output *output;
+ struct nfp_flower_representor *representor;
+ const struct rte_flow_action_port_id *port_id;
+
+ port_id = action->conf;
+ if (port_id == NULL || port_id->id >= RTE_MAX_ETHPORTS)
+ return -ERANGE;
+
+ ethdev = &rte_eth_devices[port_id->id];
+ representor = (struct nfp_flower_representor *)ethdev->data->dev_private;
+ act_size = sizeof(struct nfp_fl_act_output);
+
+ output = (struct nfp_fl_act_output *)act_data;
+ output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
+ /* TLV length is expressed in long words (see NFP_FL_LW_SIZ). */
+ output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ /* Only one output per rule is generated so far, so it is always last. */
+ output->flags = rte_cpu_to_be_16(NFP_FL_OUT_FLAGS_LAST);
+ output->port = rte_cpu_to_be_32(representor->port_id);
+
+ nfp_flow_meta->shortcut = rte_cpu_to_be_32(representor->port_id);
+
+ return 0;
+}
+
+/*
+ * Walk the rte_flow action list and emit firmware action TLVs into
+ * nfp_flow->payload.action_data.  Mark, count and RSS are accepted but
+ * currently emit no action data; drop is handled purely through the
+ * metadata shortcut; only port_id emits an OUTPUT TLV.
+ *
+ * Return: 0 on success, -ENOTSUP for an unsupported action type, or
+ * the error propagated from nfp_flow_action_output().
+ */
+static int
+nfp_flow_compile_action(__rte_unused struct nfp_flower_representor *representor,
+ const struct rte_flow_action actions[],
+ struct rte_flow *nfp_flow)
+{
+ int ret = 0;
+ char *position;
+ char *action_data;
+ bool drop_flag = false;
+ uint32_t total_actions = 0;
+ const struct rte_flow_action *action;
+ struct nfp_fl_rule_metadata *nfp_flow_meta;
+
+ nfp_flow_meta = nfp_flow->payload.meta;
+ action_data = nfp_flow->payload.action_data;
+ /* Write cursor into the action buffer, advanced per emitted TLV. */
+ position = action_data;
+
+ for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_MARK");
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_DROP");
+ drop_flag = true;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_COUNT");
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_RSS");
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_PORT_ID");
+ ret = nfp_flow_action_output(position, action, nfp_flow_meta);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed when process"
+ " RTE_FLOW_ACTION_TYPE_PORT_ID");
+ return ret;
+ }
+
+ position += sizeof(struct nfp_fl_act_output);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
+ return -ENOTSUP;
+ }
+ total_actions++;
+ }
+
+ /* Pick the firmware shortcut: drop always wins; anything that is not
+  * a plain single-output rule falls back to the NULL shortcut.
+  * NOTE(review): total_actions also counts VOID and other no-op
+  * actions, so e.g. a VOID + PORT_ID list resets the shortcut set by
+  * nfp_flow_action_output() back to NULL -- confirm this is intended.
+  */
+ if (drop_flag)
+ nfp_flow_meta->shortcut = rte_cpu_to_be_32(NFP_FL_SC_ACT_DROP);
+ else if (total_actions > 1)
+ nfp_flow_meta->shortcut = rte_cpu_to_be_32(NFP_FL_SC_ACT_NULL);
+
+ return 0;
+}
+
static struct rte_flow *
nfp_flow_process(struct nfp_flower_representor *representor,
const struct rte_flow_item items[],
@@ -833,6 +939,12 @@ struct nfp_mask_id_entry {
goto free_flow;
}
+ ret = nfp_flow_compile_action(representor, actions, nfp_flow);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "nfp flow action process failed.");
+ goto free_flow;
+ }
+
nfp_flow_meta = nfp_flow->payload.meta;
mask_data = nfp_flow->payload.mask_data;
mask_len = key_layer.key_size;
@@ -33,6 +33,43 @@
/* The firmware expects lengths in units of long words */
#define NFP_FL_LW_SIZ 2
+/* Shortcut codes stored in the rule metadata `shortcut` field; they let
+ * the firmware fast-path common single-action rules.
+ */
+#define NFP_FL_SC_ACT_DROP 0x80000000
+#define NFP_FL_SC_ACT_USER 0x7D000000
+#define NFP_FL_SC_ACT_POPV 0x6A000000
+#define NFP_FL_SC_ACT_NULL 0x00000000
+
+/* Action opcodes */
+#define NFP_FL_ACTION_OPCODE_OUTPUT 0
+#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
+#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3
+#define NFP_FL_ACTION_OPCODE_POP_MPLS 4
+#define NFP_FL_ACTION_OPCODE_USERSPACE 5
+#define NFP_FL_ACTION_OPCODE_SET_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_MPLS 8
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL 13
+#define NFP_FL_ACTION_OPCODE_SET_UDP 14
+#define NFP_FL_ACTION_OPCODE_SET_TCP 15
+#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
+#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_PRE_GS 18
+#define NFP_FL_ACTION_OPCODE_GS 19
+#define NFP_FL_ACTION_OPCODE_PUSH_NSH 20
+#define NFP_FL_ACTION_OPCODE_POP_NSH 21
+#define NFP_FL_ACTION_OPCODE_SET_QUEUE 22
+#define NFP_FL_ACTION_OPCODE_CONNTRACK 23
+#define NFP_FL_ACTION_OPCODE_METER 24
+#define NFP_FL_ACTION_OPCODE_CT_NAT_EXT 25
+#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26
+#define NFP_FL_ACTION_OPCODE_NUM 32
+
+/* Set in nfp_fl_act_output.flags on the last OUTPUT action of a rule. */
+#define NFP_FL_OUT_FLAGS_LAST (1 << 15)
+
/* Tunnel ports */
#define NFP_FL_PORT_TYPE_TUN 0x50000000