@@ -6,6 +6,7 @@
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_logs.h"
#include "../nfp_common.h"
+#include "../nfp_flow.h"
#include "nfp_flower.h"
#include "nfp_flower_cmsg.h"
#include "nfp_flower_ctrl.h"
@@ -177,3 +178,71 @@
return 0;
}
+
+/*
+ * Send a FLOW_DEL control message to the firmware for @flow.
+ *
+ * The rule metadata plus key/mask/action payload is copied verbatim from
+ * flow->payload.meta into a freshly allocated ctrl-vNIC mbuf and transmitted.
+ *
+ * Returns 0 on success, -ENOMEM if no mbuf could be allocated, or -EIO if
+ * the ctrl vNIC failed to transmit the message.
+ */
+int
+nfp_flower_cmsg_flow_delete(struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *flow)
+{
+ char *msg;
+ uint16_t cnt;
+ uint32_t msg_len;
+ struct rte_mbuf *mbuf;
+ struct nfp_fl_rule_metadata *nfp_flow_meta;
+
+ mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for flow delete.");
+ return -ENOMEM;
+ }
+
+ /* Copy the flow to mbuf */
+ nfp_flow_meta = flow->payload.meta;
+ /* Lengths are stored in long words; shift back to bytes for the copy. */
+ msg_len = (nfp_flow_meta->key_len + nfp_flow_meta->mask_len +
+ nfp_flow_meta->act_len) << NFP_FL_LW_SIZ;
+ msg_len += sizeof(struct nfp_fl_rule_metadata);
+ msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_FLOW_DEL, msg_len);
+ rte_memcpy(msg, flow->payload.meta, msg_len);
+
+ cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+ if (cnt == 0) {
+ /* xmit did not consume the mbuf, so we must free it here. */
+ PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+ rte_pktmbuf_free(mbuf);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Send a FLOW_ADD control message to the firmware for @flow.
+ *
+ * Mirrors nfp_flower_cmsg_flow_delete(): the serialized rule (metadata +
+ * key/mask/action payload) is copied into a ctrl-vNIC mbuf and transmitted
+ * with message type NFP_FLOWER_CMSG_TYPE_FLOW_ADD.
+ *
+ * Returns 0 on success, -ENOMEM on mbuf allocation failure, or -EIO if
+ * transmission through the ctrl vNIC failed.
+ */
+int
+nfp_flower_cmsg_flow_add(struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *flow)
+{
+ char *msg;
+ uint16_t cnt;
+ uint32_t msg_len;
+ struct rte_mbuf *mbuf;
+ struct nfp_fl_rule_metadata *nfp_flow_meta;
+
+ mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for flow add.");
+ return -ENOMEM;
+ }
+
+ /* copy the flow to mbuf */
+ nfp_flow_meta = flow->payload.meta;
+ /* Lengths are stored in long words; shift back to bytes for the copy. */
+ msg_len = (nfp_flow_meta->key_len + nfp_flow_meta->mask_len +
+ nfp_flow_meta->act_len) << NFP_FL_LW_SIZ;
+ msg_len += sizeof(struct nfp_fl_rule_metadata);
+ msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_FLOW_ADD, msg_len);
+ rte_memcpy(msg, flow->payload.meta, msg_len);
+
+ cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+ if (cnt == 0) {
+ /* xmit did not consume the mbuf, so we must free it here. */
+ PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+ rte_pktmbuf_free(mbuf);
+ return -EIO;
+ }
+
+ return 0;
+}
@@ -189,10 +189,58 @@ enum nfp_flower_cmsg_port_vnic_type {
return rte_pktmbuf_mtod(m, char *) + 4 + 4 + NFP_FLOWER_CMSG_HLEN;
}
+/*
+ * Metadata with L2 (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | key_type | mask_id | PCP |p| vlan outermost VID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * ^ ^
+ * NOTE: | TCI |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_meta_tci {
+ uint8_t nfp_flow_key_layer; /* NFP_FLOWER_LAYER_* bitmap (key_type) */
+ uint8_t mask_id; /* Mask id assigned from the mask table */
+ rte_be16_t tci; /* VLAN TCI (PCP | present | VID), big endian */
+};
+
+/*
+ * Extended metadata for additional key_layers (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | nfp_flow_key_layer2 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ext_meta {
+ rte_be32_t nfp_flow_key_layer2; /* NFP_FLOWER_LAYER2_* bitmap, big endian */
+};
+
+/*
+ * L1 Port details (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | port_ingress |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_in_port {
+ rte_be32_t in_port; /* Ingress port id, or NFP_FL_PORT_TYPE_TUN | tun_type */
+};
+
int nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower);
int nfp_flower_cmsg_repr_reify(struct nfp_app_fw_flower *app_fw_flower,
struct nfp_flower_representor *repr);
int nfp_flower_cmsg_port_mod(struct nfp_app_fw_flower *app_fw_flower,
uint32_t port_id, bool carrier_ok);
+int nfp_flower_cmsg_flow_delete(struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *flow);
+int nfp_flower_cmsg_flow_add(struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *flow);
#endif /* _NFP_CMSG_H_ */
@@ -10,6 +10,7 @@
#include "../nfp_logs.h"
#include "../nfp_ctrl.h"
#include "../nfp_rxtx.h"
+#include "../nfp_flow.h"
#include "../nfpcore/nfp_mip.h"
#include "../nfpcore/nfp_rtsym.h"
#include "../nfpcore/nfp_nsp.h"
@@ -590,6 +591,8 @@
.promiscuous_disable = nfp_flower_repr_promiscuous_disable,
.mac_addr_set = nfp_flower_repr_mac_addr_set,
+
+ .flow_ops_get = nfp_net_flow_ops_get,
};
static uint32_t
@@ -7,11 +7,15 @@
#include <rte_hash.h>
#include <rte_jhash.h>
#include <bus_pci_driver.h>
+#include <rte_malloc.h>
#include "nfp_common.h"
#include "nfp_flow.h"
#include "nfp_logs.h"
#include "flower/nfp_flower.h"
+#include "flower/nfp_flower_cmsg.h"
+#include "flower/nfp_flower_ctrl.h"
+#include "flower/nfp_flower_representor.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
@@ -21,6 +25,15 @@ struct nfp_mask_id_entry {
uint8_t mask_id;
};
+/*
+ * Resolve the flower flow-private data from a representor ethdev.
+ * Assumes @dev is a flower representor port — callers must guarantee this
+ * (see the RTE_ETH_DEV_REPRESENTOR check in nfp_net_flow_ops_get()).
+ */
+static inline struct nfp_flow_priv *
+nfp_flow_dev_to_priv(struct rte_eth_dev *dev)
+{
+ struct nfp_flower_representor *repr;
+
+ repr = (struct nfp_flower_representor *)dev->data->dev_private;
+ return repr->app_fw_flower->flow_priv;
+}
+
static int
nfp_mask_id_alloc(struct nfp_flow_priv *priv, uint8_t *mask_id)
{
@@ -160,7 +173,7 @@ struct nfp_mask_id_entry {
return entry;
}
-__rte_unused static bool
+static bool
nfp_check_mask_add(struct nfp_flow_priv *priv,
char *mask_data,
uint32_t mask_len,
@@ -187,7 +200,7 @@ struct nfp_mask_id_entry {
return true;
}
-__rte_unused static bool
+static bool
nfp_check_mask_remove(struct nfp_flow_priv *priv,
char *mask_data,
uint32_t mask_len,
@@ -215,7 +228,7 @@ struct nfp_mask_id_entry {
return true;
}
-__rte_unused static int
+static int
nfp_flow_table_add(struct nfp_flow_priv *priv,
struct rte_flow *nfp_flow)
{
@@ -234,7 +247,7 @@ struct nfp_mask_id_entry {
return 0;
}
-__rte_unused static int
+static int
nfp_flow_table_delete(struct nfp_flow_priv *priv,
struct rte_flow *nfp_flow)
{
@@ -253,7 +266,7 @@ struct nfp_mask_id_entry {
return 0;
}
-__rte_unused static struct rte_flow *
+static struct rte_flow *
nfp_flow_table_search(struct nfp_flow_priv *priv,
struct rte_flow *nfp_flow)
{
@@ -273,7 +286,7 @@ struct nfp_mask_id_entry {
return flow_find;
}
-__rte_unused static struct rte_flow *
+static struct rte_flow *
nfp_flow_alloc(struct nfp_fl_key_ls *key_layer)
{
char *tmp;
@@ -306,14 +319,14 @@ struct nfp_mask_id_entry {
return NULL;
}
-__rte_unused static void
+static void
nfp_flow_free(struct rte_flow *nfp_flow)
{
rte_free(nfp_flow->payload.meta);
rte_free(nfp_flow);
}
-__rte_unused static int
+static int
nfp_stats_id_alloc(struct nfp_flow_priv *priv, uint32_t *ctx)
{
struct circ_buf *ring;
@@ -348,7 +361,7 @@ struct nfp_mask_id_entry {
return 0;
}
-__rte_unused static int
+static int
nfp_stats_id_free(struct nfp_flow_priv *priv, uint32_t ctx)
{
struct circ_buf *ring;
@@ -366,6 +379,548 @@ struct nfp_mask_id_entry {
return 0;
}
+/*
+ * Write the nfp_flower_meta_tci header at @mbuf_off from @key_layer.
+ * mask_id is initialized to all-ones; the real id is patched in later by
+ * nfp_flower_update_meta_tci() once the mask table has assigned one.
+ */
+static void
+nfp_flower_compile_meta_tci(char *mbuf_off, struct nfp_fl_key_ls *key_layer)
+{
+ struct nfp_flower_meta_tci *tci_meta;
+
+ tci_meta = (struct nfp_flower_meta_tci *)mbuf_off;
+ tci_meta->nfp_flow_key_layer = key_layer->key_layer;
+ tci_meta->mask_id = ~0;
+ tci_meta->tci = rte_cpu_to_be_16(key_layer->vlan);
+}
+
+/* Patch the assigned mask id into an already-compiled meta_tci header. */
+static void
+nfp_flower_update_meta_tci(char *exact, uint8_t mask_id)
+{
+ struct nfp_flower_meta_tci *meta_tci;
+
+ meta_tci = (struct nfp_flower_meta_tci *)exact;
+ meta_tci->mask_id = mask_id;
+}
+
+/* Write the extended metadata word (key_layer_two bitmap) at @mbuf_off. */
+static void
+nfp_flower_compile_ext_meta(char *mbuf_off, struct nfp_fl_key_ls *key_layer)
+{
+ struct nfp_flower_ext_meta *ext_meta;
+
+ ext_meta = (struct nfp_flower_ext_meta *)mbuf_off;
+ ext_meta->nfp_flow_key_layer2 = rte_cpu_to_be_32(key_layer->key_layer_two);
+}
+
+/*
+ * Write the in_port metadata word at @mbuf_off.
+ *
+ * For the mask half (@is_mask) the port is always fully matched (all-ones).
+ * For the exact half, tunneled flows encode NFP_FL_PORT_TYPE_TUN plus the
+ * tunnel type; otherwise the plain ingress port id is used.
+ */
+static void
+nfp_compile_meta_port(char *mbuf_off,
+ struct nfp_fl_key_ls *key_layer,
+ bool is_mask)
+{
+ struct nfp_flower_in_port *port_meta;
+
+ port_meta = (struct nfp_flower_in_port *)mbuf_off;
+
+ if (is_mask)
+ port_meta->in_port = rte_cpu_to_be_32(~0);
+ else if (key_layer->tun_type)
+ port_meta->in_port = rte_cpu_to_be_32(NFP_FL_PORT_TYPE_TUN |
+ key_layer->tun_type);
+ else
+ port_meta->in_port = rte_cpu_to_be_32(key_layer->port);
+}
+
+/*
+ * Fill in the rule metadata header and the leading metadata section of both
+ * the exact (unmasked) and mask halves of @nfp_flow's payload.
+ *
+ * Layout written (per half): meta_tci, optional ext_meta (only when
+ * NFP_FLOWER_LAYER_EXT_META is set), then in_port.  Consumes one flower
+ * version number from @priv and associates the firmware stats context
+ * @stats_ctx with the rule.
+ */
+static void
+nfp_flow_compile_metadata(struct nfp_flow_priv *priv,
+ struct rte_flow *nfp_flow,
+ struct nfp_fl_key_ls *key_layer,
+ uint32_t stats_ctx)
+{
+ struct nfp_fl_rule_metadata *nfp_flow_meta;
+ char *mbuf_off_exact;
+ char *mbuf_off_mask;
+
+ /*
+ * Convert to long words as firmware expects
+ * lengths in units of NFP_FL_LW_SIZ.
+ */
+ nfp_flow_meta = nfp_flow->payload.meta;
+ nfp_flow_meta->key_len = key_layer->key_size >> NFP_FL_LW_SIZ;
+ /* The mask array has the same length as the key, hence key_size here. */
+ nfp_flow_meta->mask_len = key_layer->key_size >> NFP_FL_LW_SIZ;
+ nfp_flow_meta->act_len = key_layer->act_size >> NFP_FL_LW_SIZ;
+ nfp_flow_meta->flags = 0;
+ nfp_flow_meta->host_ctx_id = rte_cpu_to_be_32(stats_ctx);
+ nfp_flow_meta->host_cookie = rte_rand();
+ nfp_flow_meta->flow_version = rte_cpu_to_be_64(priv->flower_version);
+ priv->flower_version++;
+
+ mbuf_off_exact = nfp_flow->payload.unmasked_data;
+ mbuf_off_mask = nfp_flow->payload.mask_data;
+
+ /* Populate Metadata */
+ nfp_flower_compile_meta_tci(mbuf_off_exact, key_layer);
+ nfp_flower_compile_meta_tci(mbuf_off_mask, key_layer);
+ mbuf_off_exact += sizeof(struct nfp_flower_meta_tci);
+ mbuf_off_mask += sizeof(struct nfp_flower_meta_tci);
+
+ /* Populate Extended Metadata if required */
+ if (key_layer->key_layer & NFP_FLOWER_LAYER_EXT_META) {
+ nfp_flower_compile_ext_meta(mbuf_off_exact, key_layer);
+ nfp_flower_compile_ext_meta(mbuf_off_mask, key_layer);
+ mbuf_off_exact += sizeof(struct nfp_flower_ext_meta);
+ mbuf_off_mask += sizeof(struct nfp_flower_ext_meta);
+ }
+
+ /* Populate Port Data */
+ nfp_compile_meta_port(mbuf_off_exact, key_layer, false);
+ nfp_compile_meta_port(mbuf_off_mask, key_layer, true);
+ mbuf_off_exact += sizeof(struct nfp_flower_in_port);
+ mbuf_off_mask += sizeof(struct nfp_flower_in_port);
+}
+
+/*
+ * Walk the pattern item list and accumulate key-layer requirements into
+ * @key_ls.  No item types are supported yet, so any non-END item yields
+ * -ENOTSUP; an empty pattern returns 0.  @key_ls is currently untouched
+ * (hence __rte_unused) — future item handlers will update it.
+ */
+static int
+nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
+ __rte_unused struct nfp_fl_key_ls *key_ls)
+{
+ const struct rte_flow_item *item;
+
+ for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
+ switch (item->type) {
+ default:
+ PMD_DRV_LOG(ERR, "Item type %d not supported.", item->type);
+ return -ENOTSUP;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Walk the action list and accumulate the action-buffer size requirement
+ * into @key_ls.  Only VOID actions are accepted so far; anything else
+ * returns -ENOTSUP.  Returns -ERANGE if the accumulated action size would
+ * exceed the firmware limit NFP_FL_MAX_A_SIZ, 0 otherwise.
+ */
+static int
+nfp_flow_key_layers_calculate_actions(const struct rte_flow_action actions[],
+ struct nfp_fl_key_ls *key_ls)
+{
+ int ret = 0;
+ const struct rte_flow_action *action;
+
+ for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
+ /* Make sure actions length no longer than NFP_FL_MAX_A_SIZ */
+ if (key_ls->act_size > NFP_FL_MAX_A_SIZ) {
+ PMD_DRV_LOG(ERR, "The action list is too long.");
+ ret = -ERANGE;
+ break;
+ }
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_VOID detected");
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Action type %d not supported.", action->type);
+ return -ENOTSUP;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Initialize @key_ls to the baseline layout (meta_tci + in_port, PORT
+ * layer) and then fold in requirements from the pattern items and actions.
+ *
+ * NOTE(review): the two sub-results are combined with bitwise OR, so when
+ * both fail the returned value is a blend of two negative errnos — callers
+ * only test for non-zero, but the exact code is not meaningful then.
+ */
+static int
+nfp_flow_key_layers_calculate(const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct nfp_fl_key_ls *key_ls)
+{
+ int ret = 0;
+
+ key_ls->key_layer_two = 0;
+ key_ls->key_layer = NFP_FLOWER_LAYER_PORT;
+ key_ls->key_size = sizeof(struct nfp_flower_meta_tci) +
+ sizeof(struct nfp_flower_in_port);
+ key_ls->act_size = 0;
+ key_ls->port = ~0;
+ key_ls->vlan = 0;
+ key_ls->tun_type = NFP_FL_TUN_NONE;
+
+ ret |= nfp_flow_key_layers_calculate_items(items, key_ls);
+ ret |= nfp_flow_key_layers_calculate_actions(actions, key_ls);
+
+ return ret;
+}
+
+/*
+ * Build an rte_flow object for @representor from @items and @actions.
+ *
+ * Allocates a stats context and the flow object, compiles the metadata
+ * section, registers the mask with the mask table, and checks the flow
+ * table for a duplicate.  On success the (not yet installed) flow is
+ * returned; on any failure all acquired resources are released and NULL
+ * is returned.
+ */
+static struct rte_flow *
+nfp_flow_process(struct nfp_flower_representor *representor,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[])
+{
+ int ret;
+ char *mask_data;
+ uint32_t mask_len;
+ uint32_t stats_ctx = 0;
+ uint8_t new_mask_id = 0;
+ struct rte_flow *nfp_flow;
+ struct rte_flow *flow_find;
+ struct nfp_flow_priv *priv;
+ struct nfp_fl_key_ls key_layer;
+ struct nfp_fl_rule_metadata *nfp_flow_meta;
+
+ ret = nfp_flow_key_layers_calculate(items, actions, &key_layer);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Key layers calculate failed.");
+ return NULL;
+ }
+
+ /* No item set an ingress port: default to this representor's port. */
+ if (key_layer.port == (uint32_t)~0)
+ key_layer.port = representor->port_id;
+
+ priv = representor->app_fw_flower->flow_priv;
+ ret = nfp_stats_id_alloc(priv, &stats_ctx);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "nfp stats id alloc failed.");
+ return NULL;
+ }
+
+ nfp_flow = nfp_flow_alloc(&key_layer);
+ if (nfp_flow == NULL) {
+ PMD_DRV_LOG(ERR, "Alloc nfp flow failed.");
+ goto free_stats;
+ }
+
+ nfp_flow->install_flag = true;
+
+ nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx);
+
+ nfp_flow_meta = nfp_flow->payload.meta;
+ mask_data = nfp_flow->payload.mask_data;
+ mask_len = key_layer.key_size;
+ if (!nfp_check_mask_add(priv, mask_data, mask_len,
+ &nfp_flow_meta->flags, &new_mask_id)) {
+ PMD_DRV_LOG(ERR, "nfp mask add check failed.");
+ goto free_flow;
+ }
+
+ /* Once we have a mask_id, update the meta tci */
+ nfp_flower_update_meta_tci(nfp_flow->payload.unmasked_data, new_mask_id);
+
+ /* Find the flow in hash table */
+ flow_find = nfp_flow_table_search(priv, nfp_flow);
+ if (flow_find != NULL) {
+ PMD_DRV_LOG(ERR, "This flow already exists.");
+ /* Undo the mask reference taken by nfp_check_mask_add() above. */
+ if (!nfp_check_mask_remove(priv, mask_data, mask_len,
+ &nfp_flow_meta->flags)) {
+ PMD_DRV_LOG(ERR, "nfp mask del check failed.");
+ }
+ goto free_flow;
+ }
+
+ return nfp_flow;
+
+free_flow:
+ nfp_flow_free(nfp_flow);
+free_stats:
+ nfp_stats_id_free(priv, stats_ctx);
+
+ return NULL;
+}
+
+/*
+ * Validate flow attributes and hand off to nfp_flow_process().
+ *
+ * group/priority/transfer are accepted but not acted upon (logged only);
+ * egress is rejected and ingress is mandatory.  Returns the processed flow
+ * or NULL with @error set.
+ */
+static struct rte_flow *
+nfp_flow_setup(struct nfp_flower_representor *representor,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ if (attr->group != 0)
+ PMD_DRV_LOG(INFO, "Pretend we support group attribute.");
+
+ if (attr->priority != 0)
+ PMD_DRV_LOG(INFO, "Pretend we support priority attribute.");
+
+ if (attr->transfer != 0)
+ PMD_DRV_LOG(INFO, "Pretend we support transfer attribute.");
+
+ if (attr->egress != 0) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL, "Egress is not supported.");
+ return NULL;
+ }
+
+ if (attr->ingress == 0) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL, "Only ingress is supported.");
+ return NULL;
+ }
+
+ return nfp_flow_process(representor, items, actions);
+}
+
+/*
+ * Release the bookkeeping resources held by @nfp_flow: drop its mask-table
+ * reference, consume a flower version number (the firmware uses this to
+ * order rule updates), and free its stats context id.
+ *
+ * Does NOT free the flow object itself — callers do that separately.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+nfp_flow_teardown(struct nfp_flow_priv *priv, struct rte_flow *nfp_flow)
+{
+ char *mask_data;
+ uint32_t mask_len;
+ uint32_t stats_ctx;
+ struct nfp_fl_rule_metadata *nfp_flow_meta;
+
+ nfp_flow_meta = nfp_flow->payload.meta;
+ mask_data = nfp_flow->payload.mask_data;
+ mask_len = nfp_flow_meta->mask_len << NFP_FL_LW_SIZ;
+ if (!nfp_check_mask_remove(priv, mask_data, mask_len,
+ &nfp_flow_meta->flags)) {
+ PMD_DRV_LOG(ERR, "nfp mask del check failed.");
+ return -EINVAL;
+ }
+
+ nfp_flow_meta->flow_version = rte_cpu_to_be_64(priv->flower_version);
+ priv->flower_version++;
+
+ stats_ctx = rte_be_to_cpu_32(nfp_flow_meta->host_ctx_id);
+ return nfp_stats_id_free(priv, stats_ctx);
+}
+
+/*
+ * rte_flow_ops.validate callback: perform a full dry run — build the flow
+ * via nfp_flow_setup() and immediately tear it down again — to prove the
+ * rule could be offloaded without actually installing it.
+ *
+ * Returns 0 if the flow is offloadable, a negative errno (with @error set)
+ * otherwise.
+ */
+static int
+nfp_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *nfp_flow;
+ struct nfp_flow_priv *priv;
+ struct nfp_flower_representor *representor;
+
+ representor = (struct nfp_flower_representor *)dev->data->dev_private;
+ priv = representor->app_fw_flower->flow_priv;
+
+ nfp_flow = nfp_flow_setup(representor, attr, items, actions, error);
+ if (nfp_flow == NULL) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "This flow can not be offloaded.");
+ }
+
+ ret = nfp_flow_teardown(priv, nfp_flow);
+ if (ret != 0) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Flow resource free failed.");
+ }
+
+ nfp_flow_free(nfp_flow);
+
+ return 0;
+}
+
+/*
+ * rte_flow_ops.create callback: build the flow, push it to the firmware
+ * (FLOW_ADD cmsg) when install_flag is set, and record it in the driver's
+ * flow hash table.
+ *
+ * Returns the created flow, or NULL with @error set; on failure after
+ * setup, the flow is torn down and freed before returning.
+ */
+static struct rte_flow *
+nfp_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *nfp_flow;
+ struct nfp_flow_priv *priv;
+ struct nfp_app_fw_flower *app_fw_flower;
+ struct nfp_flower_representor *representor;
+
+ representor = (struct nfp_flower_representor *)dev->data->dev_private;
+ app_fw_flower = representor->app_fw_flower;
+ priv = app_fw_flower->flow_priv;
+
+ nfp_flow = nfp_flow_setup(representor, attr, items, actions, error);
+ if (nfp_flow == NULL) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "This flow can not be offloaded.");
+ return NULL;
+ }
+
+ /* Add the flow to hardware */
+ if (nfp_flow->install_flag) {
+ ret = nfp_flower_cmsg_flow_add(app_fw_flower, nfp_flow);
+ if (ret != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Add flow to firmware failed.");
+ goto flow_teardown;
+ }
+ }
+
+ /* Add the flow to flow hash table */
+ ret = nfp_flow_table_add(priv, nfp_flow);
+ if (ret != 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Add flow to the flow table failed.");
+ goto flow_teardown;
+ }
+
+ return nfp_flow;
+
+flow_teardown:
+ nfp_flow_teardown(priv, nfp_flow);
+ nfp_flow_free(nfp_flow);
+
+ return NULL;
+}
+
+/*
+ * rte_flow_ops.destroy callback: tear down the flow's resources, remove
+ * it from the firmware (FLOW_DEL cmsg) when install_flag is set, and
+ * delete it from the driver's flow hash table.
+ *
+ * NOTE: @nfp_flow is freed on every path, including the error paths —
+ * after this call the caller's pointer is dangling regardless of the
+ * return value.  Returns 0 on success or a negative errno with @error set.
+ */
+static int
+nfp_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *nfp_flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *flow_find;
+ struct nfp_flow_priv *priv;
+ struct nfp_app_fw_flower *app_fw_flower;
+ struct nfp_flower_representor *representor;
+
+ representor = (struct nfp_flower_representor *)dev->data->dev_private;
+ app_fw_flower = representor->app_fw_flower;
+ priv = app_fw_flower->flow_priv;
+
+ /* Find the flow in flow hash table */
+ flow_find = nfp_flow_table_search(priv, nfp_flow);
+ if (flow_find == NULL) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Flow does not exist.");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Update flow */
+ ret = nfp_flow_teardown(priv, nfp_flow);
+ if (ret != 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Flow teardown failed.");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Delete the flow from hardware */
+ if (nfp_flow->install_flag) {
+ ret = nfp_flower_cmsg_flow_delete(app_fw_flower, nfp_flow);
+ if (ret != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Delete flow from firmware failed.");
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+ /* Delete the flow from flow hash table */
+ ret = nfp_flow_table_delete(priv, nfp_flow);
+ if (ret != 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Delete flow from the flow table failed.");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ nfp_flow_free(nfp_flow);
+
+ return ret;
+}
+
+/*
+ * rte_flow_ops.flush callback: destroy every flow in the flow hash table.
+ * Iteration stops at the first destroy failure; @error is then already
+ * populated by nfp_flow_destroy().
+ */
+static int
+nfp_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ void *next_data;
+ uint32_t iter = 0;
+ const void *next_key;
+ struct nfp_flow_priv *priv;
+
+ priv = nfp_flow_dev_to_priv(dev);
+
+ while (rte_hash_iterate(priv->flow_table, &next_key, &next_data, &iter) >= 0) {
+ ret = nfp_flow_destroy(dev, (struct rte_flow *)next_data, error);
+ if (ret != 0)
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Copy (and reset) the accumulated hit/byte counters for @nfp_flow into
+ * the rte_flow_query_count structure pointed to by @data.
+ *
+ * Counters are read-and-clear under priv->stats_lock; if either counter
+ * is zero the query result is left untouched (hits_set/bytes_set stay as
+ * the caller initialized them).  Silently returns if the flow is not in
+ * the flow table.
+ */
+static void
+nfp_flow_stats_get(struct rte_eth_dev *dev,
+ struct rte_flow *nfp_flow,
+ void *data)
+{
+ uint32_t ctx_id;
+ struct rte_flow *flow;
+ struct nfp_flow_priv *priv;
+ struct nfp_fl_stats *stats;
+ struct rte_flow_query_count *query;
+
+ priv = nfp_flow_dev_to_priv(dev);
+ flow = nfp_flow_table_search(priv, nfp_flow);
+ if (flow == NULL) {
+ PMD_DRV_LOG(ERR, "Can not find statistics for this flow.");
+ return;
+ }
+
+ query = (struct rte_flow_query_count *)data;
+ /* host_ctx_id indexes the per-context stats array kept by the driver. */
+ ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id);
+ stats = &priv->stats[ctx_id];
+
+ rte_spinlock_lock(&priv->stats_lock);
+ if (stats->pkts && stats->bytes) {
+ query->hits = stats->pkts;
+ query->bytes = stats->bytes;
+ query->hits_set = 1;
+ query->bytes_set = 1;
+ stats->pkts = 0;
+ stats->bytes = 0;
+ }
+ rte_spinlock_unlock(&priv->stats_lock);
+}
+
+/*
+ * rte_flow_ops.query callback: only COUNT (and VOID) actions are
+ * supported; COUNT fills @data via nfp_flow_stats_get().  Any other
+ * action type fails the whole query with -ENOTSUP.
+ */
+static int
+nfp_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *nfp_flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action;
+
+ for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ nfp_flow_stats_get(dev, nfp_flow, data);
+ break;
+ default:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Unsupported action type for flow query.");
+ return -ENOTSUP;
+ }
+ }
+
+ return 0;
+}
+
+/* rte_flow operations exposed by the flower representor ports. */
+static const struct rte_flow_ops nfp_flow_ops = {
+ .validate = nfp_flow_validate,
+ .create = nfp_flow_create,
+ .destroy = nfp_flow_destroy,
+ .flush = nfp_flow_flush,
+ .query = nfp_flow_query,
+};
+
+/*
+ * ethdev .flow_ops_get callback: expose nfp_flow_ops, but only for
+ * representor ports — flow offload is meaningless on non-representor
+ * ports, for which -EINVAL is returned and *ops is cleared.
+ */
+int
+nfp_net_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
+{
+ if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) == 0) {
+ *ops = NULL;
+ PMD_DRV_LOG(ERR, "Port is not a representor.");
+ return -EINVAL;
+ }
+
+ *ops = &nfp_flow_ops;
+
+ return 0;
+}
+
int
nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
{
@@ -6,10 +6,36 @@
#ifndef _NFP_FLOW_H_
#define _NFP_FLOW_H_
+#include <ethdev_driver.h>
+
+#define NFP_FLOWER_LAYER_EXT_META (1 << 0)
+#define NFP_FLOWER_LAYER_PORT (1 << 1)
+#define NFP_FLOWER_LAYER_MAC (1 << 2)
+#define NFP_FLOWER_LAYER_TP (1 << 3)
+#define NFP_FLOWER_LAYER_IPV4 (1 << 4)
+#define NFP_FLOWER_LAYER_IPV6 (1 << 5)
+#define NFP_FLOWER_LAYER_CT (1 << 6)
+#define NFP_FLOWER_LAYER_VXLAN (1 << 7)
+
+#define NFP_FLOWER_LAYER2_GRE (1 << 0)
+#define NFP_FLOWER_LAYER2_QINQ (1 << 4)
+#define NFP_FLOWER_LAYER2_GENEVE (1 << 5)
+#define NFP_FLOWER_LAYER2_GENEVE_OP (1 << 6)
+#define NFP_FLOWER_LAYER2_TUN_IPV6 (1 << 7)
+
#define NFP_FL_META_FLAG_MANAGE_MASK (1 << 7)
#define NFP_MASK_TABLE_ENTRIES 1024
+/* The maximum action list size (in bytes) supported by the NFP. */
+#define NFP_FL_MAX_A_SIZ 1216
+
+/* The firmware expects lengths in units of long words */
+#define NFP_FL_LW_SIZ 2
+
+/* Tunnel ports */
+#define NFP_FL_PORT_TYPE_TUN 0x50000000
+
enum nfp_flower_tun_type {
NFP_FL_TUN_NONE = 0,
NFP_FL_TUN_GRE = 1,
@@ -75,6 +101,7 @@ struct nfp_fl_stats {
struct nfp_flow_priv {
uint32_t hash_seed; /**< Hash seed for hash tables in this structure. */
+ uint64_t flower_version; /**< Flow version, always increase. */
/* mask hash table */
struct nfp_fl_mask_id mask_ids; /**< Entry for mask hash table */
struct rte_hash *mask_table; /**< Hash table to store mask ids. */
@@ -97,5 +124,6 @@ struct rte_flow {
int nfp_flow_priv_init(struct nfp_pf_dev *pf_dev);
void nfp_flow_priv_uninit(struct nfp_pf_dev *pf_dev);
+int nfp_net_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
#endif /* _NFP_FLOW_H_ */