@@ -15,6 +15,7 @@
#include "cpfl_ethdev.h"
#include <ethdev_private.h>
#include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
#define CPFL_REPRESENTOR "representor"
#define CPFL_TX_SINGLE_Q "tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+/* Drop every flow still tracked on the vport's interface at close time:
+ * unlink it from the list, let the owning engine release engine-side
+ * state via its free callback (when provided), then release the flow
+ * object itself.
+ */
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *flow;
+
+	while ((flow = TAILQ_FIRST(&vport->itf.flow_list)) != NULL) {
+		TAILQ_REMOVE(&vport->itf.flow_list, flow, next);
+		if (flow->engine->free != NULL)
+			flow->engine->free(flow);
+		rte_free(flow);
+	}
+}
+
static int
cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
{
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
+ cpfl_flow_free(cpfl_vport);
idpf_vport_deinit(vport);
rte_free(cpfl_vport->p2p_q_chunks_info);
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+/* .flow_ops_get ethdev callback: expose the rte_flow ops table.
+ * Only vport interfaces support rte_flow (-ENOTSUP otherwise); when the
+ * PMD was built without jansson, *ops is NULL and a notice is logged.
+ */
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vport supports rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	/* The guard macro is RTE_HAS_JANSSON, so the missing dependency is
+	 * jansson, not json-c: the old message pointed users at the wrong
+	 * library.
+	 */
+	PMD_DRV_LOG(NOTICE, "not support rte_flow, please install jansson library.");
+#endif
+	return 0;
+}
+
static int
cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .flow_ops_get = cpfl_dev_flow_ops_get,
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
goto err_vports_alloc;
}
+#ifdef RTE_HAS_JANSSON
+ ret = cpfl_flow_init(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init flow module");
+ goto err_flow_init;
+ }
+#endif
adapter->cur_vports = 0;
adapter->cur_vport_nb = 0;
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
return ret;
+#ifdef RTE_HAS_JANSSON
+err_flow_init:
+#endif
err_vports_alloc:
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
cpfl_vport->itf.adapter = adapter;
cpfl_vport->itf.data = dev->data;
+ TAILQ_INIT(&cpfl_vport->itf.flow_list);
adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
static void
cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
{
+#ifdef RTE_HAS_JANSSON
+ cpfl_flow_uninit(adapter);
+#endif
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
cpfl_vport_map_uninit(adapter);
idpf_adapter_deinit(&adapter->base);
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
CPFL_ITF_TYPE_REPRESENTOR,
};
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
struct cpfl_itf {
enum cpfl_itf_type type;
struct cpfl_adapter_ext *adapter;
+ struct cpfl_flow_list flow_list;
void *data;
};
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
rte_spinlock_t repr_lock;
struct rte_hash *repr_allowlist_hash;
+ struct cpfl_flow_js_parser *flow_parser;
+
struct cpfl_metadata meta;
};
new file mode 100644
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+/* Append a flow engine to the tail of the global engine registry.
+ * cpfl_flow_engine_match() walks this list in registration order, so
+ * earlier-registered engines get first chance to claim a rule.
+ */
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+ TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+/* Walk the registered engines and return the first one whose
+ * parse_pattern_action callback accepts the attr/pattern/actions
+ * triplet. On a match, *meta carries the engine's parser output
+ * (callers may pass meta == NULL when they only need the engine).
+ * Returns NULL when no engine matches.
+ */
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *eng;
+	void *iter;
+
+	RTE_TAILQ_FOREACH_SAFE(eng, &engine_list, node, iter) {
+		if (eng->parse_pattern_action != NULL &&
+		    eng->parse_pattern_action(dev, attr, pattern, actions, meta) >= 0)
+			return eng;
+	}
+
+	return NULL;
+}
+
+/* Run the init callback of every registered engine.
+ * An engine without an init callback is treated as invalid (-ENOTSUP);
+ * the first failing engine aborts the walk and its error is returned.
+ * Engines initialized before the failure are NOT rolled back here —
+ * cpfl_flow_init() handles that via cpfl_flow_engine_uninit().
+ */
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *eng;
+	void *iter;
+	int err;
+
+	RTE_TAILQ_FOREACH_SAFE(eng, &engine_list, node, iter) {
+		if (eng->init == NULL) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     eng->type);
+			return -ENOTSUP;
+		}
+
+		err = eng->init(adapter);
+		if (err != 0) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     eng->type);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/* Invoke the uninit callback (when present) of every registered engine. */
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *eng;
+	void *iter;
+
+	RTE_TAILQ_FOREACH_SAFE(eng, &engine_list, node, iter) {
+		if (eng->uninit != NULL)
+			eng->uninit(adapter);
+	}
+}
+
+/* Validate rte_flow attributes: only priorities 0..CPFL_PREC_MAX (7)
+ * are accepted. Returns 0 on success, -rte_errno (EINVAL) otherwise.
+ * NOTE(review): group/ingress/egress attributes are not checked here —
+ * presumably validated by the per-engine parser; confirm.
+ */
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (attr->priority > CPFL_PREC_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Only support priority 0-7.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Sanity-check the create/validate triplet before engine matching.
+ * Check order matters for which error the caller sees: pattern first,
+ * then attr (including the priority range), then actions. An action
+ * list that starts with END is rejected the same as a NULL one.
+ */
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int err;
+
+	if (pattern == NULL) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (attr == NULL) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	err = cpfl_flow_attr_valid(attr, error);
+	if (err != 0)
+		return err;
+
+	if (actions == NULL || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Shared validate path for .validate and .create: parameter checks
+ * followed by engine matching. On success *engine points at the
+ * matched engine and, when meta is non-NULL, *meta holds its parser
+ * output.
+ */
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int err = cpfl_flow_param_valid(attr, pattern, actions, error);
+
+	if (err != 0)
+		return err;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (*engine == NULL) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* rte_flow .validate callback: checks that some registered engine
+ * accepts the rule, without programming anything.
+ */
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+
+	return __cpfl_flow_validate(dev, attr, pattern, actions, NULL,
+				    &engine, error);
+}
+
+/* rte_flow .create callback: match an engine, program the rule, and
+ * track the flow on the interface's flow list. Returns the new flow
+ * handle, or NULL with *error set on failure.
+ *
+ * Improvements over the first cut: validation runs BEFORE allocation,
+ * so rejected rules no longer pay a malloc/free round trip and two of
+ * the three error paths need no cleanup; meta is initialized so an
+ * engine parser that does not write it cannot leave it indeterminate.
+ */
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret)
+		return NULL;
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		return NULL;
+	}
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+/* rte_flow .destroy callback: delegate teardown to the owning engine
+ * and, only on success, drop the flow from the interface's tracking
+ * list. A NULL flow, or one without an engine/destroy callback, is
+ * rejected with EINVAL.
+ */
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int err;
+
+	if (flow == NULL || flow->engine == NULL || flow->engine->destroy == NULL) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	err = flow->engine->destroy(dev, flow, error);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+		return err;
+	}
+
+	TAILQ_REMOVE(&itf->flow_list, flow, next);
+	return 0;
+}
+
+/* rte_flow .flush callback: destroy every flow tracked on the
+ * interface. Stops at the first failure and propagates the engine's
+ * actual error code (previously a hard-coded -EINVAL, which discarded
+ * the real cause reported by cpfl_flow_destroy()).
+ */
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/* rte_flow .query callback. Only RTE_FLOW_ACTION_TYPE_COUNT is
+ * supported; data is interpreted as struct rte_flow_query_count and
+ * filled by the engine's query_count callback. VOID actions are
+ * skipped; any other action overwrites ret with -ENOTSUP.
+ * NOTE(review): an action list containing only VOID/END returns the
+ * initial -EINVAL without setting *error — confirm callers tolerate
+ * that.
+ */
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_query_count *count = data;
+ int ret = -EINVAL;
+
+ if (!flow || !flow->engine || !flow->engine->query_count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Invalid flow");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow->engine->query_count(dev, flow, count, error);
+ break;
+ default:
+ ret = rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Generic rte_flow ops table handed out by cpfl_dev_flow_ops_get(). */
+const struct rte_flow_ops cpfl_flow_ops = {
+ .validate = cpfl_flow_validate,
+ .create = cpfl_flow_create,
+ .destroy = cpfl_flow_destroy,
+ .flush = cpfl_flow_flush,
+ .query = cpfl_flow_query,
+};
+
+/* Initialize the flow module for an adapter: bring up all registered
+ * engines, then build the rule parser from the "flow_parser" devarg.
+ * A missing devarg is not an error — the flow module simply stays
+ * disabled (cpfl_flow_uninit() mirrors that early return). On parser
+ * failure the engines are torn down again via the goto cleanup path.
+ *
+ * Fix: use PMD_INIT_LOG consistently on this init-time path (the
+ * first cut mixed PMD_DRV_LOG and PMD_INIT_LOG).
+ */
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+/* Tear down the flow module: destroy the parser, then uninit engines.
+ * The early return mirrors cpfl_flow_init()'s "no flow_parser devarg"
+ * case, so nothing that was never created gets destroyed.
+ */
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+ if (ad->devargs.flow_parser[0] == '\0')
+ return;
+
+ cpfl_parser_destroy(ad->flow_parser);
+ cpfl_flow_engine_uninit(ad);
+}
new file mode 100644
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+/* Identifies which backend implements a registered flow engine. */
+enum cpfl_flow_engine_type {
+ CPFL_FLOW_ENGINE_NONE = 0,
+ CPFL_FLOW_ENGINE_FXP,
+};
+
+/* Callback signatures a flow engine supplies in struct cpfl_flow_engine.
+ * parse_pattern_action returns < 0 when the engine cannot handle the
+ * rule (cpfl_flow_engine_match() then tries the next engine); on
+ * success it may write engine-specific parser output through *meta.
+ */
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ void *meta,
+ struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta);
+
+/* A registered flow backend. Engines register themselves via
+ * cpfl_flow_engine_register(); init is mandatory (checked in
+ * cpfl_flow_engine_init()), the remaining callbacks are optional and
+ * NULL-checked at each call site.
+ */
+struct cpfl_flow_engine {
+ TAILQ_ENTRY(cpfl_flow_engine) node;
+ enum cpfl_flow_engine_type type;
+ engine_init_t init;
+ engine_uninit_t uninit;
+ engine_create_t create;
+ engine_destroy_t destroy;
+ engine_query_t query_count;
+ engine_free_t free;
+ engine_parse_pattern_action_t parse_pattern_action;
+};
+
+/* PMD-private flow handle, tracked on the owning interface's
+ * flow_list. engine is the engine that created the flow; rule is
+ * presumably the engine-specific rule object set by engine->create —
+ * confirm against the engine implementations.
+ */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ struct cpfl_flow_engine *engine;
+ void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error);
+#endif
@@ -41,6 +41,7 @@ endif
if dpdk_conf.has('RTE_HAS_JANSSON')
sources += files(
+ 'cpfl_flow.c',
'cpfl_flow_parser.c',
)
ext_deps += jansson_dep