@@ -72,6 +72,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
struct cpfl_adapter_ext *ad = itf->adapter;
struct cpfl_rule_info_meta *rim = meta;
struct cpfl_vport *vport;
+ struct cpfl_repr *repr;
if (!rim)
return ret;
@@ -82,6 +83,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
* Even index is tx queue and odd index is rx queue.
*/
cpq_id = vport->base.devarg_id * 2;
+ } else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+ repr = (struct cpfl_repr *)itf;
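+ /*
+  * A representor has no vport of its own; hash pf_id + vf_id into the
+  * available config queue pairs (even index is tx queue and odd index
+  * is rx queue).
+  */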
+ cpq_id = ((repr->repr_id.pf_id + repr->repr_id.vf_id) &
+ (CPFL_TX_CFGQ_NUM - 1)) * 2;
} else {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"fail to find correct control queue");
@@ -121,6 +126,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
struct cpfl_rule_info_meta *rim;
uint32_t i;
struct cpfl_vport *vport;
+ struct cpfl_repr *repr;
rim = flow->rule;
if (!rim) {
@@ -134,6 +140,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
if (itf->type == CPFL_ITF_TYPE_VPORT) {
vport = (struct cpfl_vport *)itf;
cpq_id = vport->base.devarg_id * 2;
+ } else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+ repr = (struct cpfl_repr *)itf;
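+ /* Select the same config queue pair as in cpfl_fxp_create(). */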
+ cpq_id = ((repr->repr_id.pf_id + repr->repr_id.vf_id) &
+ (CPFL_TX_CFGQ_NUM - 1)) * 2;
} else {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"fail to find correct control queue");
@@ -413,6 +423,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
return false;
}
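+/*
+ * Populate the parser metadata for a flow: the port id and VSI ids of the
+ * originating interface, plus the target VSI of any represented-port or
+ * port-representor action found in the action list.
+ */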
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+ const struct rte_flow_action actions[])
+{
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+ const struct rte_flow_action_ethdev *ethdev;
+ struct cpfl_itf *target_itf;
+ bool ret;
+
+ if (itf->type == CPFL_ITF_TYPE_VPORT) {
+ ret = cpfl_metadata_write_port_id(itf);
+ if (!ret) {
+ PMD_DRV_LOG(ERR, "fail to write port id");
+ return false;
+ }
+ }
+
+ ret = cpfl_metadata_write_sourcevsi(itf);
+ if (!ret) {
+ PMD_DRV_LOG(ERR, "fail to write source vsi id");
+ return false;
+ }
+
+ ret = cpfl_metadata_write_vsi(itf);
+ if (!ret) {
+ PMD_DRV_LOG(ERR, "fail to write vsi id");
+ return false;
+ }
+
+ if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+ return false;
+
+ for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+ ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+ target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+ if (!target_itf) {
+ PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+ return false;
+ }
+ ret = cpfl_metadata_write_targetvsi(target_itf);
+ if (!ret) {
+ PMD_DRV_LOG(ERR, "fail to write target vsi id");
+ return false;
+ }
+ break;
+ default:
+ continue;
+ }
+ }
+
+ return true;
+}
+
static int
cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -429,6 +497,12 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
struct cpfl_rule_info_meta *rim;
int ret;
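+ /* Record port and VSI metadata for this flow before parsing the
+  * pattern and actions.
+  */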
+ ret = cpfl_fxp_get_metadata_port(itf, actions);
+ if (!ret) {
+ PMD_DRV_LOG(ERR, "Fail to save metadata.");
+ return -EINVAL;
+ }
+
ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
if (ret) {
PMD_DRV_LOG(ERR, "No Match pattern support.");
@@ -4,6 +4,8 @@
#include "cpfl_representor.h"
#include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
static int
cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
return 0;
}
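+/* rte_flow ops for representor ports; only available when built with jansson. */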
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+ const struct rte_flow_ops **ops)
+{
+ if (!dev)
+ return -EINVAL;
+
+#ifdef RTE_HAS_JANSSON
+ *ops = &cpfl_flow_ops;
+#else
+ *ops = NULL;
+ PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the jansson library.");
+#endif
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_repr_dev_ops = {
.dev_start = cpfl_repr_dev_start,
.dev_stop = cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
.tx_queue_setup = cpfl_repr_tx_queue_setup,
.link_update = cpfl_repr_link_update,
+ .flow_ops_get = cpfl_dev_repr_flow_ops_get,
};
static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
struct cpfl_repr_param *param = init_param;
struct cpfl_adapter_ext *adapter = param->adapter;
+ int ret;
repr->repr_id = param->repr_id;
repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
repr->func_up = true;
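+ /*
+  * Initialize per-interface flow state: an empty flow list and a batch of
+  * DMA buffers used to carry rule config packets to the control queues.
+  */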
+ TAILQ_INIT(&repr->itf.flow_list);
+ memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+ memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+ ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+ sizeof(union cpfl_rule_cfg_pkt_record),
+ CPFL_FLOW_BATCH_SIZE);
+ if (ret < 0)
+ return ret;
+
eth_dev->dev_ops = &cpfl_repr_dev_ops;
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;