@@ -1118,6 +1118,15 @@ for an additional list of options shared with other mlx5 drivers.
By default, the PMD will set this value to 1.
+- ``fdb_def_rule_en`` parameter [int]
+
+ A non-zero value enables the PMD to create a dedicated rule on the E-Switch
+ root table. This rule forwards all incoming packets to table 1, and all other
+ rules are then created at their original E-Switch table level plus one. This
+ improves the flow insertion rate by skipping the root table, which is managed
+ by firmware.
+ If set to 0, all rules will be created on the original E-Switch table level.
+
+ By default, the PMD will set this value to 1.
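+
+ For example, assuming ``<PCI_BDF>`` is the device PCI address, the dedicated
+ rule can be disabled with the following device argument::
+
+    -a <PCI_BDF>,fdb_def_rule_en=0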
Supported NICs
--------------
@@ -1554,6 +1554,13 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
if (priv->sh->config.dv_flow_en == 2) {
/* Only HWS requires this information. */
flow_hw_init_tags_set(eth_dev);
+ if (priv->sh->config.dv_esw_en &&
+ flow_hw_create_vport_action(eth_dev)) {
+ DRV_LOG(ERR, "port %u failed to create vport action",
+ eth_dev->data->port_id);
+ err = EINVAL;
+ goto error;
+ }
return eth_dev;
}
/* Port representor shares the same max priority with pf port. */
@@ -1614,6 +1621,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
return eth_dev;
error:
if (priv) {
+ if (eth_dev &&
+ priv->sh &&
+ priv->sh->config.dv_flow_en == 2 &&
+ priv->sh->config.dv_esw_en)
+ flow_hw_destroy_vport_action(eth_dev);
if (priv->mreg_cp_tbl)
mlx5_hlist_destroy(priv->mreg_cp_tbl);
if (priv->sh)
@@ -172,6 +172,9 @@
/* Device parameter to configure the delay drop when creating Rxqs. */
#define MLX5_DELAY_DROP "delay_drop"
+/* Device parameter to create the FDB default rule in PMD. */
+#define MLX5_FDB_DEFAULT_RULE_EN "fdb_def_rule_en"
+
/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;
@@ -1239,6 +1242,8 @@ mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)
config->decap_en = !!tmp;
} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
config->allow_duplicate_pattern = !!tmp;
+ } else if (strcmp(MLX5_FDB_DEFAULT_RULE_EN, key) == 0) {
+ config->fdb_def_rule = !!tmp;
}
return 0;
}
@@ -1274,6 +1279,7 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
MLX5_RECLAIM_MEM,
MLX5_DECAP_EN,
MLX5_ALLOW_DUPLICATE_PATTERN,
+ MLX5_FDB_DEFAULT_RULE_EN,
NULL,
};
int ret = 0;
@@ -1285,6 +1291,7 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
config->dv_flow_en = 1;
config->decap_en = 1;
config->allow_duplicate_pattern = 1;
+ config->fdb_def_rule = 1;
if (mkvlist != NULL) {
/* Process parameters. */
ret = mlx5_kvargs_process(mkvlist, params,
@@ -1360,6 +1367,7 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
DRV_LOG(DEBUG, "\"decap_en\" is %u.", config->decap_en);
DRV_LOG(DEBUG, "\"allow_duplicate_pattern\" is %u.",
config->allow_duplicate_pattern);
+ DRV_LOG(DEBUG, "\"fdb_def_rule_en\" is %u.", config->fdb_def_rule);
return 0;
}
@@ -1943,6 +1951,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ flow_hw_destroy_vport_action(dev);
flow_hw_resource_release(dev);
#endif
flow_hw_clear_port_info(dev);
@@ -2644,6 +2653,11 @@ mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
sh->ibdev_name);
goto error;
}
+ if (sh->config.fdb_def_rule ^ config->fdb_def_rule) {
+ DRV_LOG(ERR, "\"fdb_def_rule_en\" configuration mismatch for shared %s context.",
+ sh->ibdev_name);
+ goto error;
+ }
if (sh->config.l3_vxlan_en ^ config->l3_vxlan_en) {
DRV_LOG(ERR, "\"l3_vxlan_en\" "
"configuration mismatch for shared %s context.",
@@ -309,6 +309,7 @@ struct mlx5_sh_config {
uint32_t allow_duplicate_pattern:1;
uint32_t lro_allowed:1; /* Whether LRO is allowed. */
/* Allow/Prevent the duplicate rules pattern. */
+ uint32_t fdb_def_rule:1; /* Create FDB default jump rule. */
};
@@ -337,6 +338,8 @@ enum {
MLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */
};
+/* Maximum number of pattern items stored in a flow job for rule creation. */
+#define MLX5_HW_MAX_ITEMS (16)
+
/* HW steering flow management job descriptor. */
struct mlx5_hw_q_job {
uint32_t type; /* Job type. */
@@ -344,6 +347,8 @@ struct mlx5_hw_q_job {
void *user_data; /* Job user data. */
uint8_t *encap_data; /* Encap data. */
struct mlx5_modification_cmd *mhdr_cmd;
+ struct rte_flow_item *items; /* Pattern items used for rule creation. */
+ struct rte_flow_item_ethdev port_spec; /* Spec of the implicit port item. */
};
/* HW steering job descriptor LIFO pool. */
@@ -1452,6 +1457,12 @@ struct mlx5_obj_ops {
#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
+/* HW steering control flow rule descriptor. */
+struct mlx5_hw_ctrl_flow {
+ LIST_ENTRY(mlx5_hw_ctrl_flow) next;
+ struct rte_eth_dev *owner_dev; /* Port on behalf of which the flow is created. */
+ struct rte_flow *flow; /* Created control flow rule. */
+};
+
struct mlx5_priv {
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
@@ -1492,6 +1503,12 @@ struct mlx5_priv {
unsigned int reta_idx_n; /* RETA index size. */
struct mlx5_drop drop_queue; /* Flow drop queues. */
void *root_drop_action; /* Pointer to root drop action. */
+ rte_spinlock_t hw_ctrl_lock; /* Protects the control flow rules list. */
+ LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows;
+ /* List of HW steering control flow rules. */
+ struct mlx5dr_action **hw_vport; /* Cached vport destination actions. */
+ struct rte_flow_template_table *hw_esw_sq_miss_root_tbl;
+ /* Root table for default SQ miss control flows. */
+ struct rte_flow_template_table *hw_esw_sq_miss_tbl;
+ /* Table for default SQ miss control flows (non-root). */
+ struct rte_flow_template_table *hw_esw_zero_tbl;
+ /* Table with default flows jumping from group 0 to group 1. */
struct mlx5_indexed_pool *flows[MLX5_FLOW_TYPE_MAXI];
/* RTE Flow rules. */
uint32_t ctrl_flows; /* Control flow rules. */
@@ -1553,10 +1570,9 @@ struct mlx5_priv {
/* HW steering rte flow table list header. */
LIST_HEAD(flow_hw_tbl, rte_flow_template_table) flow_hw_tbl;
/* HW steering global drop action. */
- struct mlx5dr_action *hw_drop[MLX5_HW_ACTION_FLAG_MAX]
- [MLX5DR_TABLE_TYPE_MAX];
- /* HW steering global drop action. */
- struct mlx5dr_action *hw_tag[MLX5_HW_ACTION_FLAG_MAX];
+ struct mlx5dr_action *hw_drop[2];
+ /* HW steering global tag action. */
+ struct mlx5dr_action *hw_tag[2];
struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */
#endif
};
@@ -999,6 +999,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.flex_item_create = mlx5_flow_flex_item_create,
.flex_item_release = mlx5_flow_flex_item_release,
.info_get = mlx5_flow_info_get,
+ .pick_transfer_proxy = mlx5_flow_pick_transfer_proxy,
.configure = mlx5_flow_port_configure,
.pattern_template_create = mlx5_flow_pattern_template_create,
.pattern_template_destroy = mlx5_flow_pattern_template_destroy,
@@ -1242,7 +1243,7 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (!attr->group && !attr->transfer)
+ if (!attr->group && !(attr->transfer && priv->fdb_def_rule))
return priv->sh->flow_max_priority - 2;
return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
}
@@ -1269,11 +1270,14 @@ mlx5_get_matcher_priority(struct rte_eth_dev *dev,
uint16_t priority = (uint16_t)attr->priority;
struct mlx5_priv *priv = dev->data->dev_private;
+ /* NIC root rules */
if (!attr->group && !attr->transfer) {
if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
priority = priv->sh->flow_max_priority - 1;
return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
- } else if (!external && attr->transfer && attr->group == 0 &&
+ /* FDB root rules */
+ } else if (attr->transfer && (!external || !priv->fdb_def_rule) &&
+ attr->group == 0 &&
attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {
return (priv->sh->flow_max_priority - 1) * 3;
}
@@ -2828,8 +2832,8 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
* Item specification.
* @param[in] item_flags
* Bit-fields that holds the items detected until now.
- * @param[in] attr
- * Flow rule attributes.
+ * @param root
+ * Whether the flow rule is created on the root table.
* @param[out] error
* Pointer to error structure.
*
@@ -2841,7 +2845,7 @@ mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
uint16_t udp_dport,
const struct rte_flow_item *item,
uint64_t item_flags,
- const struct rte_flow_attr *attr,
+ bool root,
struct rte_flow_error *error)
{
const struct rte_flow_item_vxlan *spec = item->spec;
@@ -2878,12 +2882,11 @@ mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
if (priv->sh->steering_format_version !=
MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
!udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
- /* FDB domain & NIC domain non-zero group */
- if ((attr->transfer || attr->group) && priv->sh->misc5_cap)
+ /* non-root table */
+ if (!root && priv->sh->misc5_cap)
valid_mask = &nic_mask;
/* Group zero in NIC domain */
- if (!attr->group && !attr->transfer &&
- priv->sh->tunnel_header_0_1)
+ if (!root && priv->sh->tunnel_header_0_1)
valid_mask = &nic_mask;
}
ret = mlx5_flow_item_acceptable
@@ -3122,11 +3125,11 @@ mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
if (mask->checksum_rsvd.checksum || mask->sequence.sequence) {
if (priv->sh->steering_format_version ==
MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
- ((attr->group || attr->transfer) &&
+ ((attr->group || (attr->transfer && priv->fdb_def_rule)) &&
!priv->sh->misc5_cap) ||
(!(priv->sh->tunnel_header_0_1 &&
priv->sh->tunnel_header_2_3) &&
- !attr->group && !attr->transfer))
+ !attr->group && (!attr->transfer || !priv->fdb_def_rule)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -6183,7 +6186,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,
}
if (qrss) {
/* Check if it is in meter suffix table. */
- mtr_sfx = attr->group == (attr->transfer ?
+ mtr_sfx = attr->group ==
+ ((attr->transfer && priv->fdb_def_rule) ?
(MLX5_FLOW_TABLE_LEVEL_METER - 1) :
MLX5_FLOW_TABLE_LEVEL_METER);
/*
@@ -11106,3 +11110,43 @@ int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
return 0;
}
+
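+/**
+ * Get the E-Switch transfer proxy port for the given port.
+ *
+ * For a master port the port itself is returned. For a representor,
+ * the master port sharing the same E-Switch domain is looked up.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[out] proxy_port_id
+ *   Where to store the proxy port ID.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */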
+int
+mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
+ uint16_t *proxy_port_id,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_priv *priv = dev->data->dev_private;
+ uint16_t port_id;
+
+ if (!priv->sh->config.dv_esw_en)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "unable to provide a proxy port"
+ " without E-Switch configured");
+ if (!priv->master && !priv->representor)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "unable to provide a proxy port"
+ " for port which is not a master"
+ " or a representor port");
+ if (priv->master) {
+ *proxy_port_id = dev->data->port_id;
+ return 0;
+ }
+ MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
+ const struct rte_eth_dev *port_dev = &rte_eth_devices[port_id];
+ const struct mlx5_priv *port_priv = port_dev->data->dev_private;
+
+ if (port_priv->master &&
+ port_priv->domain_id == priv->domain_id) {
+ *proxy_port_id = port_id;
+ return 0;
+ }
+ }
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "unable to find a proxy port");
+}
@@ -1152,6 +1152,11 @@ struct rte_flow_pattern_template {
struct mlx5dr_match_template *mt; /* mlx5 match template. */
uint64_t item_flags; /* Item layer flags. */
uint32_t refcnt; /* Reference counter. */
+ /*
+ * If true, then rule pattern should be prepended with
+ * represented_port pattern item.
+ */
+ bool implicit_port;
};
/* Flow action template struct. */
@@ -1227,6 +1232,7 @@ struct mlx5_hw_action_template {
/* mlx5 flow group struct. */
struct mlx5_flow_group {
struct mlx5_list_entry entry;
+ struct rte_eth_dev *dev; /* Reference to corresponding device. */
struct mlx5dr_table *tbl; /* HWS table object. */
struct mlx5_hw_jump_action jump; /* Jump action. */
enum mlx5dr_table_type type; /* Table type. */
@@ -1483,6 +1489,9 @@ void flow_hw_clear_port_info(struct rte_eth_dev *dev);
void flow_hw_init_tags_set(struct rte_eth_dev *dev);
void flow_hw_clear_tags_set(struct rte_eth_dev *dev);
+int flow_hw_create_vport_action(struct rte_eth_dev *dev);
+void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);
+
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
@@ -2055,7 +2064,7 @@ int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
uint16_t udp_dport,
const struct rte_flow_item *item,
uint64_t item_flags,
- const struct rte_flow_attr *attr,
+ bool root,
struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
uint64_t item_flags,
@@ -2312,4 +2321,15 @@ int flow_dv_translate_items_hws(const struct rte_flow_item *items,
uint32_t key_type, uint64_t *item_flags,
uint8_t *match_criteria,
struct rte_flow_error *error);
+
+int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
+ uint16_t *proxy_port_id,
+ struct rte_flow_error *error);
+
+int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
+
+int mlx5_flow_hw_esw_create_mgr_sq_miss_flow(struct rte_eth_dev *dev);
+int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
+ uint32_t txq);
+int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
#endif /* RTE_PMD_MLX5_FLOW_H_ */
@@ -2460,8 +2460,8 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
* Previous validated item in the pattern items.
* @param[in] gtp_item
* Previous GTP item specification.
- * @param[in] attr
- * Pointer to flow attributes.
+ * @param root
+ * Whether the flow rule is created on the root table.
* @param[out] error
* Pointer to error structure.
*
@@ -2472,7 +2472,7 @@ static int
flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
uint64_t last_item,
const struct rte_flow_item *gtp_item,
- const struct rte_flow_attr *attr,
+ bool root,
struct rte_flow_error *error)
{
const struct rte_flow_item_gtp *gtp_spec;
@@ -2497,7 +2497,7 @@ flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
"GTP E flag must be 1 to match GTP PSC");
/* Check the flow is not created in group zero. */
- if (!attr->transfer && !attr->group)
+ if (root)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"GTP PSC is not supported for group 0");
@@ -3362,20 +3362,19 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
/**
* Indicates whether ASO aging is supported.
*
- * @param[in] sh
- * Pointer to shared device context structure.
- * @param[in] attr
- * Attributes of flow that includes AGE action.
+ * @param[in] priv
+ * Pointer to device private context structure.
+ * @param[in] root
+ * Whether action is on root table.
*
* @return
* True when ASO aging is supported, false otherwise.
*/
static inline bool
-flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,
- const struct rte_flow_attr *attr)
+flow_hit_aso_supported(const struct mlx5_priv *priv, bool root)
{
- MLX5_ASSERT(sh && attr);
- return (sh->flow_hit_aso_en && (attr->transfer || attr->group));
+ MLX5_ASSERT(priv);
+ return (priv->sh->flow_hit_aso_en && !root);
}
/**
@@ -3387,8 +3386,8 @@ flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,
* Indicator if action is shared.
* @param[in] action_flags
* Holds the actions detected until now.
- * @param[in] attr
- * Attributes of flow that includes this action.
+ * @param[in] root
+ * Whether action is on root table.
* @param[out] error
* Pointer to error structure.
*
@@ -3398,7 +3397,7 @@ flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
uint64_t action_flags,
- const struct rte_flow_attr *attr,
+ bool root,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -3410,7 +3409,7 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"duplicate count actions set");
if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
- !flow_hit_aso_supported(priv->sh, attr))
+ !flow_hit_aso_supported(priv, root))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"old age and indirect count combination is not supported");
@@ -3641,8 +3640,8 @@ flow_dv_validate_action_raw_encap_decap
* Holds the actions detected until now.
* @param[in] item_flags
* The items found in this flow rule.
- * @param[in] attr
- * Pointer to flow attributes.
+ * @param root
+ * Whether action is on root table.
* @param[out] error
* Pointer to error structure.
*
@@ -3653,12 +3652,12 @@ static int
flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
uint64_t action_flags,
uint64_t item_flags,
- const struct rte_flow_attr *attr,
+ bool root,
struct rte_flow_error *error)
{
RTE_SET_USED(dev);
- if (attr->group == 0 && !attr->transfer)
+ if (root)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
@@ -4908,6 +4907,8 @@ flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
* Pointer to the modify action.
* @param[in] attr
* Pointer to the flow attributes.
+ * @param root
+ * Whether action is on root table.
* @param[out] error
* Pointer to error structure.
*
@@ -4920,6 +4921,7 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
const uint64_t action_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
+ bool root,
struct rte_flow_error *error)
{
int ret = 0;
@@ -4967,7 +4969,7 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
}
if (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&
action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {
- if (!attr->transfer && !attr->group)
+ if (root)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"modify field action is not"
@@ -5057,8 +5059,7 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
action_modify_field->src.field == RTE_FLOW_FIELD_IPV4_ECN ||
action_modify_field->dst.field == RTE_FLOW_FIELD_IPV6_ECN ||
action_modify_field->src.field == RTE_FLOW_FIELD_IPV6_ECN)
- if (!hca_attr->modify_outer_ip_ecn &&
- !attr->transfer && !attr->group)
+ if (!hca_attr->modify_outer_ip_ecn && root)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"modifications of the ECN for current firmware is not supported");
@@ -5092,11 +5093,12 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
bool external, struct rte_flow_error *error)
{
uint32_t target_group, table = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret = 0;
struct flow_grp_info grp_info = {
.external = !!external,
.transfer = !!attributes->transfer,
- .fdb_def_rule = 1,
+ .fdb_def_rule = !!priv->fdb_def_rule,
.std_tbl_fix = 0
};
if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
@@ -5676,6 +5678,8 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
* Pointer to the COUNT action in sample action list.
* @param[out] fdb_mirror_limit
* Pointer to the FDB mirror limitation flag.
+ * @param root
+ * Whether action is on root table.
* @param[out] error
* Pointer to error structure.
*
@@ -5692,6 +5696,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
const struct rte_flow_action_rss **sample_rss,
const struct rte_flow_action_count **count,
int *fdb_mirror_limit,
+ bool root,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -5793,7 +5798,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_validate_action_count
(dev, false, *action_flags | sub_action_flags,
- attr, error);
+ root, error);
if (ret < 0)
return ret;
*count = act->conf;
@@ -7273,7 +7278,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
case RTE_FLOW_ITEM_TYPE_VXLAN:
ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
items, item_flags,
- attr, error);
+ is_root, error);
if (ret < 0)
return ret;
last_item = MLX5_FLOW_LAYER_VXLAN;
@@ -7367,7 +7372,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
break;
case RTE_FLOW_ITEM_TYPE_GTP_PSC:
ret = flow_dv_validate_item_gtp_psc(items, last_item,
- gtp_item, attr,
+ gtp_item, is_root,
error);
if (ret < 0)
return ret;
@@ -7584,7 +7589,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_validate_action_count(dev, shared_count,
action_flags,
- attr, error);
+ is_root, error);
if (ret < 0)
return ret;
count = actions->conf;
@@ -7878,7 +7883,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
- if (!attr->transfer && !attr->group)
+ if (is_root)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
@@ -7903,7 +7908,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
* Validate the regular AGE action (using counter)
* mutual exclusion with indirect counter actions.
*/
- if (!flow_hit_aso_supported(priv->sh, attr)) {
+ if (!flow_hit_aso_supported(priv, is_root)) {
if (shared_count)
return rte_flow_error_set
(error, EINVAL,
@@ -7959,6 +7964,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
rss, &sample_rss,
&sample_count,
&fdb_mirror_limit,
+ is_root,
error);
if (ret < 0)
return ret;
@@ -7975,6 +7981,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
action_flags,
actions,
attr,
+ is_root,
error);
if (ret < 0)
return ret;
@@ -7988,8 +7995,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
ret = flow_dv_validate_action_aso_ct(dev, action_flags,
- item_flags, attr,
- error);
+ item_flags,
+ is_root, error);
if (ret < 0)
return ret;
action_flags |= MLX5_FLOW_ACTION_CT;
@@ -9189,15 +9196,18 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
if (MLX5_ITEM_VALID(item, key_type))
return;
MLX5_ITEM_UPDATE(item, key_type, vxlan_v, vxlan_m, &nic_mask);
- if (item->mask == &nic_mask &&
- ((!attr->group && !priv->sh->tunnel_header_0_1) ||
- (attr->group && !priv->sh->misc5_cap)))
+ if ((item->mask == &nic_mask) &&
+ ((!attr->group && !(attr->transfer && priv->fdb_def_rule) &&
+ !priv->sh->tunnel_header_0_1) ||
+ ((attr->group || (attr->transfer && priv->fdb_def_rule)) &&
+ !priv->sh->misc5_cap)))
vxlan_m = &rte_flow_item_vxlan_mask;
if ((priv->sh->steering_format_version ==
MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
dport != MLX5_UDP_PORT_VXLAN) ||
- (!attr->group && !attr->transfer) ||
- ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
+ (!attr->group && !(attr->transfer && priv->fdb_def_rule)) ||
+ ((attr->group || (attr->transfer && priv->fdb_def_rule)) &&
+ !priv->sh->misc5_cap)) {
misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
size = sizeof(vxlan_m->vni);
vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
@@ -14169,7 +14179,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
*/
if (action_flags & MLX5_FLOW_ACTION_AGE) {
if ((non_shared_age && count) ||
- !flow_hit_aso_supported(priv->sh, attr)) {
+ !flow_hit_aso_supported(priv, !dev_flow->dv.group)) {
/* Creates age by counters. */
cnt_act = flow_dv_prepare_counter
(dev, dev_flow,
@@ -18318,6 +18328,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev,
struct rte_flow_error *err)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ /* called from RTE API */
RTE_SET_USED(conf);
switch (action->type) {
@@ -18345,7 +18356,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev,
"Indirect age action not supported");
return flow_dv_validate_action_age(0, action, dev, err);
case RTE_FLOW_ACTION_TYPE_COUNT:
- return flow_dv_validate_action_count(dev, true, 0, NULL, err);
+ return flow_dv_validate_action_count(dev, true, 0, false, err);
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
if (!priv->sh->ct_aso_en)
return rte_flow_error_set(err, ENOTSUP,
@@ -18522,6 +18533,8 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
bool def_green = false;
bool def_yellow = false;
const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
+ /* Called from RTE API */
+ bool is_root = !(attr->group || (attr->transfer && priv->fdb_def_rule));
if (!dev_conf->dv_esw_en)
def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -18723,7 +18736,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
ret = flow_dv_validate_action_modify_field(dev,
- action_flags[i], act, attr, &flow_err);
+ action_flags[i], act, attr, is_root, &flow_err);
if (ret < 0)
return -rte_mtr_error_set(error,
ENOTSUP,
@@ -20,6 +20,14 @@
/* Default queue to flush the flows. */
#define MLX5_DEFAULT_FLUSH_QUEUE 0
+/* Maximum number of rules in control flow tables */
+#define MLX5_HW_CTRL_FLOW_NB_RULES (4096)
+
+/* Flow group for SQ miss default flows. */
+#define MLX5_HW_SQ_MISS_GROUP (UINT32_MAX)
+
+static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
/* DR action flags with different table. */
@@ -802,6 +810,77 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,
return 0;
}
+static int
+flow_hw_represented_port_compile(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action *action_start,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *action_mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_dst,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_ethdev *v = action->conf;
+ const struct rte_flow_action_ethdev *m = action_mask->conf;
+ int ret;
+
+ if (!attr->group)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "represented_port action cannot"
+ " be used on group 0");
+ if (!attr->transfer)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "represented_port action requires"
+ " transfer attribute");
+ if (attr->ingress || attr->egress)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "represented_port action cannot"
+ " be used with direction attributes");
+ if (!priv->master)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "represented_port acton must"
+ " be used on proxy port");
+ if (m && !!m->port_id) {
+ struct mlx5_priv *port_priv;
+
+ port_priv = mlx5_port_to_eswitch_info(v->port_id, false);
+ if (port_priv == NULL)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "port does not exist or unable to"
+ " obtain E-Switch info for port");
+ MLX5_ASSERT(priv->hw_vport != NULL);
+ if (priv->hw_vport[v->port_id]) {
+ acts->rule_acts[action_dst].action =
+ priv->hw_vport[v->port_id];
+ } else {
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot use represented_port action"
+ " with this port");
+ }
+ } else {
+ ret = __flow_hw_act_data_general_append
+ (priv, acts, action->type,
+ action - action_start, action_dst);
+ if (ret)
+ return rte_flow_error_set
+ (error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not enough memory to store"
+ " vport action");
+ }
+ return 0;
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -879,7 +958,7 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ACTION_TYPE_DROP:
acts->rule_acts[i++].action =
- priv->hw_drop[!!attr->group][type];
+ priv->hw_drop[!!attr->group];
break;
case RTE_FLOW_ACTION_TYPE_MARK:
acts->mark = true;
@@ -1012,6 +1091,13 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
if (err)
goto err;
break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ if (flow_hw_represented_port_compile
+ (dev, attr, action_start, actions,
+ masks, acts, i, error))
+ goto err;
+ i++;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -1334,11 +1420,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
struct mlx5dr_rule_action *rule_acts,
uint32_t *acts_num)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_template_table *table = job->flow->table;
struct mlx5_action_construct_data *act_data;
const struct rte_flow_action *action;
const struct rte_flow_action_raw_encap *raw_encap_data;
const struct rte_flow_item *enc_item = NULL;
+ const struct rte_flow_action_ethdev *port_action = NULL;
uint8_t *buf = job->encap_data;
struct rte_flow_attr attr = {
.ingress = 1,
@@ -1458,6 +1546,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
if (ret)
return -1;
break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ port_action = action->conf;
+ if (!priv->hw_vport[port_action->port_id])
+ return -1;
+ rule_acts[act_data->action_dst].action =
+ priv->hw_vport[port_action->port_id];
+ break;
default:
break;
}
@@ -1470,6 +1565,52 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
return 0;
}
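+
+/**
+ * Get the pattern items to be used for flow rule creation.
+ *
+ * If the pattern template at @p pattern_template_index was created with
+ * implicit port matching, a REPRESENTED_PORT item matching on the creating
+ * port is prepended to a copy of the user items kept in the job descriptor.
+ *
+ * @return
+ *   Pattern items to pass to the HWS layer, or NULL if the pattern does not
+ *   fit in MLX5_HW_MAX_ITEMS entries.
+ */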
+static const struct rte_flow_item *
+flow_hw_get_rule_items(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *table,
+ const struct rte_flow_item items[],
+ uint8_t pattern_template_index,
+ struct mlx5_hw_q_job *job)
+{
+ if (table->its[pattern_template_index]->implicit_port) {
+ const struct rte_flow_item *curr_item;
+ unsigned int nb_items;
+ bool found_end;
+ unsigned int i;
+
+ /* Count number of pattern items. */
+ nb_items = 0;
+ found_end = false;
+ for (curr_item = items; !found_end; ++curr_item) {
+ ++nb_items;
+ if (curr_item->type == RTE_FLOW_ITEM_TYPE_END)
+ found_end = true;
+ }
+ /* Prepend represented port item. */
+ job->port_spec = (struct rte_flow_item_ethdev){
+ .port_id = dev->data->port_id,
+ };
+ job->items[0] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+ .spec = &job->port_spec,
+ };
+ found_end = false;
+ for (i = 1; i < MLX5_HW_MAX_ITEMS && i - 1 < nb_items; ++i) {
+ job->items[i] = items[i - 1];
+ if (items[i - 1].type == RTE_FLOW_ITEM_TYPE_END) {
+ found_end = true;
+ break;
+ }
+ }
+ if (i >= MLX5_HW_MAX_ITEMS && !found_end) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ return job->items;
+ }
+ return items;
+}
+
/**
* Enqueue HW steering flow creation.
*
@@ -1521,6 +1662,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
struct mlx5_hw_actions *hw_acts;
struct rte_flow_hw *flow;
struct mlx5_hw_q_job *job;
+ const struct rte_flow_item *rule_items;
uint32_t acts_num, flow_idx;
int ret;
@@ -1547,15 +1689,23 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
job->user_data = user_data;
rule_attr.user_data = job;
hw_acts = &table->ats[action_template_index].acts;
- /* Construct the flow action array based on the input actions.*/
- flow_hw_actions_construct(dev, job, hw_acts, pattern_template_index,
- actions, rule_acts, &acts_num);
+ /* Construct the flow actions based on the input actions.*/
+ if (flow_hw_actions_construct(dev, job, hw_acts, pattern_template_index,
+ actions, rule_acts, &acts_num)) {
+ rte_errno = EINVAL;
+ goto free;
+ }
+ rule_items = flow_hw_get_rule_items(dev, table, items,
+ pattern_template_index, job);
+ if (!rule_items)
+ goto free;
ret = mlx5dr_rule_create(table->matcher,
- pattern_template_index, items,
+ pattern_template_index, rule_items,
action_template_index, rule_acts,
&rule_attr, &flow->rule);
if (likely(!ret))
return (struct rte_flow *)flow;
+free:
/* Flow created fail, return the descriptor and flow memory. */
mlx5_ipool_free(table->flow, flow_idx);
priv->hw_q[queue].job_idx++;
@@ -1736,7 +1886,9 @@ __flow_hw_pull_comp(struct rte_eth_dev *dev,
struct rte_flow_op_result comp[BURST_THR];
int ret, i, empty_loop = 0;
- flow_hw_push(dev, queue, error);
+ ret = flow_hw_push(dev, queue, error);
+ if (ret < 0)
+ return ret;
while (pending_rules) {
ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
if (ret < 0)
@@ -2021,8 +2173,12 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
int i;
+ uint32_t fidx = 1;
- if (table->refcnt) {
+ /* Build ipool allocated object bitmap. */
+ mlx5_ipool_flush_cache(table->flow);
+ /* Check if ipool has allocated objects. */
+ if (table->refcnt || mlx5_ipool_get_next(table->flow, &fidx)) {
DRV_LOG(WARNING, "Table %p is still in using.", (void *)table);
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -2101,7 +2257,51 @@ flow_hw_validate_action_modify_field(const struct rte_flow_action *action,
}
static int
-flow_hw_action_validate(const struct rte_flow_action actions[],
+flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_ethdev *action_conf = action->conf;
+ const struct rte_flow_action_ethdev *mask_conf = mask->conf;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!priv->sh->config.dv_esw_en)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot use represented_port actions"
+ " without an E-Switch");
+ if (mask_conf->port_id) {
+ struct mlx5_priv *port_priv;
+ struct mlx5_priv *dev_priv;
+
+ port_priv = mlx5_port_to_eswitch_info(action_conf->port_id, false);
+ if (!port_priv)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "failed to obtain E-Switch"
+ " info for port");
+ dev_priv = mlx5_dev_to_eswitch_info(dev);
+ if (!dev_priv)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "failed to obtain E-Switch"
+ " info for transfer proxy");
+ if (port_priv->domain_id != dev_priv->domain_id)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "cannot forward to port from"
+ " a different E-Switch");
+ }
+ return 0;
+}
+
+static int
+flow_hw_action_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
const struct rte_flow_action masks[],
struct rte_flow_error *error)
{
@@ -2164,6 +2364,12 @@ flow_hw_action_validate(const struct rte_flow_action actions[],
if (ret < 0)
return ret;
break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ ret = flow_hw_validate_action_represented_port
+ (dev, action, mask, error);
+ if (ret < 0)
+ return ret;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -2205,7 +2411,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
int len, act_len, mask_len, i;
struct rte_flow_actions_template *at;
- if (flow_hw_action_validate(actions, masks, error))
+ if (flow_hw_action_validate(dev, actions, masks, error))
return NULL;
act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,
NULL, 0, actions, error);
@@ -2288,6 +2494,46 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+static struct rte_flow_item *
+flow_hw_copy_prepend_port_item(const struct rte_flow_item *items,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *curr_item;
+ struct rte_flow_item *copied_items;
+ bool found_end;
+ unsigned int nb_items;
+ unsigned int i;
+ size_t size;
+
+ /* Count number of pattern items. */
+ nb_items = 0;
+ found_end = false;
+ for (curr_item = items; !found_end; ++curr_item) {
+ ++nb_items;
+ if (curr_item->type == RTE_FLOW_ITEM_TYPE_END)
+ found_end = true;
+ }
+ /* Allocate new array of items and prepend REPRESENTED_PORT item. */
+ size = sizeof(*copied_items) * (nb_items + 1);
+ copied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, rte_socket_id());
+ if (!copied_items) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate item template");
+ return NULL;
+ }
+ copied_items[0] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+ .spec = NULL,
+ .last = NULL,
+ .mask = &rte_flow_item_ethdev_mask,
+ };
+ for (i = 1; i < nb_items + 1; ++i)
+ copied_items[i] = items[i - 1];
+ return copied_items;
+}
+
/**
* Create flow item template.
*
@@ -2311,9 +2557,35 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow_pattern_template *it;
+ struct rte_flow_item *copied_items = NULL;
+ const struct rte_flow_item *tmpl_items;
+ if (priv->sh->config.dv_esw_en && attr->ingress) {
+ /*
+ * Disallow pattern template with ingress and egress/transfer
+ * attributes in order to forbid implicit port matching
+ * on egress and transfer traffic.
+ */
+ if (attr->egress || attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "item template for ingress traffic"
+ " cannot be used for egress/transfer"
+ " traffic when E-Switch is enabled");
+ return NULL;
+ }
+ copied_items = flow_hw_copy_prepend_port_item(items, error);
+ if (!copied_items)
+ return NULL;
+ tmpl_items = copied_items;
+ } else {
+ tmpl_items = items;
+ }
it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
if (!it) {
+ if (copied_items)
+ mlx5_free(copied_items);
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
@@ -2321,8 +2593,10 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
return NULL;
}
it->attr = *attr;
- it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
+ it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
if (!it->mt) {
+ if (copied_items)
+ mlx5_free(copied_items);
mlx5_free(it);
rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -2330,9 +2604,12 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
"cannot create match template");
return NULL;
}
- it->item_flags = flow_hw_rss_item_flags_get(items);
+ it->item_flags = flow_hw_rss_item_flags_get(tmpl_items);
+ it->implicit_port = !!copied_items;
__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
+ if (copied_items)
+ mlx5_free(copied_items);
return it;
}
@@ -2458,6 +2735,7 @@ flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
goto error;
grp_data->jump.root_action = jump;
}
+ grp_data->dev = dev;
grp_data->idx = idx;
grp_data->group_id = attr->group;
grp_data->type = dr_tbl_attr.type;
@@ -2526,7 +2804,8 @@ flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
struct rte_flow_attr *attr =
(struct rte_flow_attr *)ctx->data;
- return (grp_data->group_id != attr->group) ||
+ return (grp_data->dev != ctx->dev) ||
+ (grp_data->group_id != attr->group) ||
((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
attr->transfer) ||
((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
@@ -2589,6 +2868,545 @@ flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
}
+/**
+ * Create and cache a vport action for given @p dev port. vport actions
+ * cache is used in HWS with FDB flows.
+ *
+ * This function does not create any function if proxy port for @p dev port
+ * was not configured for HW Steering.
+ *
+ * This function assumes that E-Switch is enabled and PMD is running with
+ * HW Steering configured.
+ *
+ * @param dev
+ * Pointer to Ethernet device which will be the action destination.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+flow_hw_create_vport_action(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_eth_dev *proxy_dev;
+ struct mlx5_priv *proxy_priv;
+ uint16_t port_id = dev->data->port_id;
+ uint16_t proxy_port_id = port_id;
+ int ret;
+
+ ret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);
+ if (ret)
+ return ret;
+ proxy_dev = &rte_eth_devices[proxy_port_id];
+ proxy_priv = proxy_dev->data->dev_private;
+ if (!proxy_priv->hw_vport)
+ return 0;
+ if (proxy_priv->hw_vport[port_id]) {
+ DRV_LOG(ERR, "port %u HWS vport action already created",
+ port_id);
+ return -EINVAL;
+ }
+ proxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
+ (proxy_priv->dr_ctx, priv->dev_port,
+ MLX5DR_ACTION_FLAG_HWS_FDB);
+ if (!proxy_priv->hw_vport[port_id]) {
+ DRV_LOG(ERR, "port %u unable to create HWS vport action",
+ port_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Destroys the vport action associated with @p dev device and removes it
+ * from the vport action cache.
+ *
+ * This function does not destroy any action if there is no action cached
+ * for @p dev or proxy port was not configured for HW Steering.
+ *
+ * This function assumes that E-Switch is enabled and PMD is running with
+ * HW Steering configured.
+ *
+ * @param dev
+ * Pointer to Ethernet device which will be the action destination.
+ */
+void
+flow_hw_destroy_vport_action(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev *proxy_dev;
+ struct mlx5_priv *proxy_priv;
+ uint16_t port_id = dev->data->port_id;
+ uint16_t proxy_port_id = port_id;
+
+ if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))
+ return;
+ proxy_dev = &rte_eth_devices[proxy_port_id];
+ proxy_priv = proxy_dev->data->dev_private;
+ if (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])
+ return;
+ mlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);
+ proxy_priv->hw_vport[port_id] = NULL;
+}
+
+static int
+flow_hw_create_vport_actions(struct mlx5_priv *priv)
+{
+ uint16_t port_id;
+
+ MLX5_ASSERT(!priv->hw_vport);
+ priv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,
+ 0, SOCKET_ID_ANY);
+ if (!priv->hw_vport)
+ return -ENOMEM;
+ DRV_LOG(DEBUG, "port %u :: creating vport actions", priv->dev_data->port_id);
+ DRV_LOG(DEBUG, "port %u :: domain_id=%u", priv->dev_data->port_id, priv->domain_id);
+ MLX5_ETH_FOREACH_DEV(port_id, NULL) {
+ struct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;
+
+ if (!port_priv ||
+ port_priv->domain_id != priv->domain_id)
+ continue;
+ DRV_LOG(DEBUG, "port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u",
+ priv->dev_data->port_id, port_id, port_priv->dev_port);
+ priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
+ (priv->dr_ctx, port_priv->dev_port,
+ MLX5DR_ACTION_FLAG_HWS_FDB);
+ DRV_LOG(DEBUG, "port %u :: priv->hw_vport[%u]=%p",
+ priv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);
+ if (!priv->hw_vport[port_id])
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+flow_hw_free_vport_actions(struct mlx5_priv *priv)
+{
+ uint16_t port_id;
+
+ if (!priv->hw_vport)
+ return;
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
+ if (priv->hw_vport[port_id])
+ mlx5dr_action_destroy(priv->hw_vport[port_id]);
+ mlx5_free(priv->hw_vport);
+ priv->hw_vport = NULL;
+}
+
+/**
+ * Creates a flow pattern template used to match on E-Switch Manager.
+ * This template is used to set up a table for SQ miss default flow.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Pointer to flow pattern template on success, NULL otherwise.
+ */
+static struct rte_flow_pattern_template *
+flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev)
+{
+ struct rte_flow_pattern_template_attr attr = {
+ .relaxed_matching = 0,
+ .transfer = 1,
+ };
+ struct rte_flow_item_ethdev port_spec = {
+ .port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
+ };
+ struct rte_flow_item_ethdev port_mask = {
+ .port_id = UINT16_MAX,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+ .spec = &port_spec,
+ .mask = &port_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+
+ return flow_hw_pattern_template_create(dev, &attr, items, NULL);
+}
+
+/**
+ * Creates a flow pattern template used to match on a TX queue.
+ * This template is used to set up a table for SQ miss default flow.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Pointer to flow pattern template on success, NULL otherwise.
+ */
+static struct rte_flow_pattern_template *
+flow_hw_create_ctrl_sq_pattern_template(struct rte_eth_dev *dev)
+{
+ struct rte_flow_pattern_template_attr attr = {
+ .relaxed_matching = 0,
+ .transfer = 1,
+ };
+ struct mlx5_rte_flow_item_tx_queue queue_mask = {
+ .queue = UINT32_MAX,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+ .mask = &queue_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+
+ return flow_hw_pattern_template_create(dev, &attr, items, NULL);
+}
+
+/**
+ * Creates a flow pattern template with unmasked represented port matching.
+ * This template is used to set up a table for default transfer flows
+ * directing packets to group 1.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Pointer to flow pattern template on success, NULL otherwise.
+ */
+static struct rte_flow_pattern_template *
+flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev)
+{
+ struct rte_flow_pattern_template_attr attr = {
+ .relaxed_matching = 0,
+ .transfer = 1,
+ };
+ struct rte_flow_item_ethdev port_mask = {
+ .port_id = UINT16_MAX,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+ .mask = &port_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+
+ return flow_hw_pattern_template_create(dev, &attr, items, NULL);
+}
+
+/**
+ * Creates a flow actions template with an unmasked JUMP action. Flows
+ * based on this template will perform a jump to some group. This template
+ * is used to set up tables for control flows.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param group
+ * Destination group for this action template.
+ *
+ * @return
+ * Pointer to flow actions template on success, NULL otherwise.
+ */
+static struct rte_flow_actions_template *
+flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,
+ uint32_t group)
+{
+ struct rte_flow_actions_template_attr attr = {
+ .transfer = 1,
+ };
+ struct rte_flow_action_jump jump_v = {
+ .group = group,
+ };
+ struct rte_flow_action_jump jump_m = {
+ .group = UINT32_MAX,
+ };
+ struct rte_flow_action actions_v[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump_v,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+ struct rte_flow_action actions_m[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump_m,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+
+ return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m,
+ NULL);
+}
+
+/**
+ * Creates a flow actions template with an unmasked REPRESENTED_PORT action.
+ * It is used to create control flow tables.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Pointer to flow actions template on success, NULL otherwise.
+ */
+static struct rte_flow_actions_template *
+flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev)
+{
+ struct rte_flow_actions_template_attr attr = {
+ .transfer = 1,
+ };
+ struct rte_flow_action_ethdev port_v = {
+ .port_id = 0,
+ };
+ struct rte_flow_action actions_v[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
+ .conf = &port_v,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+ struct rte_flow_action_ethdev port_m = {
+ .port_id = 0,
+ };
+ struct rte_flow_action actions_m[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
+ .conf = &port_m,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+
+ return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m,
+ NULL);
+}
+
+/**
+ * Creates a control flow table in group 0 used to redirect traffic
+ * originating from the E-Switch Manager to the SQ miss group.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param it
+ * Pointer to flow pattern template.
+ * @param at
+ * Pointer to flow actions template.
+ *
+ * @return
+ * Pointer to flow table on success, NULL otherwise.
+ */
+static struct rte_flow_template_table*
+flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,
+ struct rte_flow_pattern_template *it,
+ struct rte_flow_actions_template *at)
+{
+ struct rte_flow_template_table_attr attr = {
+ .flow_attr = {
+ .group = 0,
+ .priority = 0,
+ .ingress = 0,
+ .egress = 0,
+ .transfer = 1,
+ },
+ .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
+ };
+
+ return flow_hw_table_create(dev, &attr, &it, 1, &at, 1, NULL);
+}
+
+
+/**
+ * Creates a control flow table in the SQ miss group, used to match traffic
+ * on the SQ number and forward it to the destination port.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param it
+ * Pointer to flow pattern template.
+ * @param at
+ * Pointer to flow actions template.
+ *
+ * @return
+ * Pointer to flow table on success, NULL otherwise.
+ */
+static struct rte_flow_template_table*
+flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,
+ struct rte_flow_pattern_template *it,
+ struct rte_flow_actions_template *at)
+{
+ struct rte_flow_template_table_attr attr = {
+ .flow_attr = {
+ .group = MLX5_HW_SQ_MISS_GROUP,
+ .priority = 0,
+ .ingress = 0,
+ .egress = 0,
+ .transfer = 1,
+ },
+ .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
+ };
+
+ return flow_hw_table_create(dev, &attr, &it, 1, &at, 1, NULL);
+}
+
+/**
+ * Creates a control flow table used to transfer traffic
+ * from group 0 to group 1.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param it
+ * Pointer to flow pattern template.
+ * @param at
+ * Pointer to flow actions template.
+ *
+ * @return
+ * Pointer to flow table on success, NULL otherwise.
+ */
+static struct rte_flow_template_table *
+flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
+ struct rte_flow_pattern_template *it,
+ struct rte_flow_actions_template *at)
+{
+ struct rte_flow_template_table_attr attr = {
+ .flow_attr = {
+ .group = 0,
+ .priority = 15, /* TODO: Flow priority discovery. */
+ .ingress = 0,
+ .egress = 0,
+ .transfer = 1,
+ },
+ .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
+ };
+
+ return flow_hw_table_create(dev, &attr, &it, 1, &at, 1, NULL);
+}
+
+/**
+ * Creates a set of flow tables used to create control flows used
+ * when E-Switch is engaged.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+static __rte_unused int
+flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL;
+ struct rte_flow_pattern_template *sq_items_tmpl = NULL;
+ struct rte_flow_pattern_template *port_items_tmpl = NULL;
+ struct rte_flow_actions_template *jump_sq_actions_tmpl = NULL;
+ struct rte_flow_actions_template *port_actions_tmpl = NULL;
+ struct rte_flow_actions_template *jump_one_actions_tmpl = NULL;
+
+ /* Item templates */
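+ /*
+ * Default control flow layout created below:
+ * - group 0: traffic originating from the E-Switch Manager jumps to
+ * the SQ miss group,
+ * - SQ miss group: traffic is matched on the SQ number and forwarded
+ * to the destination port,
+ * - group 0, low priority: any port traffic jumps to group 1.
+ */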
+ esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev);
+ if (!esw_mgr_items_tmpl) {
+ DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
+ " template for control flows", dev->data->port_id);
+ goto error;
+ }
+ sq_items_tmpl = flow_hw_create_ctrl_sq_pattern_template(dev);
+ if (!sq_items_tmpl) {
+ DRV_LOG(ERR, "port %u failed to create SQ item template for"
+ " control flows", dev->data->port_id);
+ goto error;
+ }
+ port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev);
+ if (!port_items_tmpl) {
+ DRV_LOG(ERR, "port %u failed to create SQ item template for"
+ " control flows", dev->data->port_id);
+ goto error;
+ }
+ /* Action templates */
+ jump_sq_actions_tmpl = flow_hw_create_ctrl_jump_actions_template(dev,
+ MLX5_HW_SQ_MISS_GROUP);
+ if (!jump_sq_actions_tmpl) {
+ DRV_LOG(ERR, "port %u failed to create jump action template"
+ " for control flows", dev->data->port_id);
+ goto error;
+ }
+ port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev);
+ if (!port_actions_tmpl) {
+ DRV_LOG(ERR, "port %u failed to create port action template"
+ " for control flows", dev->data->port_id);
+ goto error;
+ }
+ jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template(dev, 1);
+ if (!jump_one_actions_tmpl) {
+ DRV_LOG(ERR, "port %u failed to create jump action template"
+ " for control flows", dev->data->port_id);
+ goto error;
+ }
+ /* Tables */
+ MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL);
+ priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table
+ (dev, esw_mgr_items_tmpl, jump_sq_actions_tmpl);
+ if (!priv->hw_esw_sq_miss_root_tbl) {
+ DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
+ " for control flows", dev->data->port_id);
+ goto error;
+ }
+ MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL);
+ priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, sq_items_tmpl,
+ port_actions_tmpl);
+ if (!priv->hw_esw_sq_miss_tbl) {
+ DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
+ " for control flows", dev->data->port_id);
+ goto error;
+ }
+ MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL);
+ priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl,
+ jump_one_actions_tmpl);
+ if (!priv->hw_esw_zero_tbl) {
+ DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
+ " for control flows", dev->data->port_id);
+ goto error;
+ }
+ return 0;
+error:
+ if (priv->hw_esw_zero_tbl) {
+ flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL);
+ priv->hw_esw_zero_tbl = NULL;
+ }
+ if (priv->hw_esw_sq_miss_tbl) {
+ flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL);
+ priv->hw_esw_sq_miss_tbl = NULL;
+ }
+ if (priv->hw_esw_sq_miss_root_tbl) {
+ flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL);
+ priv->hw_esw_sq_miss_root_tbl = NULL;
+ }
+ if (jump_one_actions_tmpl)
+ flow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL);
+ if (port_actions_tmpl)
+ flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL);
+ if (jump_sq_actions_tmpl)
+ flow_hw_actions_template_destroy(dev, jump_sq_actions_tmpl, NULL);
+ if (port_items_tmpl)
+ flow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL);
+ if (sq_items_tmpl)
+ flow_hw_pattern_template_destroy(dev, sq_items_tmpl, NULL);
+ if (esw_mgr_items_tmpl)
+ flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL);
+ return -EINVAL;
+}
+
/**
* Configure port HWS resources.
*
@@ -2606,7 +3424,6 @@ flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-
static int
flow_hw_configure(struct rte_eth_dev *dev,
const struct rte_flow_port_attr *port_attr,
@@ -2629,6 +3446,14 @@ flow_hw_configure(struct rte_eth_dev *dev,
.free = mlx5_free,
.type = "mlx5_hw_action_construct_data",
};
+ /*
+ * One extra queue is added for PMD internal usage.
+ * The last queue will be reserved for the PMD.
+ */
+ uint16_t nb_q_updated;
+ struct rte_flow_queue_attr **_queue_attr = NULL;
+ struct rte_flow_queue_attr ctrl_queue_attr = {0};
+ bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
+ int ret;
if (!port_attr || !nb_queue || !queue_attr) {
rte_errno = EINVAL;
@@ -2637,7 +3462,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
/* In case re-configuring, release existing context at first. */
if (priv->dr_ctx) {
/* */
- for (i = 0; i < nb_queue; i++) {
+ for (i = 0; i < priv->nb_queue; i++) {
hw_q = &priv->hw_q[i];
/* Make sure all queues are empty. */
if (hw_q->size != hw_q->job_idx) {
@@ -2647,26 +3472,42 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
flow_hw_resource_release(dev);
}
+ ctrl_queue_attr.size = queue_attr[0]->size;
+ nb_q_updated = nb_queue + 1;
+ _queue_attr = mlx5_malloc(MLX5_MEM_ZERO,
+ nb_q_updated *
+ sizeof(struct rte_flow_queue_attr *),
+ 64, SOCKET_ID_ANY);
+ if (!_queue_attr) {
+ rte_errno = ENOMEM;
+ goto err;
+ }
+
+ memcpy(_queue_attr, queue_attr,
+ sizeof(void *) * nb_queue);
+ _queue_attr[nb_queue] = &ctrl_queue_attr;
priv->acts_ipool = mlx5_ipool_create(&cfg);
if (!priv->acts_ipool)
goto err;
/* Allocate the queue job descriptor LIFO. */
- mem_size = sizeof(priv->hw_q[0]) * nb_queue;
- for (i = 0; i < nb_queue; i++) {
+ mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
+ for (i = 0; i < nb_q_updated; i++) {
/*
* Check if the queues' size are all the same as the
* limitation from HWS layer.
*/
- if (queue_attr[i]->size != queue_attr[0]->size) {
+ if (_queue_attr[i]->size != _queue_attr[0]->size) {
rte_errno = EINVAL;
goto err;
}
mem_size += (sizeof(struct mlx5_hw_q_job *) +
+ sizeof(struct mlx5_hw_q_job) +
sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
sizeof(struct mlx5_modification_cmd) *
MLX5_MHDR_MAX_CMD +
- sizeof(struct mlx5_hw_q_job)) *
- queue_attr[0]->size;
+ sizeof(struct rte_flow_item) *
+ MLX5_HW_MAX_ITEMS) *
+ _queue_attr[i]->size;
}
priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
64, SOCKET_ID_ANY);
@@ -2674,58 +3515,82 @@ flow_hw_configure(struct rte_eth_dev *dev,
rte_errno = ENOMEM;
goto err;
}
- for (i = 0; i < nb_queue; i++) {
+ for (i = 0; i < nb_q_updated; i++) {
uint8_t *encap = NULL;
struct mlx5_modification_cmd *mhdr_cmd = NULL;
+ struct rte_flow_item *items = NULL;
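+ /*
+ * Each queue's storage is carved from one contiguous allocation:
+ * job pointer array, job descriptors, modify-header commands,
+ * encap data buffers, then pattern item arrays.
+ */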
- priv->hw_q[i].job_idx = queue_attr[i]->size;
- priv->hw_q[i].size = queue_attr[i]->size;
+ priv->hw_q[i].job_idx = _queue_attr[i]->size;
+ priv->hw_q[i].size = _queue_attr[i]->size;
if (i == 0)
priv->hw_q[i].job = (struct mlx5_hw_q_job **)
- &priv->hw_q[nb_queue];
+ &priv->hw_q[nb_q_updated];
else
priv->hw_q[i].job = (struct mlx5_hw_q_job **)
- &job[queue_attr[i - 1]->size];
+ &job[_queue_attr[i - 1]->size - 1].items
+ [MLX5_HW_MAX_ITEMS];
job = (struct mlx5_hw_q_job *)
- &priv->hw_q[i].job[queue_attr[i]->size];
- mhdr_cmd = (struct mlx5_modification_cmd *)&job[queue_attr[i]->size];
- encap = (uint8_t *)&mhdr_cmd[queue_attr[i]->size * MLX5_MHDR_MAX_CMD];
- for (j = 0; j < queue_attr[i]->size; j++) {
+ &priv->hw_q[i].job[_queue_attr[i]->size];
+ mhdr_cmd = (struct mlx5_modification_cmd *)
+ &job[_queue_attr[i]->size];
+ encap = (uint8_t *)
+ &mhdr_cmd[_queue_attr[i]->size * MLX5_MHDR_MAX_CMD];
+ items = (struct rte_flow_item *)
+ &encap[_queue_attr[i]->size * MLX5_ENCAP_MAX_LEN];
+ for (j = 0; j < _queue_attr[i]->size; j++) {
job[j].mhdr_cmd = &mhdr_cmd[j * MLX5_MHDR_MAX_CMD];
job[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN];
+ job[j].items = &items[j * MLX5_HW_MAX_ITEMS];
priv->hw_q[i].job[j] = &job[j];
}
}
dr_ctx_attr.pd = priv->sh->cdev->pd;
- dr_ctx_attr.queues = nb_queue;
+ dr_ctx_attr.queues = nb_q_updated;
/* Queue sizes should all be the same. Take the first one. */
- dr_ctx_attr.queue_size = queue_attr[0]->size;
+ dr_ctx_attr.queue_size = _queue_attr[0]->size;
dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
/* rte_errno has been updated by HWS layer. */
if (!dr_ctx)
goto err;
priv->dr_ctx = dr_ctx;
- priv->nb_queue = nb_queue;
+ priv->nb_queue = nb_q_updated;
+ rte_spinlock_init(&priv->hw_ctrl_lock);
+ LIST_INIT(&priv->hw_ctrl_flows);
/* Add global actions. */
for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
- for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
- priv->hw_drop[i][j] = mlx5dr_action_create_dest_drop
- (priv->dr_ctx, mlx5_hw_act_flag[i][j]);
- if (!priv->hw_drop[i][j])
- goto err;
- }
+ uint32_t act_flags = 0;
+
+ act_flags = mlx5_hw_act_flag[i][0] | mlx5_hw_act_flag[i][1];
+ if (is_proxy)
+ act_flags |= mlx5_hw_act_flag[i][2];
+ priv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);
+ if (!priv->hw_drop[i])
+ goto err;
priv->hw_tag[i] = mlx5dr_action_create_tag
(priv->dr_ctx, mlx5_hw_act_flag[i][0]);
if (!priv->hw_tag[i])
goto err;
}
+ if (is_proxy) {
+ ret = flow_hw_create_vport_actions(priv);
+ if (ret) {
+ rte_errno = -ret;
+ goto err;
+ }
+ ret = flow_hw_create_ctrl_tables(dev);
+ if (ret) {
+ rte_errno = -ret;
+ goto err;
+ }
+ }
+ if (_queue_attr)
+ mlx5_free(_queue_attr);
return 0;
err:
+ flow_hw_free_vport_actions(priv);
for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
- for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
- if (priv->hw_drop[i][j])
- mlx5dr_action_destroy(priv->hw_drop[i][j]);
- }
+ if (priv->hw_drop[i])
+ mlx5dr_action_destroy(priv->hw_drop[i]);
if (priv->hw_tag[i])
mlx5dr_action_destroy(priv->hw_tag[i]);
}
@@ -2737,6 +3602,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
mlx5_ipool_destroy(priv->acts_ipool);
priv->acts_ipool = NULL;
}
+ if (_queue_attr)
+ mlx5_free(_queue_attr);
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"fail to configure port");
@@ -2755,10 +3622,11 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
struct rte_flow_template_table *tbl;
struct rte_flow_pattern_template *it;
struct rte_flow_actions_template *at;
- int i, j;
+ int i;
if (!priv->dr_ctx)
return;
+ flow_hw_flush_all_ctrl_flows(dev);
while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
tbl = LIST_FIRST(&priv->flow_hw_tbl);
flow_hw_table_destroy(dev, tbl, NULL);
@@ -2772,13 +3640,12 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
flow_hw_actions_template_destroy(dev, at, NULL);
}
for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
- for (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {
- if (priv->hw_drop[i][j])
- mlx5dr_action_destroy(priv->hw_drop[i][j]);
- }
+ if (priv->hw_drop[i])
+ mlx5dr_action_destroy(priv->hw_drop[i]);
if (priv->hw_tag[i])
mlx5dr_action_destroy(priv->hw_tag[i]);
}
+ flow_hw_free_vport_actions(priv);
if (priv->acts_ipool) {
mlx5_ipool_destroy(priv->acts_ipool);
priv->acts_ipool = NULL;
@@ -3021,4 +3888,397 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_query = flow_dv_action_query,
};
+static uint32_t
+flow_hw_get_ctrl_queue(struct mlx5_priv *priv)
+{
+ MLX5_ASSERT(priv->nb_queue > 0);
+ return priv->nb_queue - 1;
+}
+
+/**
+ * Creates a control flow using flow template API on @p proxy_dev device,
+ * on behalf of @p owner_dev device.
+ *
+ * This function uses locks internally to synchronize access to the
+ * flow queue.
+ *
+ * Created flow is stored in private list associated with @p proxy_dev device.
+ *
+ * @param owner_dev
+ * Pointer to Ethernet device on behalf of which flow is created.
+ * @param proxy_dev
+ * Pointer to Ethernet device on which flow is created.
+ * @param table
+ * Pointer to flow table.
+ * @param items
+ * Pointer to flow rule items.
+ * @param item_template_idx
+ * Index of an item template associated with @p table.
+ * @param actions
+ * Pointer to flow rule actions.
+ * @param action_template_idx
+ * Index of an action template associated with @p table.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno set.
+ */
+static __rte_unused int
+flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
+ struct rte_eth_dev *proxy_dev,
+ struct rte_flow_template_table *table,
+ struct rte_flow_item items[],
+ uint8_t item_template_idx,
+ struct rte_flow_action actions[],
+ uint8_t action_template_idx)
+{
+ struct mlx5_priv *priv = proxy_dev->data->dev_private;
+ uint32_t queue = flow_hw_get_ctrl_queue(priv);
+ struct rte_flow_op_attr op_attr = {
+ .postpone = 0,
+ };
+ struct rte_flow *flow = NULL;
+ struct mlx5_hw_ctrl_flow *entry = NULL;
+ int ret;
+
+ rte_spinlock_lock(&priv->hw_ctrl_lock);
+ entry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),
+ 0, SOCKET_ID_ANY);
+ if (!entry) {
+ DRV_LOG(ERR, "port %u not enough memory to create control flows",
+ proxy_dev->data->port_id);
+ rte_errno = ENOMEM;
+ ret = -rte_errno;
+ goto error;
+ }
+ flow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,
+ items, item_template_idx,
+ actions, action_template_idx,
+ NULL, NULL);
+ if (!flow) {
+ DRV_LOG(ERR, "port %u failed to enqueue create control"
+ " flow operation", proxy_dev->data->port_id);
+ ret = -rte_errno;
+ goto error;
+ }
+ ret = flow_hw_push(proxy_dev, queue, NULL);
+ if (ret) {
+ DRV_LOG(ERR, "port %u failed to drain control flow queue",
+ proxy_dev->data->port_id);
+ goto error;
+ }
+ ret = __flow_hw_pull_comp(proxy_dev, queue, 1, NULL);
+ if (ret) {
+ DRV_LOG(ERR, "port %u failed to insert control flow",
+ proxy_dev->data->port_id);
+ rte_errno = EINVAL;
+ ret = -rte_errno;
+ goto error;
+ }
+ entry->owner_dev = owner_dev;
+ entry->flow = flow;
+ LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
+ rte_spinlock_unlock(&priv->hw_ctrl_lock);
+ return 0;
+error:
+ if (entry)
+ mlx5_free(entry);
+ rte_spinlock_unlock(&priv->hw_ctrl_lock);
+ return ret;
+}
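For reference, the helper above turns the asynchronous flow API into a synchronous call: enqueue the create operation on the PMD-private queue, push it, then poll until the single completion arrives. A minimal sketch of the same enqueue/push/pull pattern built on the public rte_flow asynchronous API is shown below; port_id, queue_id, table, pattern and actions are assumed to be prepared by the caller.

#include <rte_flow.h>

/* Sketch only: create one rule synchronously on top of the async API,
 * mirroring what flow_hw_create_ctrl_flow() does internally.
 */
static struct rte_flow *
create_rule_sync(uint16_t port_id, uint32_t queue_id,
		 struct rte_flow_template_table *table,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[])
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	struct rte_flow_op_result res;
	struct rte_flow_error error;
	struct rte_flow *flow;
	int ret;

	/* Enqueue the create operation on the selected flow queue. */
	flow = rte_flow_async_create(port_id, queue_id, &op_attr, table,
				     pattern, 0, actions, 0, NULL, &error);
	if (flow == NULL)
		return NULL;
	/* Push the queued operation towards the hardware. */
	if (rte_flow_push(port_id, queue_id, &error) < 0)
		return NULL;
	/* Poll until the single completion is returned. */
	do {
		ret = rte_flow_pull(port_id, queue_id, &res, 1, &error);
	} while (ret == 0);
	if (ret < 0 || res.status != RTE_FLOW_OP_SUCCESS)
		return NULL;
	return flow;
}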
+
+/**
+ * Destroys a control flow @p flow using flow template API on @p dev device.
+ *
+ * This function uses locks internally to synchronize access to the
+ * flow queue.
+ *
+ * If the @p flow is stored on any private list/pool, then caller must free up
+ * the relevant resources.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to flow rule.
+ *
+ * @return
+ * 0 on success, non-zero value otherwise.
+ */
+static int
+flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t queue = flow_hw_get_ctrl_queue(priv);
+ struct rte_flow_op_attr op_attr = {
+ .postpone = 0,
+ };
+ int ret;
+
+ rte_spinlock_lock(&priv->hw_ctrl_lock);
+ ret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);
+ if (ret) {
+ DRV_LOG(ERR, "port %u failed to enqueue destroy control"
+ " flow operation", dev->data->port_id);
+ goto exit;
+ }
+ ret = flow_hw_push(dev, queue, NULL);
+ if (ret) {
+ DRV_LOG(ERR, "port %u failed to drain control flow queue",
+ dev->data->port_id);
+ goto exit;
+ }
+ ret = __flow_hw_pull_comp(dev, queue, 1, NULL);
+ if (ret) {
+ DRV_LOG(ERR, "port %u failed to destroy control flow",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ ret = -rte_errno;
+ goto exit;
+ }
+exit:
+ rte_spinlock_unlock(&priv->hw_ctrl_lock);
+ return ret;
+}
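The destroy path follows the same synchronous-over-asynchronous pattern. A hedged application-level equivalent, reusing the assumptions of the create sketch above, could look like this:

/* Sketch only: destroy one rule synchronously, mirroring
 * flow_hw_destroy_ctrl_flow().
 */
static int
destroy_rule_sync(uint16_t port_id, uint32_t queue_id, struct rte_flow *flow)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	struct rte_flow_op_result res;
	struct rte_flow_error error;
	int ret;

	if (rte_flow_async_destroy(port_id, queue_id, &op_attr, flow,
				   NULL, &error) < 0)
		return -1;
	if (rte_flow_push(port_id, queue_id, &error) < 0)
		return -1;
	do {
		ret = rte_flow_pull(port_id, queue_id, &res, 1, &error);
	} while (ret == 0);
	return (ret < 0 || res.status != RTE_FLOW_OP_SUCCESS) ? -1 : 0;
}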
+
+/**
+ * Destroys control flows created on behalf of @p owner_dev device.
+ *
+ * @param owner_dev
+ * Pointer to Ethernet device owning control flows.
+ *
+ * @return
+ * 0 on success, otherwise negative error code is returned and
+ * rte_errno is set.
+ */
+int
+mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)
+{
+ struct mlx5_priv *owner_priv = owner_dev->data->dev_private;
+ struct rte_eth_dev *proxy_dev;
+ struct mlx5_priv *proxy_priv;
+ struct mlx5_hw_ctrl_flow *cf;
+ struct mlx5_hw_ctrl_flow *cf_next;
+ uint16_t owner_port_id = owner_dev->data->port_id;
+ uint16_t proxy_port_id = owner_dev->data->port_id;
+ int ret;
+
+ if (owner_priv->sh->config.dv_esw_en) {
+ if (rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL)) {
+ DRV_LOG(ERR, "Unable to find proxy port for port %u",
+ owner_port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ proxy_dev = &rte_eth_devices[proxy_port_id];
+ proxy_priv = proxy_dev->data->dev_private;
+ } else {
+ proxy_dev = owner_dev;
+ proxy_priv = owner_priv;
+ }
+ cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);
+ while (cf != NULL) {
+ cf_next = LIST_NEXT(cf, next);
+ if (cf->owner_dev == owner_dev) {
+ ret = flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow);
+ if (ret) {
+ rte_errno = -ret;
+ return ret;
+ }
+ LIST_REMOVE(cf, next);
+ mlx5_free(cf);
+ }
+ cf = cf_next;
+ }
+ return 0;
+}
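Control flows always live on the transfer proxy port, which is why the flush starts by resolving it. A minimal sketch of the same lookup from application code, assuming port_id refers to a started representor port:

#include <rte_flow.h>

/* Sketch only: resolve the E-Switch transfer proxy for a given port. */
static int
get_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id)
{
	struct rte_flow_error error;

	if (rte_flow_pick_transfer_proxy(port_id, proxy_port_id, &error)) {
		/* No proxy available, e.g. E-Switch is not enabled. */
		return -1;
	}
	return 0;
}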
+
+/**
+ * Destroys all control flows created on @p dev device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, otherwise negative error code is returned and
+ * rte_errno is set.
+ */
+static int
+flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hw_ctrl_flow *cf;
+ struct mlx5_hw_ctrl_flow *cf_next;
+ int ret;
+
+ cf = LIST_FIRST(&priv->hw_ctrl_flows);
+ while (cf != NULL) {
+ cf_next = LIST_NEXT(cf, next);
+ ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
+ if (ret) {
+ rte_errno = -ret;
+ return ret;
+ }
+ LIST_REMOVE(cf, next);
+ mlx5_free(cf);
+ cf = cf_next;
+ }
+ return 0;
+}
+
+int
+mlx5_flow_hw_esw_create_mgr_sq_miss_flow(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_item_ethdev port_spec = {
+ .port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
+ };
+ struct rte_flow_item_ethdev port_mask = {
+ .port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+ .spec = &port_spec,
+ .mask = &port_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action_jump jump = {
+ .group = MLX5_HW_SQ_MISS_GROUP,
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+
+ MLX5_ASSERT(priv->master);
+ if (!priv->dr_ctx ||
+ !priv->hw_esw_sq_miss_root_tbl)
+ return 0;
+ return flow_hw_create_ctrl_flow(dev, dev,
+ priv->hw_esw_sq_miss_root_tbl,
+ items, 0, actions, 0);
+}
+
+int
+mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
+{
+ uint16_t port_id = dev->data->port_id;
+ struct mlx5_rte_flow_item_tx_queue queue_spec = {
+ .queue = txq,
+ };
+ struct mlx5_rte_flow_item_tx_queue queue_mask = {
+ .queue = UINT32_MAX,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+ .spec = &queue_spec,
+ .mask = &queue_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action_ethdev port = {
+ .port_id = port_id,
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
+ .conf = &port,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_eth_dev *proxy_dev;
+ struct mlx5_priv *proxy_priv;
+ uint16_t proxy_port_id = dev->data->port_id;
+ int ret;
+
+ ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
+ if (ret) {
+ DRV_LOG(ERR, "Unable to pick proxy port for port %u", port_id);
+ return ret;
+ }
+ proxy_dev = &rte_eth_devices[proxy_port_id];
+ proxy_priv = proxy_dev->data->dev_private;
+ if (!proxy_priv->dr_ctx)
+ return 0;
+ if (!proxy_priv->hw_esw_sq_miss_root_tbl ||
+ !proxy_priv->hw_esw_sq_miss_tbl) {
+ DRV_LOG(ERR, "port %u proxy port %u was configured but default"
+ " flow tables are not created",
+ port_id, proxy_port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ return flow_hw_create_ctrl_flow(dev, proxy_dev,
+ proxy_priv->hw_esw_sq_miss_tbl,
+ items, 0, actions, 0);
+}
+
+int
+mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
+{
+ uint16_t port_id = dev->data->port_id;
+ struct rte_flow_item_ethdev port_spec = {
+ .port_id = port_id,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+ .spec = &port_spec,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action_jump jump = {
+ .group = 1,
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &jump,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ }
+ };
+ struct rte_eth_dev *proxy_dev;
+ struct mlx5_priv *proxy_priv;
+ uint16_t proxy_port_id = dev->data->port_id;
+ int ret;
+
+ ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
+ if (ret) {
+ DRV_LOG(ERR, "Unable to pick proxy port for port %u", port_id);
+ return ret;
+ }
+ proxy_dev = &rte_eth_devices[proxy_port_id];
+ proxy_priv = proxy_dev->data->dev_private;
+ if (!proxy_priv->dr_ctx)
+ return 0;
+ if (!proxy_priv->hw_esw_zero_tbl) {
+ DRV_LOG(ERR, "port %u proxy port %u was configured but default"
+ " flow tables are not created",
+ port_id, proxy_port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ return flow_hw_create_ctrl_flow(dev, proxy_dev,
+ proxy_priv->hw_esw_zero_tbl,
+ items, 0, actions, 0);
+}
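The default jump rule above is analogous to the group 0 to group 1 FDB rule installed by SW steering. For illustration only, roughly the same rule expressed through the synchronous rte_flow API, with a transfer attribute, a match on the represented port and a jump to group 1; the function and parameter names here are placeholders:

#include <rte_flow.h>

/* Sketch only: a transfer rule forwarding all traffic received from a
 * given represented port to FDB group 1.
 */
static struct rte_flow *
create_default_jump(uint16_t proxy_port_id, uint16_t represented_port_id)
{
	const struct rte_flow_attr attr = { .group = 0, .transfer = 1 };
	const struct rte_flow_item_ethdev port_spec = {
		.port_id = represented_port_id,
	};
	const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
			.spec = &port_spec,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_jump jump = { .group = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(proxy_port_id, &attr, pattern, actions, &error);
}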
+
#endif
@@ -1245,12 +1245,14 @@ flow_verbs_validate(struct rte_eth_dev *dev,
uint16_t ether_type = 0;
bool is_empty_vlan = false;
uint16_t udp_dport = 0;
+ bool is_root;
if (items == NULL)
return -1;
ret = mlx5_flow_validate_attributes(dev, attr, error);
if (ret < 0)
return ret;
+ is_root = ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int ret = 0;
@@ -1380,7 +1382,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VXLAN:
ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
items, item_flags,
- attr, error);
+ is_root, error);
if (ret < 0)
return ret;
last_item = MLX5_FLOW_LAYER_VXLAN;
@@ -1280,6 +1280,48 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+ int ret;
+
+ if (priv->sh->config.dv_esw_en && priv->master) {
+ if (mlx5_flow_hw_esw_create_mgr_sq_miss_flow(dev))
+ goto error;
+ }
+ for (i = 0; i < priv->txqs_n; ++i) {
+ struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);
+ uint32_t queue;
+
+ if (!txq)
+ continue;
+ if (txq->is_hairpin)
+ queue = txq->obj->sq->id;
+ else
+ queue = txq->obj->sq_obj.sq->id;
+ if ((priv->representor || priv->master) &&
+ priv->sh->config.dv_esw_en) {
+ if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue)) {
+ mlx5_txq_release(dev, i);
+ goto error;
+ }
+ }
+ mlx5_txq_release(dev, i);
+ }
+ if ((priv->master || priv->representor) && priv->sh->config.dv_esw_en) {
+ if (mlx5_flow_hw_esw_create_default_jump_flow(dev))
+ goto error;
+ }
+ return 0;
+error:
+ ret = rte_errno;
+ mlx5_flow_hw_flush_ctrl_flows(dev);
+ rte_errno = ret;
+ return -rte_errno;
+}
+
/**
* Enable traffic flows configured by control plane
*
@@ -1316,6 +1358,8 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
unsigned int j;
int ret;
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_traffic_enable_hws(dev);
/*
* Hairpin txq default flow should be created no matter if it is
* isolation mode. Or else all the packets to be sent will be sent
@@ -1346,13 +1390,17 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
}
mlx5_txq_release(dev, i);
}
- if (priv->sh->config.dv_esw_en) {
- if (mlx5_flow_create_esw_table_zero_flow(dev))
- priv->fdb_def_rule = 1;
- else
- DRV_LOG(INFO, "port %u FDB default rule cannot be"
- " configured - only Eswitch group 0 flows are"
- " supported.", dev->data->port_id);
+ if (priv->sh->config.fdb_def_rule) {
+ if (priv->sh->config.dv_esw_en) {
+ if (mlx5_flow_create_esw_table_zero_flow(dev))
+ priv->fdb_def_rule = 1;
+ else
+ DRV_LOG(INFO, "port %u FDB default rule cannot be configured - only Eswitch group 0 flows are supported.",
+ dev->data->port_id);
+ }
+ } else {
+ DRV_LOG(INFO, "port %u FDB default rule is disabled",
+ dev->data->port_id);
}
if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {
ret = mlx5_flow_lacp_miss(dev);
@@ -1470,7 +1518,12 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
- mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ mlx5_flow_hw_flush_ctrl_flows(dev);
+ else
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
}
/**