@@ -357,6 +357,9 @@ struct mlx5_hw_q {
struct mlx5_hw_q_job **job; /* LIFO header. */
} __rte_cache_aligned;
+
+
+
#define MLX5_COUNTERS_PER_POOL 512
#define MLX5_MAX_PENDING_QUERIES 4
#define MLX5_CNT_CONTAINER_RESIZE 64
@@ -782,15 +785,29 @@ struct mlx5_flow_meter_policy {
/* Is meter action in policy table. */
uint32_t hierarchy_drop_cnt:1;
/* Is any meter in hierarchy contains drop_cnt. */
+ uint32_t skip_r:1;
+ /* If red color policy is skipped. */
uint32_t skip_y:1;
/* If yellow color policy is skipped. */
uint32_t skip_g:1;
/* If green color policy is skipped. */
uint32_t mark:1;
/* If policy contains mark action. */
+ uint32_t initialized:1;
+ /* Initialized. */
+ uint16_t group;
+ /* The group. */
rte_spinlock_t sl;
uint32_t ref_cnt;
/* Use count. */
+	struct rte_flow_pattern_template *hws_item_templ;
+	/* Hardware steering item template. */
+ struct rte_flow_actions_template *hws_act_templ[MLX5_MTR_DOMAIN_MAX];
+ /* Hardware steering action templates. */
+ struct rte_flow_template_table *hws_flow_table[MLX5_MTR_DOMAIN_MAX];
+ /* Hardware steering tables. */
+ struct rte_flow *hws_flow_rule[MLX5_MTR_DOMAIN_MAX][RTE_COLORS];
+ /* Hardware steering rules. */
struct mlx5_meter_policy_action_container act_cnt[MLX5_MTR_RTE_COLORS];
/* Policy actions container. */
void *dr_drop_action[MLX5_MTR_DOMAIN_MAX];
@@ -865,6 +882,7 @@ struct mlx5_flow_meter_info {
*/
uint32_t transfer:1;
uint32_t def_policy:1;
 	/* Meter points to default policy. */
+	uint32_t initialized:1; /* Meter initialized. */
uint32_t color_aware:1;
/* Meter is color aware mode. */
@@ -880,6 +898,10 @@ struct mlx5_flow_meter_info {
/**< Flow meter action. */
void *meter_action_y;
/**< Flow meter action for yellow init_color. */
+ uint32_t meter_offset;
+ /**< Flow meter offset. */
+ uint16_t group;
+ /**< Flow meter group. */
};
/* PPS(packets per second) map to BPS(Bytes per second).
@@ -914,6 +936,7 @@ struct mlx5_flow_meter_profile {
uint32_t ref_cnt; /**< Use count. */
uint32_t g_support:1; /**< If G color will be generated. */
uint32_t y_support:1; /**< If Y color will be generated. */
+ uint32_t initialized:1; /**< Initialized. */
};
/* 2 meters in each ASO cache line */
@@ -934,13 +957,20 @@ enum mlx5_aso_mtr_state {
ASO_METER_READY, /* CQE received. */
};
+/* ASO flow meter type. */
+enum mlx5_aso_mtr_type {
+ ASO_METER_INDIRECT,
+ ASO_METER_DIRECT,
+};
+
/* Generic aso_flow_meter information. */
struct mlx5_aso_mtr {
LIST_ENTRY(mlx5_aso_mtr) next;
+ enum mlx5_aso_mtr_type type;
struct mlx5_flow_meter_info fm;
/**< Pointer to the next aso flow meter structure. */
uint8_t state; /**< ASO flow meter state. */
- uint8_t offset;
+ uint32_t offset;
};
/* Generic aso_flow_meter pool structure. */
@@ -964,6 +994,14 @@ struct mlx5_aso_mtr_pools_mng {
struct mlx5_aso_mtr_pool **pools; /* ASO flow meter pool array. */
};
+/* Bulk management structure for ASO flow meter. */
+struct mlx5_mtr_bulk {
+ uint32_t size; /* Number of ASO objects. */
+	struct mlx5dr_action *action; /* HWS action. */
+ struct mlx5_devx_obj *devx_obj; /* DEVX object. */
+ struct mlx5_aso_mtr *aso; /* Array of ASO objects. */
+};
+
/* Meter management structure for global flow meter resource. */
struct mlx5_flow_mtr_mng {
struct mlx5_aso_mtr_pools_mng pools_mng;
@@ -1017,6 +1055,7 @@ struct mlx5_flow_tbl_resource {
#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 3)
#define MLX5_FLOW_TABLE_LEVEL_POLICY (MLX5_MAX_TABLES - 4)
#define MLX5_MAX_TABLES_EXTERNAL MLX5_FLOW_TABLE_LEVEL_POLICY
+#define MLX5_FLOW_TABLE_HWS_POLICY (MLX5_MAX_TABLES - 10)
#define MLX5_MAX_TABLES_FDB UINT16_MAX
#define MLX5_FLOW_TABLE_FACTOR 10
@@ -1303,6 +1342,12 @@ TAILQ_HEAD(mlx5_mtr_profiles, mlx5_flow_meter_profile);
/* MTR list. */
TAILQ_HEAD(mlx5_legacy_flow_meters, mlx5_legacy_flow_meter);
+struct mlx5_mtr_config {
+	uint32_t nb_meters; /**< Number of configured meters. */
+	uint32_t nb_meter_profiles; /**< Number of configured meter profiles. */
+	uint32_t nb_meter_policies; /**< Number of configured meter policies. */
+};
+
/* RSS description. */
struct mlx5_flow_rss_desc {
uint32_t level;
@@ -1539,12 +1584,16 @@ struct mlx5_priv {
struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
struct mlx5_hlist *mreg_cp_tbl;
/* Hash table of Rx metadata register copy table. */
+	struct mlx5_mtr_config mtr_config; /* Meter configuration. */
uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
uint8_t mtr_color_reg; /* Meter color match REG_C. */
struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */
struct mlx5_l3t_tbl *mtr_profile_tbl; /* Meter index lookup table. */
+ struct mlx5_flow_meter_profile *mtr_profile_arr; /* Profile array. */
struct mlx5_l3t_tbl *policy_idx_tbl; /* Policy index lookup table. */
+ struct mlx5_flow_meter_policy *mtr_policy_arr; /* Policy array. */
struct mlx5_l3t_tbl *mtr_idx_tbl; /* Meter index lookup table. */
+	struct mlx5_mtr_bulk mtr_bulk; /* Meter index mapping for HWS. */
uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
struct mlx5_mp_id mp_id; /* ID of a multi-process process */
@@ -1579,6 +1628,7 @@ struct mlx5_priv {
#define PORT_ID(priv) ((priv)->dev_data->port_id)
#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
+#define CTRL_QUEUE_ID(priv) ((priv)->nb_queue - 1)
struct rte_hairpin_peer_info {
uint32_t qp_id;
@@ -1890,6 +1940,10 @@ void mlx5_pmd_socket_uninit(void);
/* mlx5_flow_meter.c */
+int mlx5_flow_meter_init(struct rte_eth_dev *dev,
+ uint32_t nb_meters,
+ uint32_t nb_meter_profiles,
+ uint32_t nb_meter_policies);
int mlx5_flow_meter_ops_get(struct rte_eth_dev *dev, void *arg);
struct mlx5_flow_meter_info *mlx5_flow_meter_find(struct mlx5_priv *priv,
uint32_t meter_id, uint32_t *mtr_idx);
@@ -1964,7 +2018,7 @@ int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);
void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
enum mlx5_access_aso_opc_mod aso_opc_mod);
int mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
- struct mlx5_aso_mtr *mtr);
+ struct mlx5_aso_mtr *mtr, struct mlx5_mtr_bulk *bulk);
int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_mtr *mtr);
int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
@@ -8331,6 +8331,40 @@ mlx5_flow_port_configure(struct rte_eth_dev *dev,
return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
}
+/**
+ * Validate item template.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the item template attributes.
+ * @param[in] items
+ * The template item pattern.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ struct rte_flow_attr fattr = {0};
+
+ if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "pattern validate with incorrect steering mode");
+ return -ENOTSUP;
+ }
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->pattern_validate(dev, attr, items, error);
+}
+
/**
* Create flow item template.
*
@@ -8396,6 +8430,43 @@ mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
return fops->pattern_template_destroy(dev, template, error);
}
+/**
+ * Validate flow actions template.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the action template attributes.
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] masks
+ * List of actions that marks which of the action's member is constant.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_actions_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_actions_template_attr *attr,
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action masks[],
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ struct rte_flow_attr fattr = {0};
+
+ if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "actions validate with incorrect steering mode");
+ return -ENOTSUP;
+ }
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->actions_validate(dev, attr, actions, masks, error);
+}
+
/**
* Create flow item template.
*
@@ -1653,6 +1653,11 @@ typedef int (*mlx5_flow_port_configure_t)
uint16_t nb_queue,
const struct rte_flow_queue_attr *queue_attr[],
struct rte_flow_error *err);
+typedef int (*mlx5_flow_pattern_validate_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error);
typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
(struct rte_eth_dev *dev,
const struct rte_flow_pattern_template_attr *attr,
@@ -1662,6 +1667,12 @@ typedef int (*mlx5_flow_pattern_template_destroy_t)
(struct rte_eth_dev *dev,
struct rte_flow_pattern_template *template,
struct rte_flow_error *error);
+typedef int (*mlx5_flow_actions_validate_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_actions_template_attr *attr,
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action masks[],
+ struct rte_flow_error *error);
typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
(struct rte_eth_dev *dev,
const struct rte_flow_actions_template_attr *attr,
@@ -1778,8 +1789,10 @@ struct mlx5_flow_driver_ops {
mlx5_flow_item_update_t item_update;
mlx5_flow_info_get_t info_get;
mlx5_flow_port_configure_t configure;
+ mlx5_flow_pattern_validate_t pattern_validate;
mlx5_flow_pattern_template_create_t pattern_template_create;
mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
+ mlx5_flow_actions_validate_t actions_validate;
mlx5_flow_actions_template_create_t actions_template_create;
mlx5_flow_actions_template_destroy_t actions_template_destroy;
mlx5_flow_table_create_t template_table_create;
@@ -1861,6 +1874,8 @@ mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
/* Decrease to original index. */
idx--;
+ if (priv->mtr_bulk.aso)
+ return priv->mtr_bulk.aso + idx;
MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
@@ -1963,6 +1978,32 @@ mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error);
+
+/*
+ * Convert rte_mtr_color to mlx5 color.
+ *
+ * @param[in] rcol
+ * rte_mtr_color.
+ *
+ * @return
+ * mlx5 color.
+ */
+static inline int
+rte_col_2_mlx5_col(enum rte_color rcol)
+{
+ switch (rcol) {
+ case RTE_COLOR_GREEN:
+ return MLX5_FLOW_COLOR_GREEN;
+ case RTE_COLOR_YELLOW:
+ return MLX5_FLOW_COLOR_YELLOW;
+ case RTE_COLOR_RED:
+ return MLX5_FLOW_COLOR_RED;
+ default:
+ break;
+ }
+ return MLX5_FLOW_COLOR_UNDEFINED;
+}
+
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
@@ -2346,4 +2387,13 @@ int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
uint32_t txq);
int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
+int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_actions_template_attr *attr,
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action masks[],
+ struct rte_flow_error *error);
+int mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_pattern_template_attr *attr,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error);
#endif /* RTE_PMD_MLX5_FLOW_H_ */
@@ -642,7 +642,8 @@ mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
static uint16_t
mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_sq *sq,
- struct mlx5_aso_mtr *aso_mtr)
+ struct mlx5_aso_mtr *aso_mtr,
+ struct mlx5_mtr_bulk *bulk)
{
volatile struct mlx5_aso_wqe *wqe = NULL;
struct mlx5_flow_meter_info *fm = NULL;
@@ -653,6 +654,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
uint32_t dseg_idx = 0;
struct mlx5_aso_mtr_pool *pool = NULL;
uint32_t param_le;
+ int id;
rte_spinlock_lock(&sq->sqsl);
res = size - (uint16_t)(sq->head - sq->tail);
@@ -666,14 +668,19 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
/* Fill next WQE. */
fm = &aso_mtr->fm;
sq->elts[sq->head & mask].mtr = aso_mtr;
- pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool,
- mtrs[aso_mtr->offset]);
- wqe->general_cseg.misc = rte_cpu_to_be_32(pool->devx_obj->id +
- (aso_mtr->offset >> 1));
- wqe->general_cseg.opcode = rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
- (ASO_OPC_MOD_POLICER <<
- WQE_CSEG_OPC_MOD_OFFSET) |
- sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
+ if (aso_mtr->type == ASO_METER_INDIRECT) {
+ pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool,
+ mtrs[aso_mtr->offset]);
+ id = pool->devx_obj->id;
+ } else {
+ id = bulk->devx_obj->id;
+ }
+ wqe->general_cseg.misc = rte_cpu_to_be_32(id +
+ (aso_mtr->offset >> 1));
+ wqe->general_cseg.opcode =
+ rte_cpu_to_be_32(MLX5_OPCODE_ACCESS_ASO |
+ (ASO_OPC_MOD_POLICER << WQE_CSEG_OPC_MOD_OFFSET) |
+ sq->pi << WQE_CSEG_WQE_INDEX_OFFSET);
/* There are 2 meters in one ASO cache line. */
dseg_idx = aso_mtr->offset & 0x1;
wqe->aso_cseg.data_mask =
@@ -811,14 +818,15 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
*/
int
mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh,
- struct mlx5_aso_mtr *mtr)
+ struct mlx5_aso_mtr *mtr,
+ struct mlx5_mtr_bulk *bulk)
{
struct mlx5_aso_sq *sq = &sh->mtrmng->pools_mng.sq;
uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
do {
mlx5_aso_mtr_completion_handle(sq);
- if (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr))
+ if (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk))
return 0;
/* Waiting for wqe resource. */
rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
@@ -216,31 +216,6 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
attr->valid = 1;
}
-/*
- * Convert rte_mtr_color to mlx5 color.
- *
- * @param[in] rcol
- * rte_mtr_color.
- *
- * @return
- * mlx5 color.
- */
-static inline int
-rte_col_2_mlx5_col(enum rte_color rcol)
-{
- switch (rcol) {
- case RTE_COLOR_GREEN:
- return MLX5_FLOW_COLOR_GREEN;
- case RTE_COLOR_YELLOW:
- return MLX5_FLOW_COLOR_YELLOW;
- case RTE_COLOR_RED:
- return MLX5_FLOW_COLOR_RED;
- default:
- break;
- }
- return MLX5_FLOW_COLOR_UNDEFINED;
-}
-
struct field_modify_info modify_eth[] = {
{4, 0, MLX5_MODI_OUT_DMAC_47_16},
{2, 4, MLX5_MODI_OUT_DMAC_15_0},
@@ -903,6 +903,38 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,
return 0;
}
+static __rte_always_inline int
+flow_hw_meter_compile(struct rte_eth_dev *dev,
+ const struct mlx5_flow_template_table_cfg *cfg,
+ uint32_t start_pos, const struct rte_flow_action *action,
+ struct mlx5_hw_actions *acts, uint32_t *end_pos,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_mtr *aso_mtr;
+ const struct rte_flow_action_meter *meter = action->conf;
+ uint32_t pos = start_pos;
+ uint32_t group = cfg->attr.flow_attr.group;
+
+ aso_mtr = mlx5_aso_meter_by_idx(priv, meter->mtr_id);
+ acts->rule_acts[pos].action = priv->mtr_bulk.action;
+ acts->rule_acts[pos].aso_meter.offset = aso_mtr->offset;
+ acts->jump = flow_hw_jump_action_register
+ (dev, cfg, aso_mtr->fm.group, error);
+ if (!acts->jump) {
+ *end_pos = start_pos;
+ return -ENOMEM;
+ }
+ acts->rule_acts[++pos].action = (!!group) ?
+ acts->jump->hws_action :
+ acts->jump->root_action;
+ *end_pos = pos;
+ if (mlx5_aso_mtr_wait(priv->sh, aso_mtr)) {
+ *end_pos = start_pos;
+ return -ENOMEM;
+ }
+ return 0;
+}
/**
* Translate rte_flow actions to DR action.
*
@@ -1131,6 +1163,21 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
i++;
break;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ if (actions->conf && masks->conf &&
+ ((const struct rte_flow_action_meter *)
+ masks->conf)->mtr_id) {
+ err = flow_hw_meter_compile(dev, cfg,
+ i, actions, acts, &i, error);
+ if (err)
+ goto err;
+ } else if (__flow_hw_act_data_general_append(priv, acts,
+ actions->type,
+ actions - action_start,
+ i))
+ goto err;
+ i++;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -1461,6 +1508,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
const struct rte_flow_action_raw_encap *raw_encap_data;
const struct rte_flow_item *enc_item = NULL;
const struct rte_flow_action_ethdev *port_action = NULL;
+ const struct rte_flow_action_meter *meter = NULL;
uint8_t *buf = job->encap_data;
struct rte_flow_attr attr = {
.ingress = 1,
@@ -1468,6 +1516,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
uint32_t ft_flag;
size_t encap_len = 0;
int ret;
+ struct mlx5_aso_mtr *mtr;
+ uint32_t mtr_id;
memcpy(rule_acts, hw_acts->rule_acts,
sizeof(*rule_acts) * hw_acts->acts_num);
@@ -1587,6 +1637,29 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
rule_acts[act_data->action_dst].action =
priv->hw_vport[port_action->port_id];
break;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ meter = action->conf;
+ mtr_id = meter->mtr_id;
+ mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
+ rule_acts[act_data->action_dst].action =
+ priv->mtr_bulk.action;
+ rule_acts[act_data->action_dst].aso_meter.offset =
+ mtr->offset;
+ jump = flow_hw_jump_action_register
+ (dev, &table->cfg, mtr->fm.group, NULL);
+ if (!jump)
+ return -1;
+ MLX5_ASSERT
+ (!rule_acts[act_data->action_dst + 1].action);
+ rule_acts[act_data->action_dst + 1].action =
+ (!!attr.group) ? jump->hws_action :
+ jump->root_action;
+ job->flow->jump = jump;
+ job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
+ (*acts_num)++;
+ if (mlx5_aso_mtr_wait(priv->sh, mtr))
+ return -1;
+ break;
default:
break;
}
@@ -2483,7 +2556,7 @@ flow_hw_action_meta_copy_insert(const struct rte_flow_action actions[],
}
static int
-flow_hw_action_validate(struct rte_eth_dev *dev,
+flow_hw_actions_validate(struct rte_eth_dev *dev,
const struct rte_flow_actions_template_attr *attr,
const struct rte_flow_action actions[],
const struct rte_flow_action masks[],
@@ -2549,6 +2622,9 @@ flow_hw_action_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
/* TODO: Validation logic */
break;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ /* TODO: Validation logic */
+ break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
ret = flow_hw_validate_action_modify_field(action,
mask,
@@ -2642,7 +2718,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
.conf = &rx_mreg_mask,
};
- if (flow_hw_action_validate(dev, attr, actions, masks, error))
+ if (flow_hw_actions_validate(dev, attr, actions, masks, error))
return NULL;
if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
priv->sh->config.dv_esw_en) {
@@ -2988,15 +3064,27 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
- struct rte_flow_port_info *port_info __rte_unused,
- struct rte_flow_queue_info *queue_info __rte_unused,
+flow_hw_info_get(struct rte_eth_dev *dev,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
struct rte_flow_error *error __rte_unused)
{
- /* Nothing to be updated currently. */
+ uint16_t port_id = dev->data->port_id;
+ struct rte_mtr_capabilities mtr_cap;
+ int ret;
+
memset(port_info, 0, sizeof(*port_info));
/* Queue size is unlimited from low-level. */
+ port_info->max_nb_queues = UINT32_MAX;
queue_info->max_size = UINT32_MAX;
+
+ memset(&mtr_cap, 0, sizeof(struct rte_mtr_capabilities));
+ ret = rte_mtr_capabilities_get(port_id, &mtr_cap, NULL);
+ if (!ret) {
+ port_info->max_nb_meters = mtr_cap.n_max;
+ port_info->max_nb_meter_profiles = UINT32_MAX;
+ port_info->max_nb_meter_policies = UINT32_MAX;
+ }
return 0;
}
@@ -4191,6 +4279,13 @@ flow_hw_configure(struct rte_eth_dev *dev,
priv->nb_queue = nb_q_updated;
rte_spinlock_init(&priv->hw_ctrl_lock);
LIST_INIT(&priv->hw_ctrl_flows);
+	/* Initialize meter library. */
+ if (port_attr->nb_meters)
+ if (mlx5_flow_meter_init(dev,
+ port_attr->nb_meters,
+ port_attr->nb_meter_profiles,
+ port_attr->nb_meter_policies))
+ goto err;
/* Add global actions. */
for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
uint32_t act_flags = 0;
@@ -4505,8 +4600,10 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
+ .pattern_validate = flow_hw_pattern_validate,
.pattern_template_create = flow_hw_pattern_template_create,
.pattern_template_destroy = flow_hw_pattern_template_destroy,
+ .actions_validate = flow_hw_actions_validate,
.actions_template_create = flow_hw_actions_template_create,
.actions_template_destroy = flow_hw_actions_template_destroy,
.template_table_create = flow_hw_template_table_create,
@@ -4562,7 +4659,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
uint8_t action_template_idx)
{
struct mlx5_priv *priv = proxy_dev->data->dev_private;
- uint32_t queue = priv->nb_queue - 1;
+ uint32_t queue = CTRL_QUEUE_ID(priv);
struct rte_flow_op_attr op_attr = {
.postpone = 0,
};
@@ -4637,7 +4734,7 @@ static int
flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t queue = priv->nb_queue - 1;
+ uint32_t queue = CTRL_QUEUE_ID(priv);
struct rte_flow_op_attr op_attr = {
.postpone = 0,
};
@@ -18,6 +18,157 @@
static int mlx5_flow_meter_disable(struct rte_eth_dev *dev,
uint32_t meter_id, struct rte_mtr_error *error);
+static void
+mlx5_flow_meter_uninit(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->mtr_policy_arr) {
+ mlx5_free(priv->mtr_policy_arr);
+ priv->mtr_policy_arr = NULL;
+ }
+ if (priv->mtr_profile_arr) {
+ mlx5_free(priv->mtr_profile_arr);
+ priv->mtr_profile_arr = NULL;
+ }
+ if (priv->mtr_bulk.aso) {
+ mlx5_free(priv->mtr_bulk.aso);
+ priv->mtr_bulk.aso = NULL;
+ priv->mtr_bulk.size = 0;
+ mlx5_aso_queue_uninit(priv->sh, ASO_OPC_MOD_POLICER);
+ }
+ if (priv->mtr_bulk.action) {
+ mlx5dr_action_destroy(priv->mtr_bulk.action);
+ priv->mtr_bulk.action = NULL;
+ }
+ if (priv->mtr_bulk.devx_obj) {
+ claim_zero(mlx5_devx_cmd_destroy(priv->mtr_bulk.devx_obj));
+ priv->mtr_bulk.devx_obj = NULL;
+ }
+}
+
+int
+mlx5_flow_meter_init(struct rte_eth_dev *dev,
+ uint32_t nb_meters,
+ uint32_t nb_meter_profiles,
+ uint32_t nb_meter_policies)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_devx_obj *dcs = NULL;
+ uint32_t log_obj_size;
+ int ret = 0;
+ int reg_id;
+ struct mlx5_aso_mtr *aso;
+ uint32_t i;
+ struct rte_mtr_error error;
+
+ if (!nb_meters || !nb_meter_profiles || !nb_meter_policies) {
+ ret = ENOTSUP;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter configuration is invalid.");
+ goto err;
+ }
+ if (!priv->mtr_en || !priv->sh->meter_aso_en) {
+ ret = ENOTSUP;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter ASO is not supported.");
+ goto err;
+ }
+ priv->mtr_config.nb_meters = nb_meters;
+ if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
+ ret = ENOMEM;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter ASO queue allocation failed.");
+ goto err;
+ }
+ log_obj_size = rte_log2_u32(nb_meters >> 1);
+ dcs = mlx5_devx_cmd_create_flow_meter_aso_obj
+ (priv->sh->cdev->ctx, priv->sh->cdev->pdn,
+ log_obj_size);
+ if (!dcs) {
+ ret = ENOMEM;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter ASO object allocation failed.");
+ goto err;
+ }
+ priv->mtr_bulk.devx_obj = dcs;
+ reg_id = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, NULL);
+ if (reg_id < 0) {
+ ret = ENOTSUP;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter register is not available.");
+ goto err;
+ }
+ priv->mtr_bulk.action = mlx5dr_action_create_aso_meter
+ (priv->dr_ctx, (struct mlx5dr_devx_obj *)dcs,
+ reg_id - REG_C_0, MLX5DR_ACTION_FLAG_HWS_RX |
+ MLX5DR_ACTION_FLAG_HWS_TX |
+ MLX5DR_ACTION_FLAG_HWS_FDB);
+ if (!priv->mtr_bulk.action) {
+ ret = ENOMEM;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter action creation failed.");
+ goto err;
+ }
+ priv->mtr_bulk.aso = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_aso_mtr) * nb_meters,
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!priv->mtr_bulk.aso) {
+ ret = ENOMEM;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter bulk ASO allocation failed.");
+ goto err;
+ }
+ priv->mtr_bulk.size = nb_meters;
+ aso = priv->mtr_bulk.aso;
+ for (i = 0; i < priv->mtr_bulk.size; i++) {
+ aso->type = ASO_METER_DIRECT;
+ aso->state = ASO_METER_WAIT;
+ aso->offset = i;
+ aso++;
+ }
+ priv->mtr_config.nb_meter_profiles = nb_meter_profiles;
+ priv->mtr_profile_arr =
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_flow_meter_profile) *
+ nb_meter_profiles,
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!priv->mtr_profile_arr) {
+ ret = ENOMEM;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter profile allocation failed.");
+ goto err;
+ }
+ priv->mtr_config.nb_meter_policies = nb_meter_policies;
+ priv->mtr_policy_arr =
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_flow_meter_policy) *
+ nb_meter_policies,
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!priv->mtr_policy_arr) {
+ ret = ENOMEM;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter policy allocation failed.");
+ goto err;
+ }
+ return 0;
+err:
+ mlx5_flow_meter_uninit(dev);
+ return ret;
+}
+
/**
* Create the meter action.
*
@@ -98,6 +249,8 @@ mlx5_flow_meter_profile_find(struct mlx5_priv *priv, uint32_t meter_profile_id)
union mlx5_l3t_data data;
int32_t ret;
+ if (priv->mtr_profile_arr)
+ return &priv->mtr_profile_arr[meter_profile_id];
if (mlx5_l3t_get_entry(priv->mtr_profile_tbl,
meter_profile_id, &data) || !data.ptr)
return NULL;
@@ -145,17 +298,29 @@ mlx5_flow_meter_profile_validate(struct rte_eth_dev *dev,
RTE_MTR_ERROR_TYPE_METER_PROFILE,
NULL, "Meter profile is null.");
/* Meter profile ID must be valid. */
- if (meter_profile_id == UINT32_MAX)
- return -rte_mtr_error_set(error, EINVAL,
- RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
- NULL, "Meter profile id not valid.");
- /* Meter profile must not exist. */
- fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id);
- if (fmp)
- return -rte_mtr_error_set(error, EEXIST,
- RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
- NULL,
- "Meter profile already exists.");
+ if (priv->mtr_profile_arr) {
+ if (meter_profile_id >= priv->mtr_config.nb_meter_profiles)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Meter profile id not valid.");
+ fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id);
+ /* Meter profile must not exist. */
+ if (fmp->initialized)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Meter profile already exists.");
+ } else {
+ if (meter_profile_id == UINT32_MAX)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Meter profile id not valid.");
+ fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id);
+ /* Meter profile must not exist. */
+ if (fmp)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Meter profile already exists.");
+ }
if (!priv->sh->meter_aso_en) {
/* Old version is even not supported. */
if (!priv->sh->cdev->config.hca_attr.qos.flow_meter_old)
@@ -574,6 +739,96 @@ mlx5_flow_meter_profile_delete(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Callback to add MTR profile with HWS.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] meter_profile_id
+ * Meter profile id.
+ * @param[in] profile
+ * Pointer to meter profile detail.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_meter_profile_hws_add(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_meter_profile *profile,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_profile *fmp;
+ int ret;
+
+ if (!priv->mtr_profile_arr)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter profile array is not allocated");
+ /* Check input params. */
+ ret = mlx5_flow_meter_profile_validate(dev, meter_profile_id,
+ profile, error);
+ if (ret)
+ return ret;
+ fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id);
+ /* Fill profile info. */
+ fmp->id = meter_profile_id;
+ fmp->profile = *profile;
+ fmp->initialized = 1;
+ /* Fill the flow meter parameters for the PRM. */
+ return mlx5_flow_meter_param_fill(fmp, error);
+}
+
+/**
+ * Callback to delete MTR profile with HWS.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] meter_profile_id
+ * Meter profile id.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_meter_profile_hws_delete(struct rte_eth_dev *dev,
+ uint32_t meter_profile_id,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_profile *fmp;
+
+ if (!priv->mtr_profile_arr)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter profile array is not allocated");
+ /* Meter id must be valid. */
+ if (meter_profile_id >= priv->mtr_config.nb_meter_profiles)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ &meter_profile_id,
+ "Meter profile id not valid.");
+ /* Meter profile must exist. */
+ fmp = mlx5_flow_meter_profile_find(priv, meter_profile_id);
+ if (!fmp->initialized)
+ return -rte_mtr_error_set(error, ENOENT,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ &meter_profile_id,
+ "Meter profile id is invalid.");
+ /* Check profile is unused. */
+ if (fmp->ref_cnt)
+ return -rte_mtr_error_set(error, EBUSY,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Meter profile is in use.");
+ memset(fmp, 0, sizeof(struct mlx5_flow_meter_profile));
+ return 0;
+}
+
/**
* Find policy by id.
*
@@ -594,6 +849,11 @@ mlx5_flow_meter_policy_find(struct rte_eth_dev *dev,
struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
union mlx5_l3t_data data;
+ if (priv->mtr_policy_arr) {
+ if (policy_idx)
+ *policy_idx = policy_id;
+ return &priv->mtr_policy_arr[policy_id];
+ }
if (policy_id > MLX5_MAX_SUB_POLICY_TBL_NUM || !priv->policy_idx_tbl)
return NULL;
if (mlx5_l3t_get_entry(priv->policy_idx_tbl, policy_id, &data) ||
@@ -710,6 +970,43 @@ mlx5_flow_meter_policy_validate(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Callback to check MTR policy action validate for HWS
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] actions
+ * Pointer to meter policy action detail.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_meter_policy_hws_validate(struct rte_eth_dev *dev,
+ struct rte_mtr_meter_policy_params *policy,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_actions_template_attr attr = {
+ .transfer = priv->sh->config.dv_esw_en ? 1 : 0 };
+ int ret;
+ int i;
+
+ if (!priv->mtr_en || !priv->sh->meter_aso_en)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "meter policy unsupported.");
+ for (i = 0; i < RTE_COLORS; i++) {
+ ret = mlx5_flow_actions_validate(dev, &attr, policy->actions[i],
+ policy->actions[i], NULL);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static int
__mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev,
uint32_t policy_id,
@@ -1004,6 +1301,338 @@ mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev,
return 0;
}
/**
 * Callback to delete MTR policy for HWS.
 *
 * Destroys, in order: the per-domain/per-color async flow rules (drained
 * through push/pull on the control queue), the per-domain template tables,
 * the per-color actions templates and the single pattern template, then
 * clears the policy slot in the pre-allocated policy array.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] policy_id
 *   Meter policy id.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_meter_policy_hws_delete(struct rte_eth_dev *dev,
			uint32_t policy_id,
			struct rte_mtr_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_meter_policy *mtr_policy;
	uint32_t i, j;
	uint32_t nb_flows = 0;
	int ret;
	/* Postpone each destroy; one explicit push drains the queue. */
	struct rte_flow_op_attr op_attr = { .postpone = 1 };
	/* Sized for the worst case: one rule per color per domain. */
	struct rte_flow_op_result result[RTE_COLORS * MLX5_MTR_DOMAIN_MAX];

	if (!priv->mtr_policy_arr)
		return -rte_mtr_error_set(error, ENOTSUP,
				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
				NULL, "Meter policy array is not allocated");
	/* Meter id must be valid. */
	if (policy_id >= priv->mtr_config.nb_meter_policies)
		return -rte_mtr_error_set(error, EINVAL,
				RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
				&policy_id,
				"Meter policy id not valid.");
	/* Meter policy must exist. */
	mtr_policy = mlx5_flow_meter_policy_find(dev, policy_id, NULL);
	if (!mtr_policy->initialized)
		return -rte_mtr_error_set(error, ENOENT,
			RTE_MTR_ERROR_TYPE_METER_POLICY_ID, NULL,
			"Meter policy does not exists.");
	/* Check policy is unused. */
	if (mtr_policy->ref_cnt)
		return -rte_mtr_error_set(error, EBUSY,
				RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
				NULL, "Meter policy is in use.");
	rte_spinlock_lock(&priv->hw_ctrl_lock);
	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
		for (j = 0; j < RTE_COLORS; j++) {
			if (mtr_policy->hws_flow_rule[i][j]) {
				ret = rte_flow_async_destroy(dev->data->port_id,
					CTRL_QUEUE_ID(priv), &op_attr,
					mtr_policy->hws_flow_rule[i][j],
					NULL, NULL);
				/* Best-effort: a failed enqueue is skipped
				 * and not waited for below.
				 */
				if (ret < 0)
					continue;
				nb_flows++;
			}
		}
	}
	ret = rte_flow_push(dev->data->port_id, CTRL_QUEUE_ID(priv), NULL);
	/* Drain completions; a negative pull result aborts the wait. */
	while (nb_flows && (ret >= 0)) {
		ret = rte_flow_pull(dev->data->port_id,
					CTRL_QUEUE_ID(priv), result,
					nb_flows, NULL);
		nb_flows -= ret;
	}
	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
		if (mtr_policy->hws_flow_table[i])
			rte_flow_template_table_destroy(dev->data->port_id,
				 mtr_policy->hws_flow_table[i], NULL);
	}
	/* NOTE(review): templates are indexed by created-color count
	 * (0..RTE_COLORS-1), see the add path; RTE_COLORS equals
	 * MLX5_MTR_DOMAIN_MAX so the array bound holds — confirm.
	 */
	for (i = 0; i < RTE_COLORS; i++) {
		if (mtr_policy->hws_act_templ[i])
			rte_flow_actions_template_destroy(dev->data->port_id,
				 mtr_policy->hws_act_templ[i], NULL);
	}
	if (mtr_policy->hws_item_templ)
		rte_flow_pattern_template_destroy(dev->data->port_id,
				mtr_policy->hws_item_templ, NULL);
	rte_spinlock_unlock(&priv->hw_ctrl_lock);
	/* Mark the array slot free (clears 'initialized'). */
	memset(mtr_policy, 0, sizeof(struct mlx5_flow_meter_policy));
	return 0;
}
+
+/**
+ * Callback to add MTR policy for HWS.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[out] policy_id
+ * Pointer to policy id
+ * @param[in] actions
+ * Pointer to meter policy action detail.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_meter_policy_hws_add(struct rte_eth_dev *dev,
+ uint32_t policy_id,
+ struct rte_mtr_meter_policy_params *policy,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_policy *mtr_policy = NULL;
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_meter *mtr;
+ struct mlx5_flow_meter_info *fm;
+ struct mlx5_flow_meter_policy *plc;
+ uint8_t domain_color = MLX5_MTR_ALL_DOMAIN_BIT;
+ bool is_rss = false;
+ bool is_hierarchy = false;
+ int i, j;
+ uint32_t nb_colors = 0;
+ uint32_t nb_flows = 0;
+ int color;
+ int ret;
+ struct rte_flow_pattern_template_attr pta = {0};
+ struct rte_flow_actions_template_attr ata = {0};
+ struct rte_flow_template_table_attr ta = { {0}, 0 };
+ struct rte_flow_op_attr op_attr = { .postpone = 1 };
+ struct rte_flow_op_result result[RTE_COLORS * MLX5_MTR_DOMAIN_MAX];
+ const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
+ int color_reg_c_idx = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR,
+ 0, NULL);
+ struct rte_flow_item_tag tag_spec = {
+ .data = 0,
+ .index = color_reg_c_idx
+ };
+ struct rte_flow_item_tag tag_mask = {
+ .data = color_mask,
+ .index = 0xff};
+ struct rte_flow_item pattern[] = {
+ [0] = {
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_TAG,
+ .spec = &tag_spec,
+ .mask = &tag_mask,
+ },
+ [1] = { .type = RTE_FLOW_ITEM_TYPE_END }
+ };
+
+ if (!priv->mtr_policy_arr)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "Meter policy array is not allocated.");
+ if (policy_id >= priv->mtr_config.nb_meter_policies)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "Meter policy id not valid.");
+ mtr_policy = mlx5_flow_meter_policy_find(dev, policy_id, NULL);
+ if (mtr_policy->initialized)
+ return -rte_mtr_error_set(error, EEXIST,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "Meter policy already exists.");
+ if (!policy ||
+ !policy->actions[RTE_COLOR_RED] ||
+ !policy->actions[RTE_COLOR_YELLOW] ||
+ !policy->actions[RTE_COLOR_GREEN])
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL, "Meter policy actions are not valid.");
+ if (policy->actions[RTE_COLOR_RED] == RTE_FLOW_ACTION_TYPE_END)
+ mtr_policy->skip_r = 1;
+ if (policy->actions[RTE_COLOR_YELLOW] == RTE_FLOW_ACTION_TYPE_END)
+ mtr_policy->skip_y = 1;
+ if (policy->actions[RTE_COLOR_GREEN] == RTE_FLOW_ACTION_TYPE_END)
+ mtr_policy->skip_g = 1;
+ if (mtr_policy->skip_r && mtr_policy->skip_y && mtr_policy->skip_g)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "Meter policy actions are empty.");
+ for (i = 0; i < RTE_COLORS; i++) {
+ act = policy->actions[i];
+ while (act && act->type != RTE_FLOW_ACTION_TYPE_END) {
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ /* fall-through. */
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ domain_color &= ~(MLX5_MTR_DOMAIN_INGRESS_BIT |
+ MLX5_MTR_DOMAIN_EGRESS_BIT);
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ is_rss = true;
+ /* fall-through. */
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ domain_color &= ~(MLX5_MTR_DOMAIN_EGRESS_BIT |
+ MLX5_MTR_DOMAIN_TRANSFER_BIT);
+ break;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ is_hierarchy = true;
+ mtr = act->conf;
+ fm = mlx5_flow_meter_find(priv,
+ mtr->mtr_id, NULL);
+ if (!fm)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_MTR_ID, NULL,
+ "Meter not found in meter hierarchy.");
+ plc = mlx5_flow_meter_policy_find(dev,
+ fm->policy_id,
+ NULL);
+ MLX5_ASSERT(plc);
+ domain_color &= MLX5_MTR_ALL_DOMAIN_BIT &
+ (plc->ingress <<
+ MLX5_MTR_DOMAIN_INGRESS);
+ domain_color &= MLX5_MTR_ALL_DOMAIN_BIT &
+ (plc->egress <<
+ MLX5_MTR_DOMAIN_EGRESS);
+ domain_color &= MLX5_MTR_ALL_DOMAIN_BIT &
+ (plc->transfer <<
+ MLX5_MTR_DOMAIN_TRANSFER);
+ break;
+ default:
+ break;
+ }
+ act++;
+ }
+ }
+ if (!domain_color)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "Meter policy domains are conflicting.");
+ mtr_policy->is_rss = is_rss;
+ mtr_policy->ingress = !!(domain_color & MLX5_MTR_DOMAIN_INGRESS_BIT);
+ pta.ingress = mtr_policy->ingress;
+ mtr_policy->egress = !!(domain_color & MLX5_MTR_DOMAIN_EGRESS_BIT);
+ pta.egress = mtr_policy->egress;
+ mtr_policy->transfer = !!(domain_color & MLX5_MTR_DOMAIN_TRANSFER_BIT);
+ pta.transfer = mtr_policy->transfer;
+ mtr_policy->group = MLX5_FLOW_TABLE_HWS_POLICY - policy_id;
+ mtr_policy->is_hierarchy = is_hierarchy;
+ mtr_policy->initialized = 1;
+ rte_spinlock_lock(&priv->hw_ctrl_lock);
+ mtr_policy->hws_item_templ =
+ rte_flow_pattern_template_create(dev->data->port_id,
+ &pta, pattern, NULL);
+ if (!mtr_policy->hws_item_templ)
+ goto policy_add_err;
+ for (i = 0; i < RTE_COLORS; i++) {
+ if (mtr_policy->skip_g && i == RTE_COLOR_GREEN)
+ continue;
+ if (mtr_policy->skip_y && i == RTE_COLOR_YELLOW)
+ continue;
+ if (mtr_policy->skip_r && i == RTE_COLOR_RED)
+ continue;
+ mtr_policy->hws_act_templ[nb_colors] =
+ rte_flow_actions_template_create(dev->data->port_id,
+ &ata, policy->actions[i],
+ policy->actions[i], NULL);
+ if (!mtr_policy->hws_act_templ[nb_colors])
+ goto policy_add_err;
+ nb_colors++;
+ }
+ for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
+ memset(&ta, 0, sizeof(ta));
+ ta.nb_flows = RTE_COLORS;
+ ta.flow_attr.group = mtr_policy->group;
+ if (i == MLX5_MTR_DOMAIN_INGRESS) {
+ if (!mtr_policy->ingress)
+ continue;
+ ta.flow_attr.ingress = 1;
+ } else if (i == MLX5_MTR_DOMAIN_EGRESS) {
+ if (!mtr_policy->egress)
+ continue;
+ ta.flow_attr.egress = 1;
+ } else if (i == MLX5_MTR_DOMAIN_TRANSFER) {
+ if (!mtr_policy->transfer)
+ continue;
+ ta.flow_attr.transfer = 1;
+ }
+ mtr_policy->hws_flow_table[i] =
+ rte_flow_template_table_create(dev->data->port_id,
+ &ta, &mtr_policy->hws_item_templ, 1,
+ mtr_policy->hws_act_templ, nb_colors,
+ NULL);
+ if (!mtr_policy->hws_flow_table[i])
+ goto policy_add_err;
+ nb_colors = 0;
+ for (j = 0; j < RTE_COLORS; j++) {
+ if (mtr_policy->skip_g && j == RTE_COLOR_GREEN)
+ continue;
+ if (mtr_policy->skip_y && j == RTE_COLOR_YELLOW)
+ continue;
+ if (mtr_policy->skip_r && j == RTE_COLOR_RED)
+ continue;
+ color = rte_col_2_mlx5_col((enum rte_color)j);
+ tag_spec.data = color;
+ mtr_policy->hws_flow_rule[i][j] =
+ rte_flow_async_create(dev->data->port_id,
+ CTRL_QUEUE_ID(priv), &op_attr,
+ mtr_policy->hws_flow_table[i],
+ pattern, 0, policy->actions[j],
+ nb_colors, NULL, NULL);
+ if (!mtr_policy->hws_flow_rule[i][j])
+ goto policy_add_err;
+ nb_colors++;
+ nb_flows++;
+ }
+ ret = rte_flow_push(dev->data->port_id,
+ CTRL_QUEUE_ID(priv), NULL);
+ if (ret < 0)
+ goto policy_add_err;
+ while (nb_flows) {
+ ret = rte_flow_pull(dev->data->port_id,
+ CTRL_QUEUE_ID(priv), result,
+ nb_flows, NULL);
+ if (ret < 0)
+ goto policy_add_err;
+ for (j = 0; j < ret; j++) {
+ if (result[j].status == RTE_FLOW_OP_ERROR)
+ goto policy_add_err;
+ }
+ nb_flows -= ret;
+ }
+ }
+ rte_spinlock_unlock(&priv->hw_ctrl_lock);
+ return 0;
+policy_add_err:
+ rte_spinlock_unlock(&priv->hw_ctrl_lock);
+ ret = mlx5_flow_meter_policy_hws_delete(dev, policy_id, error);
+ memset(mtr_policy, 0, sizeof(struct mlx5_flow_meter_policy));
+ if (ret)
+ return ret;
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to create meter policy.");
+}
+
/**
* Check meter validation.
*
@@ -1087,7 +1716,8 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv,
if (priv->sh->meter_aso_en) {
fm->is_enable = !!is_enable;
aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
- ret = mlx5_aso_meter_update_by_wqe(priv->sh, aso_mtr);
+ ret = mlx5_aso_meter_update_by_wqe(priv->sh, aso_mtr,
+ &priv->mtr_bulk);
if (ret)
return ret;
ret = mlx5_aso_mtr_wait(priv->sh, aso_mtr);
@@ -1336,7 +1966,8 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
/* If ASO meter supported, update ASO flow meter by wqe. */
if (priv->sh->meter_aso_en) {
aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
- ret = mlx5_aso_meter_update_by_wqe(priv->sh, aso_mtr);
+ ret = mlx5_aso_meter_update_by_wqe(priv->sh, aso_mtr,
+ &priv->mtr_bulk);
if (ret)
goto error;
if (!priv->mtr_idx_tbl) {
@@ -1369,6 +2000,90 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
NULL, "Failed to create devx meter.");
}
+/**
+ * Create meter rules.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] meter_id
+ * Meter id.
+ * @param[in] params
+ * Pointer to rte meter parameters.
+ * @param[in] shared
+ * Meter shared with other flow or not.
+ * @param[out] error
+ * Pointer to rte meter error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id,
+ struct rte_mtr_params *params, int shared,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_meter_profile *profile;
+ struct mlx5_flow_meter_info *fm;
+ struct mlx5_flow_meter_policy *policy = NULL;
+ struct mlx5_aso_mtr *aso_mtr;
+ int ret;
+
+ if (!priv->mtr_profile_arr ||
+ !priv->mtr_policy_arr ||
+ !priv->mtr_bulk.aso)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Meter bulk array is not allocated.");
+ /* Meter profile must exist. */
+ profile = mlx5_flow_meter_profile_find(priv, params->meter_profile_id);
+ if (!profile->initialized)
+ return -rte_mtr_error_set(error, ENOENT,
+ RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+ NULL, "Meter profile id not valid.");
+ /* Meter policy must exist. */
+ policy = mlx5_flow_meter_policy_find(dev,
+ params->meter_policy_id, NULL);
+ if (!policy->initialized)
+ return -rte_mtr_error_set(error, ENOENT,
+ RTE_MTR_ERROR_TYPE_METER_POLICY_ID,
+ NULL, "Meter policy id not valid.");
+ /* Meter ID must be valid. */
+ if (meter_id >= priv->mtr_config.nb_meters)
+ return -rte_mtr_error_set(error, EINVAL,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL, "Meter id not valid.");
+ /* Find ASO object. */
+ aso_mtr = mlx5_aso_meter_by_idx(priv, meter_id);
+ fm = &aso_mtr->fm;
+ if (fm->initialized)
+ return -rte_mtr_error_set(error, ENOENT,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL, "Meter object already exists.");
+ /* Fill the flow meter parameters. */
+ fm->meter_id = meter_id;
+ fm->policy_id = params->meter_policy_id;
+ fm->profile = profile;
+ fm->meter_offset = meter_id;
+ fm->group = policy->group;
+ /* Add to the flow meter list. */
+ fm->active_state = 1; /* Config meter starts as active. */
+ fm->is_enable = params->meter_enable;
+ fm->shared = !!shared;
+ fm->initialized = 1;
+ /* Update ASO flow meter by wqe. */
+ ret = mlx5_aso_meter_update_by_wqe(priv->sh, aso_mtr,
+ &priv->mtr_bulk);
+ if (ret)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to create devx meter.");
+ fm->active_state = params->meter_enable;
+ __atomic_add_fetch(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+ __atomic_add_fetch(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ return 0;
+}
+
static int
mlx5_flow_meter_params_flush(struct rte_eth_dev *dev,
struct mlx5_flow_meter_info *fm,
@@ -1475,6 +2190,58 @@ mlx5_flow_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
return 0;
}
+/**
+ * Destroy meter rules.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] meter_id
+ * Meter id.
+ * @param[out] error
+ * Pointer to rte meter error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_meter_hws_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
+ struct rte_mtr_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_mtr *aso_mtr;
+ struct mlx5_flow_meter_info *fm;
+ struct mlx5_flow_meter_policy *policy;
+
+ if (!priv->mtr_profile_arr ||
+ !priv->mtr_policy_arr ||
+ !priv->mtr_bulk.aso)
+ return -rte_mtr_error_set(error, ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
+ "Meter bulk array is not allocated.");
+ /* Find ASO object. */
+ aso_mtr = mlx5_aso_meter_by_idx(priv, meter_id);
+ fm = &aso_mtr->fm;
+ if (!fm->initialized)
+ return -rte_mtr_error_set(error, ENOENT,
+ RTE_MTR_ERROR_TYPE_MTR_ID,
+ NULL, "Meter object id not valid.");
+ /* Meter object must not have any owner. */
+ if (fm->ref_cnt > 0)
+ return -rte_mtr_error_set(error, EBUSY,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter object is being used.");
+ /* Destroy the meter profile. */
+ __atomic_sub_fetch(&fm->profile->ref_cnt,
+ 1, __ATOMIC_RELAXED);
+ /* Destroy the meter policy. */
+ policy = mlx5_flow_meter_policy_find(dev,
+ fm->policy_id, NULL);
+ __atomic_sub_fetch(&policy->ref_cnt,
+ 1, __ATOMIC_RELAXED);
+ memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
+ return 0;
+}
+
/**
* Modify meter state.
*
@@ -1798,6 +2565,23 @@ static const struct rte_mtr_ops mlx5_flow_mtr_ops = {
.stats_read = mlx5_flow_meter_stats_read,
};
/* rte_mtr callbacks used when HW steering is enabled (dv_flow_en == 2). */
static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = {
	.capabilities_get = mlx5_flow_mtr_cap_get,
	.meter_profile_add = mlx5_flow_meter_profile_hws_add,
	.meter_profile_delete = mlx5_flow_meter_profile_hws_delete,
	.meter_policy_validate = mlx5_flow_meter_policy_hws_validate,
	.meter_policy_add = mlx5_flow_meter_policy_hws_add,
	.meter_policy_delete = mlx5_flow_meter_policy_hws_delete,
	.create = mlx5_flow_meter_hws_create,
	.destroy = mlx5_flow_meter_hws_destroy,
	.meter_enable = mlx5_flow_meter_enable,
	.meter_disable = mlx5_flow_meter_disable,
	.meter_profile_update = mlx5_flow_meter_profile_update,
	.meter_dscp_table_update = NULL, /* Not supported with HWS. */
	.stats_update = NULL, /* Not supported with HWS. */
	.stats_read = NULL, /* Not supported with HWS. */
};
+
/**
* Get meter operations.
*
@@ -1812,7 +2596,12 @@ static const struct rte_mtr_ops mlx5_flow_mtr_ops = {
int
mlx5_flow_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
{
- *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_hws_ops;
+ else
+ *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops;
return 0;
}
@@ -1841,6 +2630,12 @@ mlx5_flow_meter_find(struct mlx5_priv *priv, uint32_t meter_id,
union mlx5_l3t_data data;
uint16_t n_valid;
+ if (priv->mtr_bulk.aso) {
+ if (mtr_idx)
+ *mtr_idx = meter_id;
+ aso_mtr = priv->mtr_bulk.aso + meter_id;
+ return &aso_mtr->fm;
+ }
if (priv->sh->meter_aso_en) {
rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
n_valid = pools_mng->n_valid;
@@ -2185,6 +2980,7 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
struct mlx5_flow_meter_profile *fmp;
struct mlx5_legacy_flow_meter *legacy_fm;
struct mlx5_flow_meter_info *fm;
+ struct mlx5_flow_meter_policy *policy;
struct mlx5_flow_meter_sub_policy *sub_policy;
void *tmp;
uint32_t i, mtr_idx, policy_idx;
@@ -2219,6 +3015,14 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
NULL, "MTR object meter profile invalid.");
}
}
+ if (priv->mtr_bulk.aso) {
+ for (i = 1; i <= priv->mtr_config.nb_meter_profiles; i++) {
+ aso_mtr = mlx5_aso_meter_by_idx(priv, i);
+ fm = &aso_mtr->fm;
+ if (fm->initialized)
+ mlx5_flow_meter_hws_destroy(dev, i, error);
+ }
+ }
if (priv->policy_idx_tbl) {
MLX5_L3T_FOREACH(priv->policy_idx_tbl, i, entry) {
policy_idx = *(uint32_t *)entry;
@@ -2244,6 +3048,15 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
mlx5_l3t_destroy(priv->policy_idx_tbl);
priv->policy_idx_tbl = NULL;
}
+ if (priv->mtr_policy_arr) {
+ for (i = 0; i < priv->mtr_config.nb_meter_policies; i++) {
+ policy = mlx5_flow_meter_policy_find(dev, i,
+ &policy_idx);
+ if (policy->initialized)
+ mlx5_flow_meter_policy_hws_delete(dev, i,
+ error);
+ }
+ }
if (priv->mtr_profile_tbl) {
MLX5_L3T_FOREACH(priv->mtr_profile_tbl, i, entry) {
fmp = entry;
@@ -2257,9 +3070,19 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
mlx5_l3t_destroy(priv->mtr_profile_tbl);
priv->mtr_profile_tbl = NULL;
}
+ if (priv->mtr_profile_arr) {
+ for (i = 0; i < priv->mtr_config.nb_meter_profiles; i++) {
+ fmp = mlx5_flow_meter_profile_find(priv, i);
+ if (fmp->initialized)
+ mlx5_flow_meter_profile_hws_delete(dev, i,
+ error);
+ }
+ }
/* Delete default policy table. */
mlx5_flow_destroy_def_policy(dev);
if (priv->sh->refcnt == 1)
mlx5_flow_destroy_mtr_drop_tbls(dev);
+ /* Destroy HWS configuration. */
+ mlx5_flow_meter_uninit(dev);
return 0;
}