@@ -969,12 +969,16 @@ enum mlx5_aso_mtr_type {
/* Generic aso_flow_meter information. */
struct mlx5_aso_mtr {
- LIST_ENTRY(mlx5_aso_mtr) next;
+ union {
+ LIST_ENTRY(mlx5_aso_mtr) next;
+ struct mlx5_aso_mtr_pool *pool;
+ };
enum mlx5_aso_mtr_type type;
struct mlx5_flow_meter_info fm;
/**< Pointer to the next aso flow meter structure. */
uint8_t state; /**< ASO flow meter state. */
uint32_t offset;
+ enum rte_color init_color;
};
/* Generic aso_flow_meter pool structure. */
@@ -983,6 +987,8 @@ struct mlx5_aso_mtr_pool {
/*Must be the first in pool*/
struct mlx5_devx_obj *devx_obj;
/* The devx object of the minimum aso flow meter ID. */
+ struct mlx5dr_action *action; /* HWS action. */
+ struct mlx5_indexed_pool *idx_pool; /* HWS index pool. */
uint32_t index; /* Pool index in management structure. */
};
@@ -1670,6 +1676,7 @@ struct mlx5_priv {
struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */
struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
struct mlx5_aso_ct_pool *hws_ctpool; /* HW steering's CT pool. */
+ struct mlx5_aso_mtr_pool *hws_mpool; /* Meter mark indexed pool. */
#endif
};
@@ -1112,6 +1112,7 @@ struct rte_flow_hw {
struct rte_flow_template_table *table; /* The table flow allcated from. */
struct mlx5dr_rule rule; /* HWS layer data struct. */
uint32_t cnt_id;
+ uint32_t mtr_id;
} __rte_packed;
/* rte flow action translate to DR action struct. */
@@ -1241,6 +1242,7 @@ struct mlx5_hw_actions {
uint16_t encap_decap_pos; /* Encap/Decap action position. */
uint32_t mark:1; /* Indicate the mark action. */
uint32_t cnt_id; /* Counter id. */
+ uint32_t mtr_id; /* Meter id. */
/* Translated DR action array from action template. */
struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
};
@@ -700,8 +700,11 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh,
fm = &aso_mtr->fm;
sq->elts[sq->head & mask].mtr = aso_mtr;
if (aso_mtr->type == ASO_METER_INDIRECT) {
- pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool,
- mtrs[aso_mtr->offset]);
+ if (likely(sh->config.dv_flow_en == 2))
+ pool = aso_mtr->pool;
+ else
+ pool = container_of(aso_mtr, struct mlx5_aso_mtr_pool,
+ mtrs[aso_mtr->offset]);
id = pool->devx_obj->id;
} else {
id = bulk->devx_obj->id;
@@ -395,6 +395,10 @@ __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
acts->cnt_id = 0;
}
+ if (acts->mtr_id) {
+ mlx5_ipool_free(priv->hws_mpool->idx_pool, acts->mtr_id);
+ acts->mtr_id = 0;
+ }
}
/**
@@ -1096,6 +1100,70 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
#endif
}
+static __rte_always_inline struct mlx5_aso_mtr *
+flow_hw_meter_mark_alloc(struct rte_eth_dev *dev,
+			 const struct rte_flow_action *action)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
+	const struct rte_flow_action_meter_mark *meter_mark = action->conf;
+	struct mlx5_aso_mtr *aso_mtr;
+	struct mlx5_flow_meter_info *fm;
+	uint32_t mtr_id;
+
+	aso_mtr = mlx5_ipool_malloc(pool->idx_pool, &mtr_id);
+	if (!aso_mtr)
+		return NULL;
+	/* Fill the flow meter parameters. */
+	aso_mtr->type = ASO_METER_INDIRECT;
+	fm = &aso_mtr->fm;
+	fm->meter_id = mtr_id;
+	fm->profile = (struct mlx5_flow_meter_profile *)(meter_mark->profile);
+	fm->is_enable = meter_mark->state;
+	fm->color_aware = meter_mark->color_mode;
+	aso_mtr->pool = pool;
+	aso_mtr->state = ASO_METER_WAIT;
+	aso_mtr->offset = mtr_id - 1;
+	aso_mtr->init_color = (meter_mark->color_mode) ?
+		meter_mark->init_color : RTE_COLOR_GREEN;
+	/* Update ASO flow meter by wqe. */
+	if (mlx5_aso_meter_update_by_wqe(priv->sh, aso_mtr, &priv->mtr_bulk)) {
+		mlx5_ipool_free(pool->idx_pool, mtr_id);
+		return NULL;
+	}
+	/* Wait for ASO object completion. */
+	if (mlx5_aso_mtr_wait(priv->sh, aso_mtr)) {
+		mlx5_ipool_free(pool->idx_pool, mtr_id);
+		return NULL;
+	}
+	return aso_mtr;
+}
+
+static __rte_always_inline int
+flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
+ uint16_t aso_mtr_pos,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *acts,
+ uint32_t *index)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
+ struct mlx5_aso_mtr *aso_mtr;
+
+ aso_mtr = flow_hw_meter_mark_alloc(dev, action);
+ if (!aso_mtr)
+ return -1;
+
+ /* Compile METER_MARK action */
+ acts[aso_mtr_pos].action = pool->action;
+ acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
+ acts[aso_mtr_pos].aso_meter.init_color =
+ (enum mlx5dr_action_aso_meter_color)
+ rte_col_2_mlx5_col(aso_mtr->init_color);
+ *index = aso_mtr->fm.meter_id;
+ return 0;
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -1403,6 +1471,23 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
}
break;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ action_pos = at->actions_off[actions - action_start];
+ if (actions->conf && masks->conf &&
+ ((const struct rte_flow_action_meter_mark *)
+ masks->conf)->profile) {
+ ret = flow_hw_meter_mark_compile(dev,
+ action_pos, actions,
+ acts->rule_acts,
+ &acts->mtr_id);
+ if (ret)
+ goto err;
+ } else if (__flow_hw_act_data_general_append(priv, acts,
+ actions->type,
+ actions - action_start,
+ action_pos))
+ goto err;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -1788,7 +1873,6 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
size_t encap_len = 0;
int ret;
struct mlx5_aso_mtr *mtr;
- uint32_t mtr_id;
rte_memcpy(rule_acts, hw_acts->rule_acts, sizeof(*rule_acts) * at->dr_actions_num);
attr.group = table->grp->group_id;
@@ -1822,6 +1906,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
struct mlx5_hrxq *hrxq;
uint32_t ct_idx;
cnt_id_t cnt_id;
+ uint32_t mtr_id;
action = &actions[act_data->action_src];
/*
@@ -1928,13 +2013,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_METER:
meter = action->conf;
mtr_id = meter->mtr_id;
- mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
+ aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
rule_acts[act_data->action_dst].action =
priv->mtr_bulk.action;
rule_acts[act_data->action_dst].aso_meter.offset =
- mtr->offset;
+ aso_mtr->offset;
jump = flow_hw_jump_action_register
- (dev, &table->cfg, mtr->fm.group, NULL);
+ (dev, &table->cfg, aso_mtr->fm.group, NULL);
if (!jump)
return -1;
MLX5_ASSERT
@@ -1944,7 +2029,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
jump->root_action;
job->flow->jump = jump;
job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
- if (mlx5_aso_mtr_wait(priv->sh, mtr))
+ if (mlx5_aso_mtr_wait(priv->sh, aso_mtr))
return -1;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -1980,6 +2065,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
&rule_acts[act_data->action_dst]))
return -1;
break;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ ret = flow_hw_meter_mark_compile(dev,
+ act_data->action_dst, action,
+ rule_acts, &job->flow->mtr_id);
+ if (ret != 0)
+ return ret;
+ break;
default:
break;
}
@@ -2242,6 +2334,7 @@ flow_hw_pull(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
struct mlx5_hw_q_job *job;
int ret, i;
@@ -2266,6 +2359,10 @@ flow_hw_pull(struct rte_eth_dev *dev,
&job->flow->cnt_id);
job->flow->cnt_id = 0;
}
+ if (job->flow->mtr_id) {
+ mlx5_ipool_free(pool->idx_pool, job->flow->mtr_id);
+ job->flow->mtr_id = 0;
+ }
mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
}
priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
@@ -3059,6 +3156,9 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_METER:
/* TODO: Validation logic */
break;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ /* TODO: Validation logic */
+ break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
ret = flow_hw_validate_action_modify_field(action,
mask,
@@ -3243,6 +3343,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
MLX5_HW_VLAN_PUSH_PCP_IDX :
MLX5_HW_VLAN_PUSH_VID_IDX;
break;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ at->actions_off[i] = curr_off;
+ action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
+ if (curr_off >= MLX5_HW_MAX_ACTS)
+ goto err_actions_num;
+ break;
default:
type = mlx5_hw_dr_action_types[at->actions[i].type];
at->actions_off[i] = curr_off;
@@ -17,6 +17,13 @@
static int mlx5_flow_meter_disable(struct rte_eth_dev *dev,
uint32_t meter_id, struct rte_mtr_error *error);
+/*
+ * The default ipool threshold value indicates which per_core_cache
+ * value to set.
+ */
+#define MLX5_MTR_IPOOL_SIZE_THRESHOLD (1 << 19)
+/* The default min local cache size. */
+#define MLX5_MTR_IPOOL_CACHE_MIN (1 << 9)
static void
mlx5_flow_meter_uninit(struct rte_eth_dev *dev)
@@ -31,6 +38,11 @@ mlx5_flow_meter_uninit(struct rte_eth_dev *dev)
mlx5_free(priv->mtr_profile_arr);
priv->mtr_profile_arr = NULL;
}
+ if (priv->hws_mpool) {
+ mlx5_ipool_destroy(priv->hws_mpool->idx_pool);
+ mlx5_free(priv->hws_mpool);
+ priv->hws_mpool = NULL;
+ }
if (priv->mtr_bulk.aso) {
mlx5_free(priv->mtr_bulk.aso);
priv->mtr_bulk.aso = NULL;
@@ -62,27 +74,39 @@ mlx5_flow_meter_init(struct rte_eth_dev *dev,
uint32_t i;
struct rte_mtr_error error;
uint32_t flags;
+ uint32_t nb_mtrs = rte_align32pow2(nb_meters);
+ struct mlx5_indexed_pool_config cfg = {
+ .size = sizeof(struct mlx5_aso_mtr),
+ .trunk_size = 1 << 12,
+ .per_core_cache = 1 << 13,
+ .need_lock = 1,
+ .release_mem_en = !!priv->sh->config.reclaim_mode,
+ .malloc = mlx5_malloc,
+ .max_idx = nb_meters,
+ .free = mlx5_free,
+ .type = "mlx5_hw_mtr_mark_action",
+ };
if (!nb_meters || !nb_meter_profiles || !nb_meter_policies) {
ret = ENOTSUP;
rte_mtr_error_set(&error, ENOMEM,
- RTE_MTR_ERROR_TYPE_UNSPECIFIED,
- NULL, "Meter configuration is invalid.");
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter configuration is invalid.");
goto err;
}
if (!priv->mtr_en || !priv->sh->meter_aso_en) {
ret = ENOTSUP;
rte_mtr_error_set(&error, ENOMEM,
- RTE_MTR_ERROR_TYPE_UNSPECIFIED,
- NULL, "Meter ASO is not supported.");
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter ASO is not supported.");
goto err;
}
priv->mtr_config.nb_meters = nb_meters;
if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
ret = ENOMEM;
rte_mtr_error_set(&error, ENOMEM,
- RTE_MTR_ERROR_TYPE_UNSPECIFIED,
- NULL, "Meter ASO queue allocation failed.");
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter ASO queue allocation failed.");
goto err;
}
log_obj_size = rte_log2_u32(nb_meters >> 1);
@@ -92,8 +116,8 @@ mlx5_flow_meter_init(struct rte_eth_dev *dev,
if (!dcs) {
ret = ENOMEM;
rte_mtr_error_set(&error, ENOMEM,
- RTE_MTR_ERROR_TYPE_UNSPECIFIED,
- NULL, "Meter ASO object allocation failed.");
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter ASO object allocation failed.");
goto err;
}
priv->mtr_bulk.devx_obj = dcs;
@@ -101,8 +125,8 @@ mlx5_flow_meter_init(struct rte_eth_dev *dev,
if (reg_id < 0) {
ret = ENOTSUP;
rte_mtr_error_set(&error, ENOMEM,
- RTE_MTR_ERROR_TYPE_UNSPECIFIED,
- NULL, "Meter register is not available.");
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter register is not available.");
goto err;
}
flags = MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
@@ -114,19 +138,20 @@ mlx5_flow_meter_init(struct rte_eth_dev *dev,
if (!priv->mtr_bulk.action) {
ret = ENOMEM;
rte_mtr_error_set(&error, ENOMEM,
- RTE_MTR_ERROR_TYPE_UNSPECIFIED,
- NULL, "Meter action creation failed.");
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter action creation failed.");
goto err;
}
priv->mtr_bulk.aso = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(struct mlx5_aso_mtr) * nb_meters,
- RTE_CACHE_LINE_SIZE,
- SOCKET_ID_ANY);
+ sizeof(struct mlx5_aso_mtr) *
+ nb_meters,
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
if (!priv->mtr_bulk.aso) {
ret = ENOMEM;
rte_mtr_error_set(&error, ENOMEM,
- RTE_MTR_ERROR_TYPE_UNSPECIFIED,
- NULL, "Meter bulk ASO allocation failed.");
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter bulk ASO allocation failed.");
goto err;
}
priv->mtr_bulk.size = nb_meters;
@@ -137,32 +162,56 @@ mlx5_flow_meter_init(struct rte_eth_dev *dev,
aso->offset = i;
aso++;
}
+ priv->hws_mpool = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_aso_mtr_pool),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (!priv->hws_mpool) {
+ ret = ENOMEM;
+ rte_mtr_error_set(&error, ENOMEM,
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter ipool allocation failed.");
+ goto err;
+ }
+ priv->hws_mpool->devx_obj = priv->mtr_bulk.devx_obj;
+ priv->hws_mpool->action = priv->mtr_bulk.action;
+	/* No per-core cache for small meter counts (below trunk size 4K). */
+	if (nb_mtrs <= cfg.trunk_size) {
+		cfg.per_core_cache = 0;
+		cfg.trunk_size = nb_mtrs;
+	} else if (nb_mtrs <= MLX5_MTR_IPOOL_SIZE_THRESHOLD) {
+		cfg.per_core_cache = MLX5_MTR_IPOOL_CACHE_MIN;
+	}
+	priv->hws_mpool->idx_pool = mlx5_ipool_create(&cfg);
+	if (!priv->hws_mpool->idx_pool) {
+		ret = ENOMEM;
+		goto err;
+	}
priv->mtr_config.nb_meter_profiles = nb_meter_profiles;
priv->mtr_profile_arr =
mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(struct mlx5_flow_meter_profile) *
- nb_meter_profiles,
- RTE_CACHE_LINE_SIZE,
- SOCKET_ID_ANY);
+ sizeof(struct mlx5_flow_meter_profile) *
+ nb_meter_profiles,
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
if (!priv->mtr_profile_arr) {
ret = ENOMEM;
rte_mtr_error_set(&error, ENOMEM,
- RTE_MTR_ERROR_TYPE_UNSPECIFIED,
- NULL, "Meter profile allocation failed.");
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter profile allocation failed.");
goto err;
}
priv->mtr_config.nb_meter_policies = nb_meter_policies;
priv->mtr_policy_arr =
mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(struct mlx5_flow_meter_policy) *
- nb_meter_policies,
- RTE_CACHE_LINE_SIZE,
- SOCKET_ID_ANY);
+ sizeof(struct mlx5_flow_meter_policy) *
+ nb_meter_policies,
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
if (!priv->mtr_policy_arr) {
ret = ENOMEM;
rte_mtr_error_set(&error, ENOMEM,
- RTE_MTR_ERROR_TYPE_UNSPECIFIED,
- NULL, "Meter policy allocation failed.");
+ RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Meter policy allocation failed.");
goto err;
}
return 0;