@@ -497,6 +497,12 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
uint32_t i;
struct mlx5_age_info *age_info;
+ /*
+	 * In HW steering, the aging information structure is initialized
+	 * later, during the configure function.
+ */
+ if (sh->config.dv_flow_en == 2)
+ return;
for (i = 0; i < sh->max_port; i++) {
age_info = &sh->port[i].age_info;
age_info->flags = 0;
@@ -540,8 +546,8 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
hca_attr->flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes the sh. */
if (sh->refcnt == 1)
- sh->cmng.counter_fallback = fallback;
- else if (fallback != sh->cmng.counter_fallback)
+ sh->sws_cmng.counter_fallback = fallback;
+ else if (fallback != sh->sws_cmng.counter_fallback)
DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
"with others:%d.", PORT_ID(priv), fallback);
#endif
@@ -556,17 +562,38 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
static void
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
- int i;
+ int i, j;
+
+ if (sh->config.dv_flow_en < 2) {
+ memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
+ TAILQ_INIT(&sh->sws_cmng.flow_counters);
+ sh->sws_cmng.min_id = MLX5_CNT_BATCH_OFFSET;
+ sh->sws_cmng.max_id = -1;
+ sh->sws_cmng.last_pool_idx = POOL_IDX_INVALID;
+ rte_spinlock_init(&sh->sws_cmng.pool_update_sl);
+ for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
+ TAILQ_INIT(&sh->sws_cmng.counters[i]);
+ rte_spinlock_init(&sh->sws_cmng.csl[i]);
+ }
+ } else {
+ struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
+ uint32_t fw_max_nb_cnts = attr->max_flow_counter;
+ uint8_t log_dcs = log2above(fw_max_nb_cnts) - 1;
+ uint32_t max_nb_cnts = 0;
+
-	memset(&sh->cmng, 0, sizeof(sh->cmng));
-	TAILQ_INIT(&sh->cmng.flow_counters);
-	sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET;
-	sh->cmng.max_id = -1;
-	sh->cmng.last_pool_idx = POOL_IDX_INVALID;
-	rte_spinlock_init(&sh->cmng.pool_update_sl);
-	for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
-		TAILQ_INIT(&sh->cmng.counters[i]);
-		rte_spinlock_init(&sh->cmng.csl[i]);
+		for (i = 0, j = 0; j < MLX5_HWS_CNT_DCS_NUM; ++i) {
+			int log_dcs_i = log_dcs - i;
+
+			if (log_dcs_i < 0)
+ break;
+ if ((max_nb_cnts | RTE_BIT32(log_dcs_i)) >
+ fw_max_nb_cnts)
+ continue;
+ max_nb_cnts |= RTE_BIT32(log_dcs_i);
+ j++;
+ }
+ sh->hws_max_log_bulk_sz = log_dcs;
+ sh->hws_max_nb_counters = max_nb_cnts;
}
}
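
For reference, the sizing loop above greedily combines at most MLX5_HWS_CNT_DCS_NUM power-of-two bulk sizes, starting from the largest log2 that still fits under the FW counter limit. Below is a minimal standalone sketch of the same arithmetic, with an illustrative FW limit and local stand-ins for log2above()/RTE_BIT32() (the driver helpers themselves are not reimplemented here):

#include <stdint.h>
#include <stdio.h>

#define DCS_NUM 4                        /* Mirrors MLX5_HWS_CNT_DCS_NUM. */
#define BIT32(b) (UINT32_C(1) << (b))    /* Stand-in for RTE_BIT32(). */

/* Stand-in for log2above(): log2 rounded up, e.g. log2above(10000) == 14. */
static int log2above(uint32_t v)
{
	int l = 0;
	uint32_t r = 1;

	while (r < v) {
		r <<= 1;
		l++;
	}
	return l;
}

int main(void)
{
	uint32_t fw_max = 10000;             /* Illustrative FW limit. */
	int log_dcs = log2above(fw_max) - 1; /* 13: largest bulk that fits. */
	uint32_t max_nb = 0;
	int i, j;

	for (i = 0, j = 0; j < DCS_NUM; ++i) {
		int log_i = log_dcs - i;

		if (log_i < 0)
			break;
		if ((max_nb | BIT32(log_i)) > fw_max)
			continue; /* Bulk overshoots the limit; try smaller. */
		max_nb |= BIT32(log_i);
		j++;
	}
	/* Picks 8192 + 1024 + 512 + 256 = 9984 counters from 4 bulks. */
	printf("max HWS counters: %u\n", max_nb);
	return 0;
}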
@@ -607,13 +634,13 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
rte_pause();
}
- if (sh->cmng.pools) {
+ if (sh->sws_cmng.pools) {
struct mlx5_flow_counter_pool *pool;
- uint16_t n_valid = sh->cmng.n_valid;
- bool fallback = sh->cmng.counter_fallback;
+ uint16_t n_valid = sh->sws_cmng.n_valid;
+ bool fallback = sh->sws_cmng.counter_fallback;
for (i = 0; i < n_valid; ++i) {
- pool = sh->cmng.pools[i];
+ pool = sh->sws_cmng.pools[i];
if (!fallback && pool->min_dcs)
claim_zero(mlx5_devx_cmd_destroy
(pool->min_dcs));
@@ -632,14 +659,14 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
}
mlx5_free(pool);
}
- mlx5_free(sh->cmng.pools);
+ mlx5_free(sh->sws_cmng.pools);
}
- mng = LIST_FIRST(&sh->cmng.mem_mngs);
+ mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
while (mng) {
mlx5_flow_destroy_counter_stat_mem_mng(mng);
- mng = LIST_FIRST(&sh->cmng.mem_mngs);
+ mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
}
- memset(&sh->cmng, 0, sizeof(sh->cmng));
+ memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
}
/**
@@ -639,12 +639,45 @@ struct mlx5_geneve_tlv_option_resource {
/* Current time in seconds. */
#define MLX5_CURR_TIME_SEC (rte_rdtsc() / rte_get_tsc_hz())
+/*
+ * HW steering queue-oriented AGE info.
+ * It contains an array of rings, one for each HWS queue.
+ */
+struct mlx5_hws_q_age_info {
+ uint16_t nb_rings; /* Number of aged-out ring lists. */
+ struct rte_ring *aged_lists[]; /* Aged-out lists. */
+};
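
Because aged_lists[] is a flexible array member, the structure is meant to be allocated with room for one ring pointer per configured HWS queue. A hedged allocation sketch, with plain calloc() standing in for the driver's mlx5_malloc() and an opaque stand-in for struct rte_ring:

#include <stdint.h>
#include <stdlib.h>

struct ring;                      /* Opaque stand-in for struct rte_ring. */

struct q_age_info {
	uint16_t nb_rings;            /* Number of aged-out ring lists. */
	struct ring *aged_lists[];    /* One aged-out ring per HWS queue. */
};

static struct q_age_info *
q_age_info_alloc(uint16_t nb_queues)
{
	struct q_age_info *info;

	/* Size the flexible tail for nb_queues ring pointers. */
	info = calloc(1, sizeof(*info) +
			 nb_queues * sizeof(info->aged_lists[0]));
	if (info != NULL)
		info->nb_rings = nb_queues;
	return info;
}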
+
+/*
+ * HW steering AGE info.
+ * It holds a single ring containing all aged-out flow rules.
+ */
+struct mlx5_hws_age_info {
+	struct rte_ring *aged_list; /* Aged-out list. */
+};
+
/* Per-port aging information. */
struct mlx5_age_info {
uint8_t flags; /* Indicate if is new event or need to be triggered. */
- struct mlx5_counters aged_counters; /* Aged counter list. */
- struct aso_age_list aged_aso; /* Aged ASO actions list. */
- rte_spinlock_t aged_sl; /* Aged flow list lock. */
+ union {
+ /* SW/FW steering AGE info. */
+ struct {
+ struct mlx5_counters aged_counters;
+ /* Aged counter list. */
+ struct aso_age_list aged_aso;
+ /* Aged ASO actions list. */
+ rte_spinlock_t aged_sl; /* Aged flow list lock. */
+ };
+ struct {
+ struct mlx5_indexed_pool *ages_ipool;
+ union {
+ struct mlx5_hws_age_info hw_age;
+ /* HW steering AGE info. */
+ struct mlx5_hws_q_age_info *hw_q_age;
+				/* HW steering queue-oriented AGE info. */
+ };
+ };
+ };
};
/* Per port data of shared IB device. */
@@ -1302,6 +1335,9 @@ struct mlx5_dev_ctx_shared {
uint32_t hws_tags:1; /* Check if tags info for HWS initialized. */
uint32_t shared_mark_enabled:1;
/* If mark action is enabled on Rxqs (shared E-Switch domain). */
+ uint32_t hws_max_log_bulk_sz:5;
+	/* Log of the maximal bulk size used for HWS counter allocation. */
+	uint32_t hws_max_nb_counters; /* Maximal number of HWS counters. */
uint32_t max_port; /* Maximal IB device port index. */
struct mlx5_bond_info bond; /* Bonding information. */
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
@@ -1342,7 +1378,8 @@ struct mlx5_dev_ctx_shared {
struct mlx5_list *dest_array_list;
struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
/* List of destination array actions. */
- struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
+ struct mlx5_flow_counter_mng sws_cmng;
+ /* SW steering counters management structure. */
void *default_miss_action; /* Default miss action. */
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
@@ -1670,6 +1707,9 @@ struct mlx5_priv {
LIST_HEAD(flow_hw_at, rte_flow_actions_template) flow_hw_at;
struct mlx5dr_context *dr_ctx; /**< HW steering DR context. */
/* HW steering queue polling mechanism job descriptor LIFO. */
+ uint32_t hws_strict_queue:1;
+ /**< Whether all operations strictly happen on the same HWS queue. */
+ uint32_t hws_age_req:1; /**< Whether this port has AGE indexed pool. */
struct mlx5_hw_q *hw_q;
/* HW steering rte flow table list header. */
LIST_HEAD(flow_hw_tbl, rte_flow_template_table) flow_hw_tbl;
@@ -1985,6 +2025,9 @@ int mlx5_validate_action_ct(struct rte_eth_dev *dev,
const struct rte_flow_action_conntrack *conntrack,
struct rte_flow_error *error);
+int mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
+ void **contexts, uint32_t nb_contexts,
+ struct rte_flow_error *error);
/* mlx5_mp_os.c */
@@ -43,6 +43,9 @@
#define MLX5_PMD_SOFT_COUNTERS 1
#endif
+/* Maximum number of DCS created per port. */
+#define MLX5_HWS_CNT_DCS_NUM 4
+
/* Alarm timeout. */
#define MLX5_ALARM_TIMEOUT_US 100000
@@ -987,6 +987,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.isolate = mlx5_flow_isolate,
.query = mlx5_flow_query,
.dev_dump = mlx5_flow_dev_dump,
+ .get_q_aged_flows = mlx5_flow_get_q_aged_flows,
.get_aged_flows = mlx5_flow_get_aged_flows,
.action_handle_create = mlx5_action_handle_create,
.action_handle_destroy = mlx5_action_handle_destroy,
@@ -8942,11 +8943,11 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
}
for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
- LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
+ LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws,
mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
next);
- LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
- sh->cmng.mem_mng = mem_mng;
+ LIST_INSERT_HEAD(&sh->sws_cmng.mem_mngs, mem_mng, next);
+ sh->sws_cmng.mem_mng = mem_mng;
return 0;
}
@@ -8965,7 +8966,7 @@ static int
mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
struct mlx5_flow_counter_pool *pool)
{
- struct mlx5_flow_counter_mng *cmng = &sh->cmng;
+ struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
/* Resize statistic memory once used out. */
if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
mlx5_flow_create_counter_stat_mem_mng(sh)) {
@@ -8994,14 +8995,14 @@ mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
{
uint32_t pools_n, us;
- pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
+ pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
- sh->cmng.query_thread_on = 0;
+ sh->sws_cmng.query_thread_on = 0;
DRV_LOG(ERR, "Cannot reinitialize query alarm");
} else {
- sh->cmng.query_thread_on = 1;
+ sh->sws_cmng.query_thread_on = 1;
}
}
@@ -9017,12 +9018,12 @@ mlx5_flow_query_alarm(void *arg)
{
struct mlx5_dev_ctx_shared *sh = arg;
int ret;
- uint16_t pool_index = sh->cmng.pool_index;
- struct mlx5_flow_counter_mng *cmng = &sh->cmng;
+ uint16_t pool_index = sh->sws_cmng.pool_index;
+ struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
struct mlx5_flow_counter_pool *pool;
uint16_t n_valid;
- if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
+ if (sh->sws_cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
goto set_alarm;
rte_spinlock_lock(&cmng->pool_update_sl);
pool = cmng->pools[pool_index];
@@ -9035,7 +9036,7 @@ mlx5_flow_query_alarm(void *arg)
/* There is a pool query in progress. */
goto set_alarm;
pool->raw_hw =
- LIST_FIRST(&sh->cmng.free_stat_raws);
+ LIST_FIRST(&sh->sws_cmng.free_stat_raws);
if (!pool->raw_hw)
/* No free counter statistics raw memory. */
goto set_alarm;
@@ -9061,12 +9062,12 @@ mlx5_flow_query_alarm(void *arg)
goto set_alarm;
}
LIST_REMOVE(pool->raw_hw, next);
- sh->cmng.pending_queries++;
+ sh->sws_cmng.pending_queries++;
pool_index++;
if (pool_index >= n_valid)
pool_index = 0;
set_alarm:
- sh->cmng.pool_index = pool_index;
+ sh->sws_cmng.pool_index = pool_index;
mlx5_set_query_alarm(sh);
}
@@ -9149,7 +9150,7 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
struct mlx5_counter_stats_raw *raw_to_free;
uint8_t query_gen = pool->query_gen ^ 1;
- struct mlx5_flow_counter_mng *cmng = &sh->cmng;
+ struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
enum mlx5_counter_type cnt_type =
pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
MLX5_COUNTER_TYPE_ORIGIN;
@@ -9172,9 +9173,9 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
rte_spinlock_unlock(&cmng->csl[cnt_type]);
}
}
- LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
+ LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws, raw_to_free, next);
pool->raw_hw = NULL;
- sh->cmng.pending_queries--;
+ sh->sws_cmng.pending_queries--;
}
static int
@@ -9534,7 +9535,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
struct mlx5_list_inconst *l_inconst;
struct mlx5_list_entry *e;
int lcore_index;
- struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
uint32_t max;
void *action;
@@ -9705,18 +9706,58 @@ mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
{
const struct mlx5_flow_driver_ops *fops;
struct rte_flow_attr attr = { .transfer = 0 };
+ enum mlx5_flow_drv_type type = flow_get_drv_type(dev, &attr);
- if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
- fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
- return fops->get_aged_flows(dev, contexts, nb_contexts,
- error);
+ if (type == MLX5_FLOW_TYPE_DV || type == MLX5_FLOW_TYPE_HW) {
+ fops = flow_get_drv_ops(type);
+ return fops->get_aged_flows(dev, contexts, nb_contexts, error);
}
- DRV_LOG(ERR,
- "port %u get aged flows is not supported.",
- dev->data->port_id);
+ DRV_LOG(ERR, "port %u get aged flows is not supported.",
+ dev->data->port_id);
return -ENOTSUP;
}
+/**
+ * Get aged-out flows per HWS queue.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] queue_id
+ * Flow queue to query.
+ * @param[in] contexts
+ *   The address of an array of pointers to the aged-out flows contexts.
+ * @param[in] nb_contexts
+ *   The length of the contexts array.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Initialized in case of
+ *   error only.
+ *
+ * @return
+ *   The number of contexts retrieved on success, a negative errno value
+ *   otherwise. If nb_contexts is 0, return the total number of aged-out
+ *   contexts. Otherwise, return the number of aged-out flows reported
+ *   in the contexts array.
+ */
+int
+mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
+ void **contexts, uint32_t nb_contexts,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ struct rte_flow_attr attr = { 0 };
+
+ if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_HW) {
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->get_q_aged_flows(dev, queue_id, contexts,
+ nb_contexts, error);
+ }
+ DRV_LOG(ERR, "port %u queue %u get aged flows is not supported.",
+ dev->data->port_id, queue_id);
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "get Q aged flows with incorrect steering mode");
+}
+
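
From the application side, the return-value contract above suggests the usual two-call pattern. A hedged usage sketch (port and queue IDs are illustrative):

#include <stdlib.h>
#include <rte_flow.h>

/* Query the number of aged-out flows on one queue, then fetch them. */
static int
drain_aged_flows(uint16_t port_id, uint32_t queue_id)
{
	struct rte_flow_error error;
	void **contexts;
	int nb, ret;

	/* nb_contexts == 0: only report how many aged-out flows exist. */
	nb = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error);
	if (nb <= 0)
		return nb;
	contexts = calloc(nb, sizeof(*contexts));
	if (contexts == NULL)
		return -1;
	/* Second call fills up to nb user contexts of aged-out flows. */
	ret = rte_flow_get_q_aged_flows(port_id, queue_id, contexts, nb,
					&error);
	/* ... destroy or refresh the rule behind each context here ... */
	free(contexts);
	return ret;
}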
/* Wrapper for driver action_validate op callback */
static int
flow_drv_action_validate(struct rte_eth_dev *dev,
@@ -293,6 +293,8 @@ enum mlx5_feature_name {
#define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
#define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
#define MLX5_FLOW_ACTION_CT (1ull << 41)
+#define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 42)
+#define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 43)
#define MLX5_FLOW_FATE_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
@@ -1099,6 +1101,22 @@ struct rte_flow {
uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. > */
} __rte_packed;
+/*
+ * HWS COUNTER ID's layout
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | T | | D | |
+ * ~ Y | | C | IDX ~
+ * | P | | S | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Bits 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
+ * Bits 25:24 = DCS index
+ * Bits 23:00 = IDX within the DCS bulk this counter belongs to.
+ */
+typedef uint32_t cnt_id_t;
+
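
The layout can be read back with plain shifts and masks; the decode helpers below are hypothetical and only restate the bit positions from the comment:

/* Hypothetical decode helpers for the cnt_id_t layout above. */
#define HWS_CNT_ID_TYPE(id)    (((id) >> 29) & 0x7)   /* Bits 31:29. */
#define HWS_CNT_ID_DCS_IDX(id) (((id) >> 24) & 0x3)   /* Bits 25:24. */
#define HWS_CNT_ID_IDX(id)     ((id) & 0xffffff)      /* Bits 23:00. */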
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
/* HWS flow struct. */
@@ -1112,7 +1130,8 @@ struct rte_flow_hw {
};
	struct rte_flow_template_table *table; /* The table the flow is allocated from. */
struct mlx5dr_rule rule; /* HWS layer data struct. */
- uint32_t cnt_id;
+ uint32_t age_idx;
+ cnt_id_t cnt_id;
uint32_t mtr_id;
} __rte_packed;
@@ -1158,7 +1177,7 @@ struct mlx5_action_construct_data {
uint32_t idx; /* Shared action index. */
} shared_rss;
struct {
- uint32_t id;
+ cnt_id_t id;
} shared_counter;
struct {
uint32_t id;
@@ -1189,6 +1208,7 @@ struct rte_flow_actions_template {
struct rte_flow_action *actions; /* Cached flow actions. */
struct rte_flow_action *masks; /* Cached action masks.*/
struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */
+	uint64_t action_flags; /* Bit-map of all valid actions in the template. */
uint16_t dr_actions_num; /* Amount of DR rules actions. */
uint16_t actions_num; /* Amount of flow actions */
uint16_t *actions_off; /* DR action offset for given rte action offset. */
@@ -1245,7 +1265,7 @@ struct mlx5_hw_actions {
struct mlx5_hw_encap_decap_action *encap_decap;
uint16_t encap_decap_pos; /* Encap/Decap action position. */
uint32_t mark:1; /* Indicate the mark action. */
- uint32_t cnt_id; /* Counter id. */
+ cnt_id_t cnt_id; /* Counter id. */
uint32_t mtr_id; /* Meter id. */
/* Translated DR action array from action template. */
struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
@@ -1619,6 +1639,12 @@ typedef int (*mlx5_flow_get_aged_flows_t)
void **context,
uint32_t nb_contexts,
struct rte_flow_error *error);
+typedef int (*mlx5_flow_get_q_aged_flows_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ void **context,
+ uint32_t nb_contexts,
+ struct rte_flow_error *error);
typedef int (*mlx5_flow_action_validate_t)
(struct rte_eth_dev *dev,
const struct rte_flow_indir_action_conf *conf,
@@ -1825,6 +1851,7 @@ struct mlx5_flow_driver_ops {
mlx5_flow_counter_free_t counter_free;
mlx5_flow_counter_query_t counter_query;
mlx5_flow_get_aged_flows_t get_aged_flows;
+ mlx5_flow_get_q_aged_flows_t get_q_aged_flows;
mlx5_flow_action_validate_t action_validate;
mlx5_flow_action_create_t action_create;
mlx5_flow_action_destroy_t action_destroy;
@@ -5524,7 +5524,7 @@ flow_dv_validate_action_age(uint64_t action_flags,
const struct rte_flow_action_age *age = action->conf;
if (!priv->sh->cdev->config.devx ||
- (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))
+ (priv->sh->sws_cmng.counter_fallback && !priv->sh->aso_age_mng))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
@@ -6085,7 +6085,7 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
struct mlx5_flow_counter_pool **ppool)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
struct mlx5_flow_counter_pool *pool;
/* Decrease to original index and clear shared bit. */
@@ -6179,7 +6179,7 @@ static int
flow_dv_container_resize(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
void *old_pools = cmng->pools;
uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
@@ -6225,7 +6225,7 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
MLX5_ASSERT(pool);
- if (priv->sh->cmng.counter_fallback)
+ if (priv->sh->sws_cmng.counter_fallback)
return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
0, pkts, bytes, 0, NULL, NULL, 0);
rte_spinlock_lock(&pool->sl);
@@ -6262,8 +6262,8 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool;
- struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
- bool fallback = priv->sh->cmng.counter_fallback;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
+ bool fallback = priv->sh->sws_cmng.counter_fallback;
uint32_t size = sizeof(*pool);
size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
@@ -6324,14 +6324,14 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
uint32_t age)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
struct mlx5_flow_counter_pool *pool;
struct mlx5_counters tmp_tq;
struct mlx5_devx_obj *dcs = NULL;
struct mlx5_flow_counter *cnt;
enum mlx5_counter_type cnt_type =
age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
- bool fallback = priv->sh->cmng.counter_fallback;
+ bool fallback = priv->sh->sws_cmng.counter_fallback;
uint32_t i;
if (fallback) {
@@ -6395,8 +6395,8 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt_free = NULL;
- bool fallback = priv->sh->cmng.counter_fallback;
- struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ bool fallback = priv->sh->sws_cmng.counter_fallback;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
enum mlx5_counter_type cnt_type =
age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
uint32_t cnt_idx;
@@ -6442,7 +6442,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
&cnt_free->bytes))
goto err;
- if (!fallback && !priv->sh->cmng.query_thread_on)
+ if (!fallback && !priv->sh->sws_cmng.query_thread_on)
/* Start the asynchronous batch query by the host thread. */
mlx5_set_query_alarm(priv->sh);
/*
@@ -6570,7 +6570,7 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
* this case, lock will not be needed as query callback and release
* function both operate with the different list.
*/
- if (!priv->sh->cmng.counter_fallback) {
+ if (!priv->sh->sws_cmng.counter_fallback) {
rte_spinlock_lock(&pool->csl);
TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
rte_spinlock_unlock(&pool->csl);
@@ -6578,10 +6578,10 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
cnt->dcs_when_free = cnt->dcs_when_active;
cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
MLX5_COUNTER_TYPE_ORIGIN;
- rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
- TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
+ rte_spinlock_lock(&priv->sh->sws_cmng.csl[cnt_type]);
+ TAILQ_INSERT_TAIL(&priv->sh->sws_cmng.counters[cnt_type],
cnt, next);
- rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
+ rte_spinlock_unlock(&priv->sh->sws_cmng.csl[cnt_type]);
}
}
@@ -477,7 +477,8 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,
enum rte_flow_action_type type,
uint16_t action_src,
uint16_t action_dst)
-{ struct mlx5_action_construct_data *act_data;
+{
+ struct mlx5_action_construct_data *act_data;
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
@@ -512,7 +513,8 @@ __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
uint16_t action_src,
uint16_t action_dst,
uint16_t len)
-{ struct mlx5_action_construct_data *act_data;
+{
+ struct mlx5_action_construct_data *act_data;
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
@@ -582,7 +584,8 @@ __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
uint16_t action_dst,
uint32_t idx,
struct mlx5_shared_action_rss *rss)
-{ struct mlx5_action_construct_data *act_data;
+{
+ struct mlx5_action_construct_data *act_data;
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
@@ -621,7 +624,8 @@ __flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
uint16_t action_src,
uint16_t action_dst,
cnt_id_t cnt_id)
-{ struct mlx5_action_construct_data *act_data;
+{
+ struct mlx5_action_construct_data *act_data;
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
@@ -717,6 +721,10 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,
action_src, action_dst, act_idx))
return -1;
break;
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
+		/* Not supported, prevented by the validate function. */
+ MLX5_ASSERT(0);
+ break;
case MLX5_INDIRECT_ACTION_TYPE_CT:
if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
idx, &acts->rule_acts[action_dst]))
@@ -1109,7 +1117,7 @@ flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t start_pos,
cnt_id_t cnt_id;
int ret;
- ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id);
+ ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);
if (ret != 0)
return ret;
ret = mlx5_hws_cnt_pool_get_action_offset
@@ -1250,8 +1258,6 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
* Pointer to the rte_eth_dev structure.
* @param[in] cfg
* Pointer to the table configuration.
- * @param[in] item_templates
- * Item template array to be binded to the table.
* @param[in/out] acts
* Pointer to the template HW steering DR actions.
* @param[in] at
@@ -1260,7 +1266,7 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
* Pointer to error structure.
*
* @return
- * Table on success, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno otherwise and rte_errno is set.
*/
static int
__flow_hw_actions_translate(struct rte_eth_dev *dev,
@@ -1289,6 +1295,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint16_t jump_pos;
uint32_t ct_idx;
int err;
+ uint32_t target_grp = 0;
flow_hw_modify_field_init(&mhdr, at);
if (attr->transfer)
@@ -1516,8 +1523,42 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
action_pos))
goto err;
break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ flow_hw_translate_group(dev, cfg, attr->group,
+ &target_grp, error);
+ if (target_grp == 0) {
+ __flow_hw_action_template_destroy(dev, acts);
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Age action on root table is not supported in HW steering mode");
+ }
+ action_pos = at->actions_off[actions - at->actions];
+ if (__flow_hw_act_data_general_append(priv, acts,
+ actions->type,
+ actions - action_start,
+ action_pos))
+ goto err;
+ break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- action_pos = at->actions_off[actions - action_start];
+ flow_hw_translate_group(dev, cfg, attr->group,
+ &target_grp, error);
+ if (target_grp == 0) {
+ __flow_hw_action_template_destroy(dev, acts);
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Counter action on root table is not supported in HW steering mode");
+ }
+ if ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||
+ (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
+ /*
+				 * When both COUNT and AGE are requested, they
+				 * are saved as a single AGE action, which also
+				 * creates the counter.
+ */
+ break;
+ action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_count *)
masks->conf)->id) {
@@ -1744,6 +1785,10 @@ flow_hw_shared_action_get(struct rte_eth_dev *dev,
* Pointer to the flow table.
* @param[in] it_idx
* Item template index the action template refer to.
+ * @param[in] action_flags
+ * Actions bit-map detected in this template.
+ * @param[in, out] flow
+ * Pointer to the flow containing the counter.
* @param[in] rule_act
* Pointer to the shared action's destination rule DR action.
*
@@ -1754,7 +1799,8 @@ static __rte_always_inline int
flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
const struct rte_flow_action *action,
struct rte_flow_template_table *table,
- const uint8_t it_idx,
+ const uint8_t it_idx, uint64_t action_flags,
+ struct rte_flow_hw *flow,
struct mlx5dr_rule_action *rule_act)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1762,11 +1808,14 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
struct mlx5_action_construct_data act_data;
struct mlx5_shared_action_rss *shared_rss;
struct mlx5_aso_mtr *aso_mtr;
+ struct mlx5_age_info *age_info;
+ struct mlx5_hws_age_param *param;
uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
uint32_t idx = act_idx &
((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
uint64_t item_flags;
+ cnt_id_t age_cnt;
memset(&act_data, 0, sizeof(act_data));
switch (type) {
@@ -1792,6 +1841,44 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
&rule_act->action,
&rule_act->counter.offset))
return -1;
+ flow->cnt_id = act_idx;
+ break;
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
+ /*
+ * Save the index with the indirect type, to recognize
+ * it in flow destroy.
+ */
+ flow->age_idx = act_idx;
+ if (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
+ /*
+			 * The mutual update for indirect AGE & COUNT will be
+			 * performed later, after we have IDs for both of them.
+ */
+ break;
+ age_info = GET_PORT_AGE_INFO(priv);
+ param = mlx5_ipool_get(age_info->ages_ipool, idx);
+ if (param == NULL)
+ return -1;
+ if (action_flags & MLX5_FLOW_ACTION_COUNT) {
+ if (mlx5_hws_cnt_pool_get(priv->hws_cpool,
+ ¶m->queue_id, &age_cnt,
+ idx) < 0)
+ return -1;
+ flow->cnt_id = age_cnt;
+ param->nb_cnts++;
+ } else {
+ /*
+			 * Get the counter of this indirect AGE or create one
+			 * if it doesn't exist.
+ */
+ age_cnt = mlx5_hws_age_cnt_get(priv, param, idx);
+ if (age_cnt == 0)
+ return -1;
+ }
+ if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
+ age_cnt, &rule_act->action,
+ &rule_act->counter.offset))
+ return -1;
break;
case MLX5_INDIRECT_ACTION_TYPE_CT:
if (flow_hw_ct_compile(dev, queue, idx, rule_act))
@@ -1952,7 +2039,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
const uint8_t it_idx,
const struct rte_flow_action actions[],
struct mlx5dr_rule_action *rule_acts,
- uint32_t queue)
+ uint32_t queue,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
@@ -1965,6 +2053,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
const struct rte_flow_item *enc_item = NULL;
const struct rte_flow_action_ethdev *port_action = NULL;
const struct rte_flow_action_meter *meter = NULL;
+ const struct rte_flow_action_age *age = NULL;
uint8_t *buf = job->encap_data;
struct rte_flow_attr attr = {
.ingress = 1,
@@ -1972,6 +2061,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
uint32_t ft_flag;
size_t encap_len = 0;
int ret;
+ uint32_t age_idx = 0;
struct mlx5_aso_mtr *aso_mtr;
rte_memcpy(rule_acts, hw_acts->rule_acts, sizeof(*rule_acts) * at->dr_actions_num);
@@ -2024,6 +2114,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
(dev, queue, action, table, it_idx,
+ at->action_flags, job->flow,
&rule_acts[act_data->action_dst]))
return -1;
break;
@@ -2132,9 +2223,32 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))
return -1;
break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ age = action->conf;
+ /*
+ * First, create the AGE parameter, then create its
+ * counter later:
+			 * Regular counter - in the COUNT case below.
+			 * Indirect counter - updated after the loop.
+ */
+ age_idx = mlx5_hws_age_action_create(priv, queue, 0,
+ age,
+ job->flow->idx,
+ error);
+ if (age_idx == 0)
+ return -rte_errno;
+ job->flow->age_idx = age_idx;
+ if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
+ /*
+				 * When AGE uses an indirect counter, no new
+				 * counter is created; it is only linked to the
+				 * AGE parameter after the loop.
+ */
+ break;
+ /* Fall-through. */
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, &queue,
- &cnt_id);
+ &cnt_id, age_idx);
if (ret != 0)
return ret;
ret = mlx5_hws_cnt_pool_get_action_offset
@@ -2191,6 +2305,25 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
break;
}
}
+ if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {
+ if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {
+ age_idx = job->flow->age_idx & MLX5_HWS_AGE_IDX_MASK;
+ if (mlx5_hws_cnt_age_get(priv->hws_cpool,
+ job->flow->cnt_id) != age_idx)
+ /*
+				 * This is the first use of this indirect
+				 * counter with this indirect AGE; increase
+				 * the AGE's counter reference count.
+ */
+ mlx5_hws_age_nb_cnt_increase(priv, age_idx);
+ }
+ /*
+		 * Update this indirect counter with the indirect/direct
+		 * AGE that uses it.
+ */
+ mlx5_hws_cnt_age_set(priv->hws_cpool, job->flow->cnt_id,
+ age_idx);
+ }
if (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {
rule_acts[hw_acts->encap_decap_pos].reformat.offset =
job->flow->idx - 1;
@@ -2340,8 +2473,10 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
* No need to copy and contrust a new "actions" list based on the
* user's input, in order to save the cost.
*/
- if (flow_hw_actions_construct(dev, job, &table->ats[action_template_index],
- pattern_template_index, actions, rule_acts, queue)) {
+ if (flow_hw_actions_construct(dev, job,
+ &table->ats[action_template_index],
+ pattern_template_index, actions,
+ rule_acts, queue, error)) {
rte_errno = EINVAL;
goto free;
}
@@ -2426,6 +2561,49 @@ flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
"fail to create rte flow");
}
+/**
+ * Release the AGE and counter for given flow.
+ *
+ * @param[in] priv
+ * Pointer to the port private data structure.
+ * @param[in] queue
+ * The queue to release the counter.
+ * @param[in, out] flow
+ * Pointer to the flow containing the counter.
+ * @param[out] error
+ * Pointer to error structure.
+ */
+static void
+flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
+ struct rte_flow_hw *flow,
+ struct rte_flow_error *error)
+{
+ if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {
+ if (flow->age_idx && !mlx5_hws_age_is_indirect(flow->age_idx)) {
+ /* Remove this AGE parameter from indirect counter. */
+ mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);
+ /* Release the AGE parameter. */
+ mlx5_hws_age_action_destroy(priv, flow->age_idx, error);
+ flow->age_idx = 0;
+ }
+ return;
+ }
+ /* Put the counter first to reduce the race risk in BG thread. */
+ mlx5_hws_cnt_pool_put(priv->hws_cpool, &queue, &flow->cnt_id);
+ flow->cnt_id = 0;
+ if (flow->age_idx) {
+ if (mlx5_hws_age_is_indirect(flow->age_idx)) {
+ uint32_t idx = flow->age_idx & MLX5_HWS_AGE_IDX_MASK;
+
+ mlx5_hws_age_nb_cnt_decrease(priv, idx);
+ } else {
+ /* Release the AGE parameter. */
+ mlx5_hws_age_action_destroy(priv, flow->age_idx, error);
+ }
+ flow->age_idx = 0;
+ }
+}
+
/**
* Pull the enqueued flows.
*
@@ -2472,13 +2650,9 @@ flow_hw_pull(struct rte_eth_dev *dev,
flow_hw_jump_release(dev, job->flow->jump);
else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
mlx5_hrxq_obj_release(dev, job->flow->hrxq);
- if (mlx5_hws_cnt_id_valid(job->flow->cnt_id) &&
- mlx5_hws_cnt_is_shared
- (priv->hws_cpool, job->flow->cnt_id) == false) {
- mlx5_hws_cnt_pool_put(priv->hws_cpool, &queue,
- &job->flow->cnt_id);
- job->flow->cnt_id = 0;
- }
+ if (mlx5_hws_cnt_id_valid(job->flow->cnt_id))
+ flow_hw_age_count_release(priv, queue,
+ job->flow, error);
if (job->flow->mtr_id) {
mlx5_ipool_free(pool->idx_pool, job->flow->mtr_id);
job->flow->mtr_id = 0;
@@ -3131,100 +3305,315 @@ flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
return 0;
}
-static inline int
-flow_hw_action_meta_copy_insert(const struct rte_flow_action actions[],
- const struct rte_flow_action masks[],
- const struct rte_flow_action *ins_actions,
- const struct rte_flow_action *ins_masks,
- struct rte_flow_action *new_actions,
- struct rte_flow_action *new_masks,
- uint16_t *ins_pos)
+/**
+ * Validate AGE action.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ *   Pointer to the AGE action.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] fixed_cnt
+ * Indicator if this list has a fixed COUNT action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_validate_action_age(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ uint64_t action_flags, bool fixed_cnt,
+ struct rte_flow_error *error)
{
- uint16_t idx, total = 0;
- uint16_t end_idx = UINT16_MAX;
- bool act_end = false;
- bool modify_field = false;
- bool rss_or_queue = false;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
- MLX5_ASSERT(actions && masks);
- MLX5_ASSERT(new_actions && new_masks);
- MLX5_ASSERT(ins_actions && ins_masks);
- for (idx = 0; !act_end; idx++) {
- switch (actions[idx].type) {
- case RTE_FLOW_ACTION_TYPE_RSS:
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- /* It is assumed that application provided only single RSS/QUEUE action. */
- MLX5_ASSERT(!rss_or_queue);
- rss_or_queue = true;
- break;
- case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
- modify_field = true;
- break;
- case RTE_FLOW_ACTION_TYPE_END:
- end_idx = idx;
- act_end = true;
- break;
- default:
- break;
+ if (!priv->sh->cdev->config.devx)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "AGE action not supported");
+ if (age_info->ages_ipool == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "aging pool not initialized");
+ if ((action_flags & MLX5_FLOW_ACTION_AGE) ||
+ (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "duplicate AGE actions set");
+ if (fixed_cnt)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "AGE and fixed COUNT combination is not supported");
+ return 0;
+}
+
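
Concretely, the checks above permit a single AGE per template and reject pairing it with a fixed (ID-masked) COUNT. A hedged example of an action/mask pair that passes validation (values are placeholders):

/* Illustrative template: one AGE plus one non-fixed COUNT is accepted;
 * the counter is then created by the AGE handling, not the COUNT one.
 */
static const struct rte_flow_action_age age_conf = { .timeout = 10 };
static const struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_action masks[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_AGE },
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT }, /* No fixed counter ID. */
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};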
+/**
+ * Validate count action.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ *   Pointer to the COUNT action.
+ * @param[in] mask
+ *   Pointer to the COUNT action mask.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_validate_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ uint64_t action_flags,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_count *count = mask->conf;
+
+ if (!priv->sh->cdev->config.devx)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "count action not supported");
+ if (!priv->hws_cpool)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "counters pool not initialized");
+ if ((action_flags & MLX5_FLOW_ACTION_COUNT) ||
+ (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "duplicate count actions set");
+ if (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, mask,
+ "AGE and COUNT action shared by mask combination is not supported");
+ return 0;
+}
+
+/**
+ * Validate meter_mark action.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ *   Pointer to the METER_MARK action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ RTE_SET_USED(action);
+
+ if (!priv->sh->cdev->config.devx)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "meter_mark action not supported");
+ if (!priv->hws_mpool)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "meter_mark pool not initialized");
+ return 0;
+}
+
+/**
+ * Validate indirect action.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to the indirect action.
+ * @param[in] mask
+ * Pointer to the indirect action mask.
+ * @param[in, out] action_flags
+ * Holds the actions detected until now.
+ * @param[in, out] fixed_cnt
+ * Pointer to indicator if this list has a fixed COUNT action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ uint64_t *action_flags, bool *fixed_cnt,
+ struct rte_flow_error *error)
+{
+ uint32_t type;
+ int ret;
+
+ if (!mask)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Unable to determine indirect action type without a mask specified");
+ type = mask->type;
+ switch (type) {
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ ret = flow_hw_validate_action_meter_mark(dev, mask, error);
+ if (ret < 0)
+ return ret;
+ *action_flags |= MLX5_FLOW_ACTION_METER;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ /* TODO: Validation logic (same as flow_hw_actions_validate) */
+ *action_flags |= MLX5_FLOW_ACTION_RSS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ /* TODO: Validation logic (same as flow_hw_actions_validate) */
+ *action_flags |= MLX5_FLOW_ACTION_CT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ if (action->conf && mask->conf) {
+ if ((*action_flags & MLX5_FLOW_ACTION_AGE) ||
+ (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
+ /*
+				 * AGE cannot use an indirect counter that is
+				 * shared with other flow rules.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "AGE and fixed COUNT combination is not supported");
+ *fixed_cnt = true;
}
+ ret = flow_hw_validate_action_count(dev, action, mask,
+ *action_flags, error);
+ if (ret < 0)
+ return ret;
+ *action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_hw_validate_action_age(dev, action, *action_flags,
+ *fixed_cnt, error);
+ if (ret < 0)
+ return ret;
+ *action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
+ break;
+ default:
+ DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,
+ "Unsupported indirect action type");
}
- if (!rss_or_queue)
- return 0;
- else if (idx >= MLX5_HW_MAX_ACTS)
- return -1; /* No more space. */
- total = idx;
- /*
- * If actions template contains MODIFY_FIELD action, then meta copy action can be inserted
- * at the template's end. Position of MODIFY_HDR action is based on the position of the
- * first MODIFY_FIELD flow action.
- */
- if (modify_field) {
- *ins_pos = end_idx;
- goto insert_meta_copy;
- }
- /*
- * If actions template does not contain MODIFY_FIELD action, then meta copy action must be
- * inserted at aplace conforming with action order defined in steering/mlx5dr_action.c.
+ return 0;
+}
+
+/**
+ * Validate raw_encap action.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ *   Pointer to the RAW_ENCAP action.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_raw_encap *raw_encap_data = action->conf;
+
+ if (!raw_encap_data || !raw_encap_data->size || !raw_encap_data->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "invalid raw_encap_data");
+ return 0;
+}
+
+static inline uint16_t
+flow_hw_template_expand_modify_field(const struct rte_flow_action actions[],
+ const struct rte_flow_action masks[],
+ const struct rte_flow_action *mf_action,
+ const struct rte_flow_action *mf_mask,
+ struct rte_flow_action *new_actions,
+ struct rte_flow_action *new_masks,
+ uint64_t flags, uint32_t act_num)
+{
+ uint32_t i, tail;
+
+ MLX5_ASSERT(actions && masks);
+ MLX5_ASSERT(new_actions && new_masks);
+ MLX5_ASSERT(mf_action && mf_mask);
+ if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
+ /*
+ * Application action template already has Modify Field.
+	 * Its location will be used in DR.
+ * Expanded MF action can be added before the END.
+ */
+ i = act_num - 1;
+ goto insert;
+ }
+ /**
+ * Locate the first action positioned BEFORE the new MF.
+ *
+ * Search for a place to insert modify header
+ * from the END action backwards:
+ * 1. END is always present in actions array
+ * 2. END location is always at action[act_num - 1]
+ * 3. END always positioned AFTER modify field location
+ *
+ * Relative actions order is the same for RX, TX and FDB.
+ *
+ * Current actions order (draft-3)
+ * @see action_order_arr[]
*/
- act_end = false;
- for (idx = 0; !act_end; idx++) {
- switch (actions[idx].type) {
- case RTE_FLOW_ACTION_TYPE_COUNT:
- case RTE_FLOW_ACTION_TYPE_METER:
- case RTE_FLOW_ACTION_TYPE_METER_MARK:
- case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ for (i = act_num - 2; (int)i >= 0; i--) {
+ enum rte_flow_action_type type = actions[i].type;
+
+ if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
+ type = masks[i].type;
+ switch (type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
- case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ case RTE_FLOW_ACTION_TYPE_JUMP:
case RTE_FLOW_ACTION_TYPE_QUEUE:
- *ins_pos = idx;
- act_end = true;
- break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ case RTE_FLOW_ACTION_TYPE_VOID:
case RTE_FLOW_ACTION_TYPE_END:
- act_end = true;
break;
default:
+ i++; /* new MF inserted AFTER actions[i] */
+ goto insert;
break;
}
}
-insert_meta_copy:
- MLX5_ASSERT(*ins_pos != UINT16_MAX);
- MLX5_ASSERT(*ins_pos < total);
- /* Before the position, no change for the actions. */
- for (idx = 0; idx < *ins_pos; idx++) {
- new_actions[idx] = actions[idx];
- new_masks[idx] = masks[idx];
- }
- /* Insert the new action and mask to the position. */
- new_actions[idx] = *ins_actions;
- new_masks[idx] = *ins_masks;
- /* Remaining content is right shifted by one position. */
- for (; idx < total; idx++) {
- new_actions[idx + 1] = actions[idx];
- new_masks[idx + 1] = masks[idx];
- }
- return 0;
+ i = 0;
+insert:
+	tail = act_num - i; /* Number of actions to move. */
+ memcpy(new_actions, actions, sizeof(actions[0]) * i);
+ new_actions[i] = *mf_action;
+ memcpy(new_actions + i + 1, actions + i, sizeof(actions[0]) * tail);
+ memcpy(new_masks, masks, sizeof(masks[0]) * i);
+ new_masks[i] = *mf_mask;
+ memcpy(new_masks + i + 1, masks + i, sizeof(masks[0]) * tail);
+ return i;
}
static int
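
A worked trace of the insertion-point search above, on a hypothetical template (act_num counts the END entry):

/* actions = { MARK, RAW_ENCAP, QUEUE, END }, act_num = 4:
 *   i = 2: QUEUE     -> in the switch list, keep scanning backwards
 *   i = 1: RAW_ENCAP -> in the switch list, keep scanning backwards
 *   i = 0: MARK      -> default case, insert AFTER it: i becomes 1
 * result  = { MARK, MODIFY_FIELD(meta copy), RAW_ENCAP, QUEUE, END }
 * With MLX5_FLOW_ACTION_MODIFY_FIELD already set in flags, i is simply
 * act_num - 1 and the copy lands right before END.
 */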
@@ -3295,13 +3684,17 @@ flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
}
static int
-flow_hw_actions_validate(struct rte_eth_dev *dev,
- const struct rte_flow_actions_template_attr *attr,
- const struct rte_flow_action actions[],
- const struct rte_flow_action masks[],
- struct rte_flow_error *error)
+mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_actions_template_attr *attr,
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action masks[],
+ uint64_t *act_flags,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_count *count_mask = NULL;
+ bool fixed_cnt = false;
+ uint64_t action_flags = 0;
uint16_t i;
bool actions_end = false;
int ret;
@@ -3327,46 +3720,70 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- /* TODO: Validation logic */
+ ret = flow_hw_validate_action_indirect(dev, action,
+ mask,
+ &action_flags,
+ &fixed_cnt,
+ error);
+ if (ret < 0)
+ return ret;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_MARK;
break;
case RTE_FLOW_ACTION_TYPE_DROP:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_DROP;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_RSS;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
- /* TODO: Validation logic */
+ ret = flow_hw_validate_action_raw_encap(dev, action, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_METER:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_METER;
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- /* TODO: Validation logic */
+ ret = flow_hw_validate_action_meter_mark(dev, action,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_METER;
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
ret = flow_hw_validate_action_modify_field(action,
@@ -3374,21 +3791,43 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,
error);
if (ret < 0)
return ret;
+ action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
ret = flow_hw_validate_action_represented_port
(dev, action, mask, error);
if (ret < 0)
return ret;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ if (count_mask && count_mask->id)
+ fixed_cnt = true;
+ ret = flow_hw_validate_action_age(dev, action,
+ action_flags,
+ fixed_cnt, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- /* TODO: Validation logic */
+ ret = flow_hw_validate_action_count(dev, action, mask,
+ action_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ count_mask = mask->conf;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
/* TODO: Validation logic */
+ action_flags |= MLX5_FLOW_ACTION_CT;
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
ret = flow_hw_validate_action_push_vlan
@@ -3398,6 +3837,7 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,
i += is_of_vlan_pcp_present(action) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
MLX5_HW_VLAN_PUSH_VID_IDX;
+ action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
@@ -3409,9 +3849,23 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,
"action not supported in template API");
}
}
+ if (act_flags != NULL)
+ *act_flags = action_flags;
return 0;
}
+static int
+flow_hw_actions_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_actions_template_attr *attr,
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action masks[],
+ struct rte_flow_error *error)
+{
+ return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL,
+ error);
+}
+
static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,
[RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,
@@ -3424,7 +3878,6 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_TNL_L2_TO_L2,
[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
- [RTE_FLOW_ACTION_TYPE_COUNT] = MLX5DR_ACTION_TYP_CTR,
[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
@@ -3434,7 +3887,7 @@ static int
flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
unsigned int action_src,
enum mlx5dr_action_type *action_types,
- uint16_t *curr_off,
+ uint16_t *curr_off, uint16_t *cnt_off,
struct rte_flow_actions_template *at)
{
uint32_t type;
@@ -3451,10 +3904,18 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
action_types[*curr_off] = MLX5DR_ACTION_TYP_TIR;
*curr_off = *curr_off + 1;
break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
case RTE_FLOW_ACTION_TYPE_COUNT:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_CTR;
- *curr_off = *curr_off + 1;
+ /*
+		 * Both AGE and COUNT actions need a counter: the first one
+		 * fills the action_types array, the second only reuses the
+		 * saved offset.
+ */
+ if (*cnt_off == UINT16_MAX) {
+ *cnt_off = *curr_off;
+ action_types[*cnt_off] = MLX5DR_ACTION_TYP_CTR;
+ *curr_off = *curr_off + 1;
+ }
+ at->actions_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
at->actions_off[action_src] = *curr_off;
@@ -3493,6 +3954,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
enum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_TNL_L2_TO_L2;
uint16_t reformat_off = UINT16_MAX;
uint16_t mhdr_off = UINT16_MAX;
+ uint16_t cnt_off = UINT16_MAX;
int ret;
for (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
const struct rte_flow_action_raw_encap *raw_encap_data;
@@ -3505,9 +3967,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- ret = flow_hw_dr_actions_template_handle_shared(&at->masks[i], i,
- action_types,
- &curr_off, at);
+ ret = flow_hw_dr_actions_template_handle_shared
+ (&at->masks[i],
+ i,
+ action_types,
+ &curr_off,
+ &cnt_off, at);
if (ret)
return NULL;
break;
@@ -3563,6 +4028,19 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ /*
+			 * Both AGE and COUNT actions need a counter: the
+			 * first one fills the action_types array, the second
+			 * only reuses the saved offset.
+ */
+ if (cnt_off == UINT16_MAX) {
+ cnt_off = curr_off++;
+ action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
+ }
+ at->actions_off[i] = cnt_off;
+ break;
default:
type = mlx5_hw_dr_action_types[at->actions[i].type];
at->actions_off[i] = curr_off;
@@ -3703,6 +4181,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
unsigned int i;
struct rte_flow_actions_template *at = NULL;
uint16_t pos = UINT16_MAX;
+ uint64_t action_flags = 0;
struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
struct rte_flow_action *ra = (void *)(uintptr_t)actions;
@@ -3745,22 +4224,9 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
.conf = &rx_mreg_mask,
};
- if (flow_hw_actions_validate(dev, attr, actions, masks, error))
+ if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
+ &action_flags, error))
return NULL;
- if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
- priv->sh->config.dv_esw_en) {
- /* Application should make sure only one Q/RSS exist in one rule. */
- if (flow_hw_action_meta_copy_insert(actions, masks, &rx_cpy, &rx_cpy_mask,
- tmp_action, tmp_mask, &pos)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "Failed to concatenate new action/mask");
- return NULL;
- } else if (pos != UINT16_MAX) {
- ra = tmp_action;
- rm = tmp_mask;
- }
- }
for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
switch (ra[i].type) {
/* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
@@ -3786,6 +4252,29 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
return NULL;
}
+ if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
+ priv->sh->config.dv_esw_en &&
+ (action_flags &
+	     (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
+ /* Insert META copy */
+ if (act_num + 1 > MLX5_HW_MAX_ACTS) {
+ rte_flow_error_set(error, E2BIG,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "cannot expand: too many actions");
+ return NULL;
+ }
+		/* Application should make sure only one Q/RSS exists in one rule. */
+ pos = flow_hw_template_expand_modify_field(actions, masks,
+ &rx_cpy,
+ &rx_cpy_mask,
+ tmp_action, tmp_mask,
+ action_flags,
+ act_num);
+ ra = tmp_action;
+ rm = tmp_mask;
+ act_num++;
+ action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+ }
if (set_vlan_vid_ix != -1) {
/* If temporary action buffer was not used, copy template actions to it */
if (ra == actions && rm == masks) {
@@ -3856,6 +4345,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
at->tmpl = flow_hw_dr_actions_template_create(at);
if (!at->tmpl)
goto error;
+ at->action_flags = action_flags;
__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
return at;
@@ -4199,6 +4689,7 @@ flow_hw_info_get(struct rte_eth_dev *dev,
struct rte_flow_queue_info *queue_info,
struct rte_flow_error *error __rte_unused)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
uint16_t port_id = dev->data->port_id;
struct rte_mtr_capabilities mtr_cap;
int ret;
@@ -4215,6 +4706,8 @@ flow_hw_info_get(struct rte_eth_dev *dev,
port_info->max_nb_meter_profiles = UINT32_MAX;
port_info->max_nb_meter_policies = UINT32_MAX;
}
+ port_info->max_nb_counters = priv->sh->hws_max_nb_counters;
+ port_info->max_nb_aging_objects = port_info->max_nb_counters;
return 0;
}
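
A hedged application-side sketch tying these capabilities to the configure knobs added below (sizes are arbitrary; note that flow_hw_configure() rejects nb_aging_objects when nb_counters is zero):

#include <rte_common.h>
#include <rte_flow.h>

/* Illustrative: size AGE/counter pools from the reported capabilities. */
static int
setup_hws_aging(uint16_t port_id)
{
	struct rte_flow_port_info info;
	struct rte_flow_queue_info qinfo;
	struct rte_flow_error error;
	const struct rte_flow_queue_attr qattr = { .size = 64 };
	const struct rte_flow_queue_attr *qattrs[1] = { &qattr };
	struct rte_flow_port_attr attr = { 0 };
	int ret;

	ret = rte_flow_info_get(port_id, &info, &qinfo, &error);
	if (ret < 0)
		return ret;
	attr.nb_counters = RTE_MIN(info.max_nb_counters, UINT32_C(1) << 16);
	attr.nb_aging_objects = attr.nb_counters; /* AGE consumes counters. */
	attr.flags = RTE_FLOW_PORT_FLAG_STRICT_QUEUE; /* Per-queue aging. */
	return rte_flow_configure(port_id, &attr, 1, qattrs, &error);
}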
@@ -5593,8 +6086,6 @@ flow_hw_configure(struct rte_eth_dev *dev,
goto err;
}
}
- if (_queue_attr)
- mlx5_free(_queue_attr);
if (port_attr->nb_conn_tracks) {
mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +
sizeof(*priv->ct_mng);
@@ -5611,13 +6102,35 @@ flow_hw_configure(struct rte_eth_dev *dev,
}
if (port_attr->nb_counters) {
priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
- nb_queue);
+ nb_queue);
if (priv->hws_cpool == NULL)
goto err;
}
+ if (port_attr->nb_aging_objects) {
+ if (port_attr->nb_counters == 0) {
+ /*
+ * Aging management uses counters. The number of
+ * requested counters should account for a counter for
+ * each flow rule containing an AGE action without a
+ * counter.
+ */
+ DRV_LOG(ERR, "Port %u AGE objects are requested (%u) "
+ "but no counters are requested.",
+ dev->data->port_id,
+ port_attr->nb_aging_objects);
+ rte_errno = EINVAL;
+ goto err;
+ }
+ ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue);
+ if (ret < 0)
+ goto err;
+ }
ret = flow_hw_create_vlan(dev);
if (ret)
goto err;
+ if (_queue_attr)
+ mlx5_free(_queue_attr);
+ if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
+ priv->hws_strict_queue = 1;
return 0;
err:
if (priv->hws_ctpool) {
@@ -5628,6 +6141,10 @@ flow_hw_configure(struct rte_eth_dev *dev,
flow_hw_ct_mng_destroy(dev, priv->ct_mng);
priv->ct_mng = NULL;
}
+ if (priv->hws_age_req)
+ mlx5_hws_age_pool_destroy(priv);
+ if (priv->hws_cpool)
+ mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
flow_hw_free_vport_actions(priv);
for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
if (priv->hw_drop[i])
@@ -5701,6 +6218,8 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
mlx5_ipool_destroy(priv->acts_ipool);
priv->acts_ipool = NULL;
}
+ if (priv->hws_age_req)
+ mlx5_hws_age_pool_destroy(priv);
if (priv->hws_cpool)
mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
if (priv->hws_ctpool) {
@@ -6037,13 +6556,53 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
MLX5_ACTION_CTX_CT_GEN_IDX(PORT_ID(priv), ct_idx);
}
+/**
+ * Validate shared action.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ * Which queue to be used.
+ * @param[in] attr
+ * Operation attribute.
+ * @param[in] conf
+ * Indirect action configuration.
+ * @param[in] action
+ * rte_flow action detail.
+ * @param[in] user_data
+ * Pointer to the user_data.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ RTE_SET_USED(attr);
+ RTE_SET_USED(queue);
+ RTE_SET_USED(user_data);
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ return flow_hw_validate_action_meter_mark(dev, action, error);
+ default:
+ return flow_dv_action_validate(dev, conf, action, error);
+ }
+}
+
/**
* Create shared action.
*
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] queue
- * Which queue to be used..
+ * Which queue to be used.
* @param[in] attr
* Operation attribute.
* @param[in] conf
@@ -6068,16 +6627,32 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
{
struct rte_flow_action_handle *handle = NULL;
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_age *age;
struct mlx5_aso_mtr *aso_mtr;
cnt_id_t cnt_id;
uint32_t mtr_id;
+ uint32_t age_idx;
- RTE_SET_USED(queue);
RTE_SET_USED(attr);
RTE_SET_USED(user_data);
switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ age = action->conf;
+ age_idx = mlx5_hws_age_action_create(priv, queue, true, age,
+ 0, error);
+ if (age_idx == 0) {
+ rte_flow_error_set(error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "AGE are not configured!");
+ } else {
+ age_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
+ MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
+ handle =
+ (struct rte_flow_action_handle *)(uintptr_t)age_idx;
+ }
+ break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id))
+ if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))
rte_flow_error_set(error, ENODEV,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -6097,8 +6672,13 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (aso_mtr->fm.meter_id);
handle = (struct rte_flow_action_handle *)(uintptr_t)mtr_id;
break;
- default:
+ case RTE_FLOW_ACTION_TYPE_RSS:
handle = flow_dv_action_create(dev, conf, action, error);
+ break;
+ default:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "action type not supported");
+ return NULL;
}
return handle;
}
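
For reference, a hedged sketch of how an application reaches the new AGE branch above through the async API; the queue number and timeout are illustrative:

    struct rte_flow_op_attr op_attr = { .postpone = 0 };
    struct rte_flow_indir_action_conf indir_conf = { .ingress = 1 };
    struct rte_flow_action_age age_conf = { .timeout = 10 };
    struct rte_flow_action age_action = {
        .type = RTE_FLOW_ACTION_TYPE_AGE,
        .conf = &age_conf,
    };
    struct rte_flow_error flow_err;
    struct rte_flow_action_handle *age_handle;

    /* Returns NULL and fills flow_err if aging was not configured. */
    age_handle = rte_flow_async_action_handle_create(port_id, 0, &op_attr,
                                                     &indir_conf, &age_action,
                                                     NULL, &flow_err);
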
@@ -6109,7 +6689,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] queue
- * Which queue to be used..
+ * Which queue to be used.
* @param[in] attr
* Operation attribute.
* @param[in] handle
@@ -6132,7 +6712,6 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
void *user_data,
struct rte_flow_error *error)
{
- RTE_SET_USED(queue);
RTE_SET_USED(attr);
RTE_SET_USED(user_data);
struct mlx5_priv *priv = dev->data->dev_private;
@@ -6147,6 +6726,8 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
+ return mlx5_hws_age_action_update(priv, idx, update, error);
case MLX5_INDIRECT_ACTION_TYPE_CT:
return flow_hw_conntrack_update(dev, queue, update, act_idx, error);
case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
@@ -6180,11 +6761,15 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "Unable to wait for ASO meter CQE");
- return 0;
- default:
break;
+ case MLX5_INDIRECT_ACTION_TYPE_RSS:
+ return flow_dv_action_update(dev, handle, update, error);
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "action type not supported");
}
- return flow_dv_action_update(dev, handle, update, error);
+ return 0;
}
/**
@@ -6193,7 +6778,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
* @param[in] dev
* Pointer to the rte_eth_dev structure.
* @param[in] queue
- * Which queue to be used..
+ * Which queue to be used.
* @param[in] attr
* Operation attribute.
* @param[in] handle
@@ -6215,6 +6800,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
{
uint32_t act_idx = (uint32_t)(uintptr_t)handle;
uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
@@ -6225,7 +6811,16 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
RTE_SET_USED(attr);
RTE_SET_USED(user_data);
switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
+ return mlx5_hws_age_action_destroy(priv, age_idx, error);
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+ age_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);
+ if (age_idx != 0)
+ /*
+ * If this counter belongs to an indirect AGE, now is the
+ * time to update that AGE.
+ */
+ mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
return mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
case MLX5_INDIRECT_ACTION_TYPE_CT:
return flow_hw_conntrack_destroy(dev, act_idx, error);
@@ -6250,10 +6845,15 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "Unable to wait for ASO meter CQE");
mlx5_ipool_free(pool->idx_pool, idx);
- return 0;
- default:
+ break;
+ case MLX5_INDIRECT_ACTION_TYPE_RSS:
return flow_dv_action_destroy(dev, handle, error);
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "action type not supported");
}
+ return 0;
}
static int
@@ -6263,13 +6863,14 @@ flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hws_cnt *cnt;
struct rte_flow_query_count *qc = data;
- uint32_t iidx = mlx5_hws_cnt_iidx(priv->hws_cpool, counter);
+ uint32_t iidx;
uint64_t pkts, bytes;
if (!mlx5_hws_cnt_id_valid(counter))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"counter are not available");
+ iidx = mlx5_hws_cnt_iidx(priv->hws_cpool, counter);
cnt = &priv->hws_cpool->pool[iidx];
__hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
qc->hits_set = 1;
@@ -6283,12 +6884,64 @@ flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
return 0;
}
+/**
+ * Query a flow rule AGE action for aging information.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] age_idx
+ * Index of AGE action parameter.
+ * @param[out] data
+ * Data retrieved by the query.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
static int
-flow_hw_query(struct rte_eth_dev *dev,
- struct rte_flow *flow __rte_unused,
- const struct rte_flow_action *actions __rte_unused,
- void *data __rte_unused,
- struct rte_flow_error *error __rte_unused)
+flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
+ struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
+ struct rte_flow_query_age *resp = data;
+
+ if (!param || !param->timeout)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "age data not available");
+ switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ case HWS_AGE_AGED_OUT_REPORTED:
+ case HWS_AGE_AGED_OUT_NOT_REPORTED:
+ resp->aged = 1;
+ break;
+ case HWS_AGE_CANDIDATE:
+ case HWS_AGE_CANDIDATE_INSIDE_RING:
+ resp->aged = 0;
+ break;
+ case HWS_AGE_FREE:
+ /*
+ * When state is FREE the flow itself should be invalid.
+ * Fall-through.
+ */
+ default:
+ MLX5_ASSERT(0);
+ break;
+ }
+ resp->sec_since_last_hit_valid = !resp->aged;
+ if (resp->sec_since_last_hit_valid)
+ resp->sec_since_last_hit = __atomic_load_n
+ (&param->sec_since_last_hit, __ATOMIC_RELAXED);
+ return 0;
+}
+
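
A short usage sketch for this query path, reusing the indirect handle from the earlier example; handle_aged_flow() is a hypothetical application callback:

    struct rte_flow_query_age age_resp;
    struct rte_flow_error flow_err;

    if (rte_flow_action_handle_query(port_id, age_handle, &age_resp,
                                     &flow_err) == 0) {
        if (age_resp.aged)
            handle_aged_flow(age_handle);   /* hypothetical callback */
        else if (age_resp.sec_since_last_hit_valid)
            printf("idle for %u seconds\n", age_resp.sec_since_last_hit);
    }
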
+static int
+flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *actions, void *data,
+ struct rte_flow_error *error)
{
int ret = -EINVAL;
struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
@@ -6299,7 +6952,11 @@ flow_hw_query(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,
- error);
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_hw_query_age(dev, hw_flow->age_idx, data,
+ error);
break;
default:
return rte_flow_error_set(error, ENOTSUP,
@@ -6311,6 +6968,32 @@ flow_hw_query(struct rte_eth_dev *dev,
return ret;
}
+/**
+ * Validate indirect action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] conf
+ * Shared action configuration.
+ * @param[in] action
+ * Action specification used to create indirect action.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * 0 on success, otherwise negative errno value.
+ */
+static int
+flow_hw_action_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *err)
+{
+ return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,
+ conf, action, NULL, err);
+}
+
/**
* Create indirect action.
*
@@ -6334,6 +7017,12 @@ flow_hw_action_create(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
struct rte_flow_error *err)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->hws_strict_queue)
+ DRV_LOG(WARNING,
+ "port %u create indirect action called in strict queue mode.",
+ dev->data->port_id);
return flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,
NULL, conf, action, NULL, err);
}
@@ -6400,17 +7089,118 @@ flow_hw_action_query(struct rte_eth_dev *dev,
{
uint32_t act_idx = (uint32_t)(uintptr_t)handle;
uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
+ uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_AGE:
+ return flow_hw_query_age(dev, age_idx, data, error);
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return flow_hw_query_counter(dev, act_idx, data, error);
case MLX5_INDIRECT_ACTION_TYPE_CT:
return flow_hw_conntrack_query(dev, act_idx, data, error);
- default:
+ case MLX5_INDIRECT_ACTION_TYPE_RSS:
return flow_dv_action_query(dev, handle, data, error);
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "action type not supported");
}
}
+/**
+ * Get aged-out flows of a given port on the given HWS flow queue.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] queue_id
+ * Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is not set.
+ * @param[in, out] contexts
+ * The address of an array of pointers to the aged-out flow contexts.
+ * @param[in] nb_contexts
+ * The length of the context array.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * if nb_contexts is 0, the number of all aged-out contexts,
+ * otherwise the number of aged-out flows reported in the context
+ * array, or a negative errno value on failure.
+ */
+static int
+flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
+ void **contexts, uint32_t nb_contexts,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct rte_ring *r;
+ int nb_flows = 0;
+
+ if (nb_contexts && !contexts)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "empty context");
+ if (priv->hws_strict_queue) {
+ if (queue_id >= age_info->hw_q_age->nb_rings)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid queue id");
+ r = age_info->hw_q_age->aged_lists[queue_id];
+ } else {
+ r = age_info->hw_age.aged_list;
+ MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
+ }
+ if (nb_contexts == 0)
+ return rte_ring_count(r);
+ while ((uint32_t)nb_flows < nb_contexts) {
+ uint32_t age_idx;
+
+ if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
+ break;
+ /* get the AGE context if the aged-out index is still valid. */
+ contexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);
+ if (!contexts[nb_flows])
+ continue;
+ nb_flows++;
+ }
+ return nb_flows;
+}
+
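
The consumer side in strict-queue mode, as a minimal sketch; the array size and destroy_aged_flow() are assumptions:

    void *contexts[64];
    struct rte_flow_error flow_err;
    int n, i;

    /* With nb_contexts == 0 the call would only return the ring occupancy. */
    n = rte_flow_get_q_aged_flows(port_id, queue_id, contexts,
                                  RTE_DIM(contexts), &flow_err);
    for (i = 0; i < n; i++)
        destroy_aged_flow(contexts[i]);     /* hypothetical handler */
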
+/**
+ * Get aged-out flows.
+ *
+ * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] contexts
+ * The address of an array of pointers to the aged-out flow contexts.
+ * @param[in] nb_contexts
+ * The length of the context array.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Initialized in case of
+ * error only.
+ *
+ * @return
+ * if nb_contexts is 0, the number of all aged-out contexts,
+ * otherwise the number of aged-out flows reported in the context
+ * array, or a negative errno value on failure.
+ */
+static int
+flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
+ uint32_t nb_contexts, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->hws_strict_queue)
+ DRV_LOG(WARNING,
+ "port %u get aged flows called in strict queue mode.",
+ dev->data->port_id);
+ return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -6429,12 +7219,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.async_action_create = flow_hw_action_handle_create,
.async_action_destroy = flow_hw_action_handle_destroy,
.async_action_update = flow_hw_action_handle_update,
- .action_validate = flow_dv_action_validate,
+ .action_validate = flow_hw_action_validate,
.action_create = flow_hw_action_create,
.action_destroy = flow_hw_action_destroy,
.action_update = flow_hw_action_update,
.action_query = flow_hw_action_query,
.query = flow_hw_query,
+ .get_aged_flows = flow_hw_get_aged_flows,
+ .get_q_aged_flows = flow_hw_get_q_aged_flows,
};
/**
@@ -122,7 +122,7 @@ flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
struct mlx5_flow_counter_pool **ppool)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
struct mlx5_flow_counter_pool *pool;
idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
@@ -215,7 +215,7 @@ static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+ struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt = NULL;
uint32_t n_valid = cmng->n_valid;
@@ -8,6 +8,7 @@
#include <rte_ring.h>
#include <mlx5_devx_cmds.h>
#include <rte_cycles.h>
+#include <rte_eal_paging.h>
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -26,8 +27,8 @@ __hws_cnt_id_load(struct mlx5_hws_cnt_pool *cpool)
uint32_t preload;
uint32_t q_num = cpool->cache->q_num;
uint32_t cnt_num = mlx5_hws_cnt_pool_get_size(cpool);
- cnt_id_t cnt_id, iidx = 0;
- uint32_t qidx;
+ cnt_id_t cnt_id;
+ uint32_t qidx, iidx = 0;
struct rte_ring *qcache = NULL;
/*
@@ -86,6 +87,174 @@ __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh,
} while (reset_cnt_num > 0);
}
+/**
+ * Release AGE parameter.
+ *
+ * @param priv
+ * Pointer to the port private data structure.
+ * @param own_cnt_index
+ * ID of the counter created only for this AGE, to be released.
+ * Zero means there is no such counter.
+ * @param age_ipool
+ * Pointer to AGE parameter indexed pool.
+ * @param idx
+ * Index of AGE parameter in the indexed pool.
+ */
+static void
+mlx5_hws_age_param_free(struct mlx5_priv *priv, cnt_id_t own_cnt_index,
+ struct mlx5_indexed_pool *age_ipool, uint32_t idx)
+{
+ if (own_cnt_index) {
+ struct mlx5_hws_cnt_pool *cpool = priv->hws_cpool;
+
+ MLX5_ASSERT(mlx5_hws_cnt_is_shared(cpool, own_cnt_index));
+ mlx5_hws_cnt_shared_put(cpool, &own_cnt_index);
+ }
+ mlx5_ipool_free(age_ipool, idx);
+}
+
+/**
+ * Check for new aged-out flows in the HWS counter pool and prepare the event.
+ *
+ * @param[in] priv
+ * Pointer to port private object.
+ * @param[in] cpool
+ * Pointer to current counter pool.
+ */
+static void
+mlx5_hws_aging_check(struct mlx5_priv *priv, struct mlx5_hws_cnt_pool *cpool)
+{
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct flow_counter_stats *stats = cpool->raw_mng->raw;
+ struct mlx5_hws_age_param *param;
+ struct rte_ring *r;
+ const uint64_t curr_time = MLX5_CURR_TIME_SEC;
+ const uint32_t time_delta = curr_time - cpool->time_of_last_age_check;
+ uint32_t nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(cpool);
+ uint16_t expected1 = HWS_AGE_CANDIDATE;
+ uint16_t expected2 = HWS_AGE_CANDIDATE_INSIDE_RING;
+ uint32_t i;
+
+ cpool->time_of_last_age_check = curr_time;
+ for (i = 0; i < nb_alloc_cnts; ++i) {
+ uint32_t age_idx = cpool->pool[i].age_idx;
+ uint64_t hits;
+
+ if (!cpool->pool[i].in_used || age_idx == 0)
+ continue;
+ param = mlx5_ipool_get(age_info->ages_ipool, age_idx);
+ if (unlikely(param == NULL)) {
+ /*
+ * When an AGE action used an indirect counter, it is
+ * the user's responsibility not to keep using that
+ * counter without the AGE action.
+ * If the counter is used after the AGE was freed, the
+ * saved AGE index is invalid and dereferencing it here
+ * would cause a segmentation fault.
+ */
+ DRV_LOG(WARNING,
+ "Counter %u lost its AGE, skipping it.", i);
+ continue;
+ }
+ if (param->timeout == 0)
+ continue;
+ switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ case HWS_AGE_AGED_OUT_NOT_REPORTED:
+ case HWS_AGE_AGED_OUT_REPORTED:
+ /* Already aged-out, no action is needed. */
+ continue;
+ case HWS_AGE_CANDIDATE:
+ case HWS_AGE_CANDIDATE_INSIDE_RING:
+ /* This AGE candidate to be aged-out, go to checking. */
+ break;
+ case HWS_AGE_FREE:
+ /*
+ * AGE parameter with state "FREE" couldn't be pointed
+ * by any counter since counter is destroyed first.
+ * Fall-through.
+ */
+ default:
+ MLX5_ASSERT(0);
+ continue;
+ }
+ hits = rte_be_to_cpu_64(stats[i].hits);
+ if (param->nb_cnts == 1) {
+ if (hits != param->accumulator_last_hits) {
+ __atomic_store_n(&param->sec_since_last_hit, 0,
+ __ATOMIC_RELAXED);
+ param->accumulator_last_hits = hits;
+ continue;
+ }
+ } else {
+ param->accumulator_hits += hits;
+ param->accumulator_cnt++;
+ if (param->accumulator_cnt < param->nb_cnts)
+ continue;
+ param->accumulator_cnt = 0;
+ if (param->accumulator_last_hits !=
+ param->accumulator_hits) {
+ __atomic_store_n(&param->sec_since_last_hit,
+ 0, __ATOMIC_RELAXED);
+ param->accumulator_last_hits =
+ param->accumulator_hits;
+ param->accumulator_hits = 0;
+ continue;
+ }
+ param->accumulator_hits = 0;
+ }
+ if (__atomic_add_fetch(&param->sec_since_last_hit, time_delta,
+ __ATOMIC_RELAXED) <=
+ __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))
+ continue;
+ /* Prepare the relevant ring for this AGE parameter */
+ if (priv->hws_strict_queue)
+ r = age_info->hw_q_age->aged_lists[param->queue_id];
+ else
+ r = age_info->hw_age.aged_list;
+ /* Changing the state atomically and insert it into the ring. */
+ if (__atomic_compare_exchange_n(&param->state, &expected1,
+ HWS_AGE_AGED_OUT_NOT_REPORTED,
+ false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED)) {
+ int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
+ sizeof(uint32_t),
+ 1, NULL);
+
+ /*
+ * The ring doesn't have enough room for this entry;
+ * replace the state back so it is retried in the next
+ * second.
+ *
+ * FIXME: if it gets traffic before the next second, this
+ * "aged-out" event is lost; to be fixed later when the
+ * ring is filled in bulks.
+ */
+ expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
+ if (ret == 0 &&
+ !__atomic_compare_exchange_n(&param->state,
+ &expected2, expected1,
+ false,
+ __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED) &&
+ expected2 == HWS_AGE_FREE)
+ mlx5_hws_age_param_free(priv,
+ param->own_cnt_index,
+ age_info->ages_ipool,
+ age_idx);
+ /* The event is irrelevant in strict queue mode. */
+ if (!priv->hws_strict_queue)
+ MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
+ } else {
+ __atomic_compare_exchange_n(&param->state, &expected2,
+ HWS_AGE_AGED_OUT_NOT_REPORTED,
+ false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+ }
+ }
+ /* The event is irrelevant in strict queue mode. */
+ if (!priv->hws_strict_queue)
+ mlx5_age_event_prepare(priv->sh);
+}
+
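
To make the accumulator path above concrete, a worked example with hypothetical numbers: an indirect AGE referenced by nb_cnts = 3 counters compares totals only once all three contributed in a check round:

    /*
     * round N:   100 + 250 + 50 -> accumulator_hits = 400, stored in
     *            accumulator_last_hits.
     * round N+1: 100 + 250 + 50 -> 400 == accumulator_last_hits, no
     *            traffic seen, sec_since_last_hit += time_delta.
     * round N+2: 120 + 250 + 50 -> 420 != 400, traffic seen,
     *            sec_since_last_hit reset to 0.
     */
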
static void
mlx5_hws_cnt_raw_data_free(struct mlx5_dev_ctx_shared *sh,
struct mlx5_hws_cnt_raw_data_mng *mng)
@@ -104,12 +273,14 @@ mlx5_hws_cnt_raw_data_alloc(struct mlx5_dev_ctx_shared *sh, uint32_t n)
struct mlx5_hws_cnt_raw_data_mng *mng = NULL;
int ret;
size_t sz = n * sizeof(struct flow_counter_stats);
+ size_t pgsz = rte_mem_page_size();
+ MLX5_ASSERT(pgsz > 0);
mng = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*mng), 0,
SOCKET_ID_ANY);
if (mng == NULL)
goto error;
- mng->raw = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sz, 0,
+ mng->raw = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sz, pgsz,
SOCKET_ID_ANY);
if (mng->raw == NULL)
goto error;
@@ -146,6 +317,9 @@ mlx5_hws_cnt_svc(void *opaque)
opriv->sh == sh &&
opriv->hws_cpool != NULL) {
__mlx5_hws_cnt_svc(sh, opriv->hws_cpool);
+ if (opriv->hws_age_req)
+ mlx5_hws_aging_check(opriv,
+ opriv->hws_cpool);
}
}
query_cycle = rte_rdtsc() - start_cycle;
@@ -158,8 +332,9 @@ mlx5_hws_cnt_svc(void *opaque)
}
struct mlx5_hws_cnt_pool *
-mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
- const struct mlx5_hws_cache_param *ccfg)
+mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
+ const struct mlx5_hws_cnt_pool_cfg *pcfg,
+ const struct mlx5_hws_cache_param *ccfg)
{
char mz_name[RTE_MEMZONE_NAMESIZE];
struct mlx5_hws_cnt_pool *cntp;
@@ -185,16 +360,26 @@ mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
cntp->cache->preload_sz = ccfg->preload_sz;
cntp->cache->threshold = ccfg->threshold;
cntp->cache->q_num = ccfg->q_num;
+ if (pcfg->request_num > sh->hws_max_nb_counters) {
+ DRV_LOG(ERR, "Counter number %u "
+ "is greater than the maximum supported (%u).",
+ pcfg->request_num, sh->hws_max_nb_counters);
+ goto error;
+ }
cnt_num = pcfg->request_num * (100 + pcfg->alloc_factor) / 100;
if (cnt_num > UINT32_MAX) {
DRV_LOG(ERR, "counter number %"PRIu64" is out of 32bit range",
cnt_num);
goto error;
}
+ /*
+ * When the requested number of counters is supported but the
+ * allocation factor takes it beyond the maximum, the factor is
+ * effectively reduced by clamping to the maximum.
+ */
+ cnt_num = RTE_MIN((uint32_t)cnt_num, sh->hws_max_nb_counters);
cntp->pool = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
- sizeof(struct mlx5_hws_cnt) *
- pcfg->request_num * (100 + pcfg->alloc_factor) / 100,
- 0, SOCKET_ID_ANY);
+ sizeof(struct mlx5_hws_cnt) * cnt_num,
+ 0, SOCKET_ID_ANY);
if (cntp->pool == NULL)
goto error;
snprintf(mz_name, sizeof(mz_name), "%s_F_RING", pcfg->name);
@@ -231,6 +416,8 @@ mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
if (cntp->cache->qcache[qidx] == NULL)
goto error;
}
+ /* Initialize the time for aging-out calculation. */
+ cntp->time_of_last_age_check = MLX5_CURR_TIME_SEC;
return cntp;
error:
mlx5_hws_cnt_pool_deinit(cntp);
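
A worked example of the sizing logic above, with hypothetical numbers:

    /*
     * request_num = 1000, alloc_factor = 20, hws_max_nb_counters = 1100:
     *   cnt_num = 1000 * (100 + 20) / 100 = 1200
     *   cnt_num = RTE_MIN(1200, 1100)     = 1100
     * The request itself (1000 <= 1100) is honored; only the headroom
     * added by the factor is trimmed to the firmware limit.
     */
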
@@ -297,19 +484,17 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,
struct mlx5_hws_cnt_pool *cpool)
{
struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
- uint32_t max_log_bulk_sz = 0;
+ uint32_t max_log_bulk_sz = sh->hws_max_log_bulk_sz;
uint32_t log_bulk_sz;
- uint32_t idx, alloced = 0;
+ uint32_t idx, alloc_candidate, alloced = 0;
unsigned int cnt_num = mlx5_hws_cnt_pool_get_size(cpool);
struct mlx5_devx_counter_attr attr = {0};
struct mlx5_devx_obj *dcs;
if (hca_attr->flow_counter_bulk_log_max_alloc == 0) {
- DRV_LOG(ERR,
- "Fw doesn't support bulk log max alloc");
+ DRV_LOG(ERR, "Fw doesn't support bulk log max alloc");
return -1;
}
- max_log_bulk_sz = 23; /* hard code to 8M (1 << 23). */
cnt_num = RTE_ALIGN_CEIL(cnt_num, 4); /* minimal 4 counter in bulk. */
log_bulk_sz = RTE_MIN(max_log_bulk_sz, rte_log2_u32(cnt_num));
attr.pd = sh->cdev->pdn;
@@ -327,18 +512,23 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,
cpool->dcs_mng.dcs[0].iidx = 0;
alloced = cpool->dcs_mng.dcs[0].batch_sz;
if (cnt_num > cpool->dcs_mng.dcs[0].batch_sz) {
- for (; idx < MLX5_HWS_CNT_DCS_NUM; idx++) {
+ while (idx < MLX5_HWS_CNT_DCS_NUM) {
attr.flow_counter_bulk_log_size = --max_log_bulk_sz;
+ alloc_candidate = RTE_BIT32(max_log_bulk_sz);
+ if (alloced + alloc_candidate > sh->hws_max_nb_counters)
+ continue;
dcs = mlx5_devx_cmd_flow_counter_alloc_general
(sh->cdev->ctx, &attr);
if (dcs == NULL)
goto error;
cpool->dcs_mng.dcs[idx].obj = dcs;
- cpool->dcs_mng.dcs[idx].batch_sz =
- (1 << max_log_bulk_sz);
+ cpool->dcs_mng.dcs[idx].batch_sz = alloc_candidate;
cpool->dcs_mng.dcs[idx].iidx = alloced;
alloced += cpool->dcs_mng.dcs[idx].batch_sz;
cpool->dcs_mng.batch_total++;
+ if (alloced >= cnt_num)
+ break;
+ idx++;
}
}
return 0;
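
A hedged worked example of the bulk split performed by the loop above (numbers hypothetical):

    /*
     * cnt_num = 6000 (already 4-aligned), max_log_bulk_sz = 12:
     *   dcs[0]: log = RTE_MIN(12, rte_log2_u32(6000)) = RTE_MIN(12, 13) = 12,
     *           batch_sz = 1 << 12 = 4096
     *   dcs[1]: batch_sz = 1 << 11 = 2048, alloced = 6144 >= 6000 -> break
     * Up to MLX5_HWS_CNT_DCS_NUM objects are chained this way, each
     * candidate half the size of the previous one, skipping sizes that
     * would exceed hws_max_nb_counters.
     */
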
@@ -445,7 +635,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
dev->data->port_id);
pcfg.name = mp_name;
pcfg.request_num = pattr->nb_counters;
- cpool = mlx5_hws_cnt_pool_init(&pcfg, &cparam);
+ cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam);
if (cpool == NULL)
goto error;
ret = mlx5_hws_cnt_pool_dcs_alloc(priv->sh, cpool);
@@ -525,4 +715,484 @@ mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh)
sh->cnt_svc = NULL;
}
+/**
+ * Destroy AGE action.
+ *
+ * @param priv
+ * Pointer to the port private data structure.
+ * @param idx
+ * Index of AGE parameter.
+ * @param error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,
+ struct rte_flow_error *error)
+{
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
+ struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);
+
+ if (param == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "invalid AGE parameter index");
+ switch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,
+ __ATOMIC_RELAXED)) {
+ case HWS_AGE_CANDIDATE:
+ case HWS_AGE_AGED_OUT_REPORTED:
+ mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
+ break;
+ case HWS_AGE_AGED_OUT_NOT_REPORTED:
+ case HWS_AGE_CANDIDATE_INSIDE_RING:
+ /*
+ * In both cases the AGE is inside the ring. Change the state here
+ * and destroy it later when it is taken out of the ring.
+ */
+ break;
+ case HWS_AGE_FREE:
+ /*
+ * If the index is valid and the state is FREE, it means this
+ * AGE has been freed for the user but not for the PMD, since
+ * it is still inside the ring.
+ */
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "this AGE has already been released");
+ default:
+ MLX5_ASSERT(0);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * Create AGE action parameter.
+ *
+ * @param[in] priv
+ * Pointer to the port private data structure.
+ * @param[in] queue_id
+ * Which HWS queue to be used.
+ * @param[in] shared
+ * Whether it is an indirect AGE action.
+ * @param[in] flow_idx
+ * Flow index from indexed pool.
+ * For an indirect AGE action it is ignored.
+ * @param[in] age
+ * Pointer to the aging action configuration.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * Index to AGE action parameter on success, 0 otherwise.
+ */
+uint32_t
+mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,
+ bool shared, const struct rte_flow_action_age *age,
+ uint32_t flow_idx, struct rte_flow_error *error)
+{
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
+ struct mlx5_hws_age_param *param;
+ uint32_t age_idx;
+
+ param = mlx5_ipool_malloc(ipool, &age_idx);
+ if (param == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate AGE parameter");
+ return 0;
+ }
+ MLX5_ASSERT(__atomic_load_n(&param->state,
+ __ATOMIC_RELAXED) == HWS_AGE_FREE);
+ if (shared) {
+ param->nb_cnts = 0;
+ param->accumulator_hits = 0;
+ param->accumulator_cnt = 0;
+ flow_idx = age_idx;
+ } else {
+ param->nb_cnts = 1;
+ }
+ param->context = age->context ? age->context :
+ (void *)(uintptr_t)flow_idx;
+ param->timeout = age->timeout;
+ param->queue_id = queue_id;
+ param->accumulator_last_hits = 0;
+ param->own_cnt_index = 0;
+ param->sec_since_last_hit = 0;
+ param->state = HWS_AGE_CANDIDATE;
+ return age_idx;
+}
+
+/**
+ * Update indirect AGE action parameter.
+ *
+ * @param[in] priv
+ * Pointer to the port private data structure.
+ * @param[in] idx
+ * Index of AGE parameter.
+ * @param[in] update
+ * Update value.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,
+ const void *update, struct rte_flow_error *error)
+{
+ const struct rte_flow_update_age *update_ade = update;
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
+ struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);
+ bool sec_since_last_hit_reset = false;
+ bool state_update = false;
+
+ if (param == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "invalid AGE parameter index");
+ if (update_ade->timeout_valid) {
+ uint32_t old_timeout = __atomic_exchange_n(&param->timeout,
+ update_ade->timeout,
+ __ATOMIC_RELAXED);
+
+ if (old_timeout == 0)
+ sec_since_last_hit_reset = true;
+ else if (old_timeout < update_ade->timeout ||
+ update_ade->timeout == 0)
+ /*
+ * When the timeout is increased, aged-out flows might become
+ * active again and the state should be updated accordingly.
+ * When the new timeout is 0, the state is updated so the
+ * flows stop being reported as aged-out.
+ */
+ state_update = true;
+ }
+ if (update_ade->touch) {
+ sec_since_last_hit_reset = true;
+ state_update = true;
+ }
+ if (sec_since_last_hit_reset)
+ __atomic_store_n(&param->sec_since_last_hit, 0,
+ __ATOMIC_RELAXED);
+ if (state_update) {
+ uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
+
+ /*
+ * Change states of aged-out flows to active:
+ * - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
+ * - AGED_OUT_REPORTED -> CANDIDATE
+ */
+ if (!__atomic_compare_exchange_n(&param->state, &expected,
+ HWS_AGE_CANDIDATE_INSIDE_RING,
+ false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED) &&
+ expected == HWS_AGE_AGED_OUT_REPORTED)
+ __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
+ __ATOMIC_RELAXED);
+ }
+ return 0;
+}
+
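
A usage sketch for this update path (values illustrative): raising the timeout and touching the action moves an aged-out flow back to a candidate state, as implemented above:

    struct rte_flow_update_age age_upd = {
        .timeout = 30,
        .timeout_valid = 1,
        .touch = 1,     /* also resets sec_since_last_hit */
    };
    struct rte_flow_error flow_err;

    rte_flow_action_handle_update(port_id, age_handle, &age_upd, &flow_err);
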
+/**
+ * Get the AGE context if the aged-out index is still valid.
+ *
+ * @param priv
+ * Pointer to the port private data structure.
+ * @param idx
+ * Index of AGE parameter.
+ *
+ * @return
+ * AGE context if the index is still aged-out, NULL otherwise.
+ */
+void *
+mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx)
+{
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
+ struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);
+ uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
+
+ MLX5_ASSERT(param != NULL);
+ if (__atomic_compare_exchange_n(&param->state, &expected,
+ HWS_AGE_AGED_OUT_REPORTED, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ return param->context;
+ switch (expected) {
+ case HWS_AGE_FREE:
+ /*
+ * This AGE couldn't be destroyed while it was inside
+ * the ring. Its state has now been updated to FREE, so
+ * it is actually freed here.
+ */
+ mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
+ break;
+ case HWS_AGE_CANDIDATE_INSIDE_RING:
+ __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
+ __ATOMIC_RELAXED);
+ break;
+ case HWS_AGE_CANDIDATE:
+ /*
+ * Only the background thread pushes to the ring and it never
+ * pushes this state. When an AGE inside the ring becomes a
+ * candidate, it gets the special state
+ * HWS_AGE_CANDIDATE_INSIDE_RING.
+ * Fall-through.
+ */
+ case HWS_AGE_AGED_OUT_REPORTED:
+ /*
+ * Only this thread (doing query) may write this state, and it
+ * happens only after the query thread takes it out of the ring.
+ * Fall-through.
+ */
+ case HWS_AGE_AGED_OUT_NOT_REPORTED:
+ /*
+ * In this case the compare-exchange above returns true and
+ * the function returns the context immediately.
+ * Fall-through.
+ */
+ default:
+ MLX5_ASSERT(0);
+ break;
+ }
+ return NULL;
+}
+
+#ifdef RTE_ARCH_64
+#define MLX5_HWS_AGED_OUT_RING_SIZE_MAX UINT32_MAX
+#else
+#define MLX5_HWS_AGED_OUT_RING_SIZE_MAX RTE_BIT32(8)
+#endif
+
+/**
+ * Get the size of aged out ring list for each queue.
+ *
+ * The size is one percent of nb_counters divided by nb_queues.
+ * The ring size must be a power of 2, so it is aligned up to one.
+ * On 32-bit systems, the size is limited to 256.
+ *
+ * This function is called when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is on.
+ *
+ * @param nb_counters
+ * Final number of allocated counters in the pool.
+ * @param nb_queues
+ * Number of HWS queues in this port.
+ *
+ * @return
+ * Size of aged out ring per queue.
+ */
+static __rte_always_inline uint32_t
+mlx5_hws_aged_out_q_ring_size_get(uint32_t nb_counters, uint32_t nb_queues)
+{
+ uint32_t size = rte_align32pow2((nb_counters / 100) / nb_queues);
+ uint32_t max_size = MLX5_HWS_AGED_OUT_RING_SIZE_MAX;
+
+ return RTE_MIN(size, max_size);
+}
+
+/**
+ * Get the size of the aged out ring list.
+ *
+ * The size is one percent of nb_counters.
+ * The ring size must be a power of 2, so it is aligned up to one.
+ * On 32-bit systems, the size is limited to 256.
+ *
+ * This function is called when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is off.
+ *
+ * @param nb_counters
+ * Final number of allocated counters in the pool.
+ *
+ * @return
+ * Size of the aged out ring list.
+ */
+static __rte_always_inline uint32_t
+mlx5_hws_aged_out_ring_size_get(uint32_t nb_counters)
+{
+ uint32_t size = rte_align32pow2(nb_counters / 100);
+ uint32_t max_size = MLX5_HWS_AGED_OUT_RING_SIZE_MAX;
+
+ return RTE_MIN(size, max_size);
+}
+
+/**
+ * Initialize the shared aging list information per port.
+ *
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @param nb_queues
+ * Number of HWS queues.
+ * @param strict_queue
+ * Indicator whether strict_queue mode is enabled.
+ * @param ring_size
+ * Size of aged-out ring for creation.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_hws_age_info_init(struct rte_eth_dev *dev, uint16_t nb_queues,
+ bool strict_queue, uint32_t ring_size)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ uint32_t flags = RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ struct rte_ring *r = NULL;
+ uint32_t qidx;
+
+ age_info->flags = 0;
+ if (strict_queue) {
+ size_t size = sizeof(*age_info->hw_q_age) +
+ sizeof(struct rte_ring *) * nb_queues;
+
+ age_info->hw_q_age = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
+ size, 0, SOCKET_ID_ANY);
+ if (age_info->hw_q_age == NULL)
+ return -ENOMEM;
+ for (qidx = 0; qidx < nb_queues; ++qidx) {
+ snprintf(mz_name, sizeof(mz_name),
+ "port_%u_queue_%u_aged_out_ring",
+ dev->data->port_id, qidx);
+ r = rte_ring_create(mz_name, ring_size, SOCKET_ID_ANY,
+ flags);
+ if (r == NULL) {
+ DRV_LOG(ERR, "\"%s\" creation failed: %s",
+ mz_name, rte_strerror(rte_errno));
+ goto error;
+ }
+ age_info->hw_q_age->aged_lists[qidx] = r;
+ DRV_LOG(DEBUG,
+ "\"%s\" is successfully created (size=%u).",
+ mz_name, ring_size);
+ }
+ age_info->hw_q_age->nb_rings = nb_queues;
+ } else {
+ snprintf(mz_name, sizeof(mz_name), "port_%u_aged_out_ring",
+ dev->data->port_id);
+ r = rte_ring_create(mz_name, ring_size, SOCKET_ID_ANY, flags);
+ if (r == NULL) {
+ DRV_LOG(ERR, "\"%s\" creation failed: %s", mz_name,
+ rte_strerror(rte_errno));
+ return -rte_errno;
+ }
+ age_info->hw_age.aged_list = r;
+ DRV_LOG(DEBUG, "\"%s\" is successfully created (size=%u).",
+ mz_name, ring_size);
+ /* In non "strict_queue" mode, initialize the event. */
+ MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
+ }
+ return 0;
+error:
+ MLX5_ASSERT(strict_queue);
+ while (qidx--)
+ rte_ring_free(age_info->hw_q_age->aged_lists[qidx]);
+ mlx5_free(age_info->hw_q_age);
+ return -1;
+}
+
+/**
+ * Destroy the shared aging list information per port.
+ *
+ * @param priv
+ * Pointer to port private object.
+ */
+static void
+mlx5_hws_age_info_destroy(struct mlx5_priv *priv)
+{
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+
+ if (priv->hws_strict_queue) {
+ uint16_t nb_queues = age_info->hw_q_age->nb_rings;
+ uint32_t qidx;
+
+ for (qidx = 0; qidx < nb_queues; ++qidx)
+ rte_ring_free(age_info->hw_q_age->aged_lists[qidx]);
+ mlx5_free(age_info->hw_q_age);
+ } else {
+ rte_ring_free(age_info->hw_age.aged_list);
+ }
+}
+
+/**
+ * Initialize the aging mechanism per port.
+ *
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @param attr
+ * Port configuration attributes.
+ * @param nb_queues
+ * Number of HWS queues.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *attr,
+ uint16_t nb_queues)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct mlx5_indexed_pool_config cfg = {
+ .size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct mlx5_hws_age_param)),
+ .need_lock = 1,
+ .release_mem_en = !!priv->sh->config.reclaim_mode,
+ .malloc = mlx5_malloc,
+ .free = mlx5_free,
+ .type = "mlx5_hws_age_pool",
+ };
+ bool strict_queue = !!(attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
+ uint32_t nb_alloc_cnts;
+ uint32_t rsize;
+ uint32_t nb_ages_updated;
+ int ret;
+
+ MLX5_ASSERT(priv->hws_cpool);
+ nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(priv->hws_cpool);
+ if (strict_queue) {
+ rsize = mlx5_hws_aged_out_q_ring_size_get(nb_alloc_cnts,
+ nb_queues);
+ nb_ages_updated = rsize * nb_queues + attr->nb_aging_objects;
+ } else {
+ rsize = mlx5_hws_aged_out_ring_size_get(nb_alloc_cnts);
+ nb_ages_updated = rsize + attr->nb_aging_objects;
+ }
+ ret = mlx5_hws_age_info_init(dev, nb_queues, strict_queue, rsize);
+ if (ret < 0)
+ return ret;
+ cfg.trunk_size = rte_align32pow2(nb_ages_updated);
+ age_info->ages_ipool = mlx5_ipool_create(&cfg);
+ if (age_info->ages_ipool == NULL) {
+ mlx5_hws_age_info_destroy(priv);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ priv->hws_age_req = 1;
+ return 0;
+}
+
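
A worked example of the sizing done here, with hypothetical numbers: 60000 allocated counters, 4 queues, strict-queue mode, nb_aging_objects = 1024:

    /*
     * rsize           = rte_align32pow2(60000 / 100 / 4) = align(150) = 256
     * nb_ages_updated = 256 * 4 + 1024                   = 2048
     * cfg.trunk_size  = rte_align32pow2(2048)            = 2048
     */
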
+/**
+ * Cleanup all aging resources per port.
+ *
+ * @param priv
+ * Pointer to port private object.
+ */
+void
+mlx5_hws_age_pool_destroy(struct mlx5_priv *priv)
+{
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+
+ MLX5_ASSERT(priv->hws_age_req);
+ mlx5_ipool_destroy(age_info->ages_ipool);
+ age_info->ages_ipool = NULL;
+ mlx5_hws_age_info_destroy(priv);
+ priv->hws_age_req = 0;
+}
+
#endif
@@ -10,26 +10,26 @@
#include "mlx5_flow.h"
/*
- * COUNTER ID's layout
+ * HWS COUNTER ID's layout
* 3 2 1 0
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | T | | D | |
- * ~ Y | | C | IDX ~
- * | P | | S | |
+ * | T | | D | |
+ * ~ Y | | C | IDX ~
+ * | P | | S | |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * Bit 31:30 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
+ * Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
* Bit 25:24 = DCS index
* Bit 23:00 = IDX in this counter belonged DCS bulk.
*/
-typedef uint32_t cnt_id_t;
-#define MLX5_HWS_CNT_DCS_NUM 4
#define MLX5_HWS_CNT_DCS_IDX_OFFSET 24
#define MLX5_HWS_CNT_DCS_IDX_MASK 0x3
#define MLX5_HWS_CNT_IDX_MASK ((1UL << MLX5_HWS_CNT_DCS_IDX_OFFSET) - 1)
+#define MLX5_HWS_AGE_IDX_MASK (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1)
+
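
An example decomposition under this layout; the ID value is hypothetical:

    /*
     * cnt_id = 0x42000123:
     *   TYPE = cnt_id >> MLX5_INDIRECT_ACTION_TYPE_OFFSET = b'010 (COUNT)
     *   DCS  = (cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET)
     *          & MLX5_HWS_CNT_DCS_IDX_MASK                = 2
     *   IDX  = cnt_id & MLX5_HWS_CNT_IDX_MASK             = 0x123
     */
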
struct mlx5_hws_cnt_dcs {
void *dr_action;
uint32_t batch_sz;
@@ -44,12 +44,22 @@ struct mlx5_hws_cnt_dcs_mng {
struct mlx5_hws_cnt {
struct flow_counter_stats reset;
+ bool in_used; /* Indicator whether this counter is in use or in the free pool. */
union {
- uint32_t share: 1;
- /*
- * share will be set to 1 when this counter is used as indirect
- * action. Only meaningful when user own this counter.
- */
+ struct {
+ uint32_t share:1;
+ /*
+ * share will be set to 1 when this counter is used as
+ * indirect action.
+ */
+ uint32_t age_idx:24;
+ /*
+ * When this counter is used for aging, it saves the index
+ * of the AGE parameter. For a pure counter (without aging)
+ * this index is zero.
+ */
+ };
+ /* This struct is only meaningful when the user owns this counter. */
uint32_t query_gen_when_free;
/*
* When PMD own this counter (user put back counter to PMD
@@ -96,8 +106,48 @@ struct mlx5_hws_cnt_pool {
struct rte_ring *free_list;
struct rte_ring *wait_reset_list;
struct mlx5_hws_cnt_pool_caches *cache;
+ uint64_t time_of_last_age_check;
} __rte_cache_aligned;
+/* HWS AGE status. */
+enum {
+ HWS_AGE_FREE, /* Initialized state. */
+ HWS_AGE_CANDIDATE, /* AGE assigned to flows. */
+ HWS_AGE_CANDIDATE_INSIDE_RING,
+ /*
+ * AGE assigned to flows but still inside the ring. It was aged-out
+ * but the timeout was changed, so it is in the ring yet a candidate.
+ */
+ HWS_AGE_AGED_OUT_REPORTED,
+ /*
+ * Aged-out, reported by rte_flow_get_q_aged_flows, waiting for destroy.
+ */
+ HWS_AGE_AGED_OUT_NOT_REPORTED,
+ /*
+ * Aged-out, inside the aged-out ring.
+ * Waiting for rte_flow_get_q_aged_flows and destroy.
+ */
+};
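
For orientation, the lifecycle these states implement across the background aging check, update, query and destroy paths (derived from the functions above, not an addition to the API):

    FREE -> CANDIDATE                           action create
    CANDIDATE / CANDIDATE_INSIDE_RING
        -> AGED_OUT_NOT_REPORTED                timeout expired (BG thread)
    AGED_OUT_NOT_REPORTED -> AGED_OUT_REPORTED  dequeued by get_q_aged_flows
    AGED_OUT_NOT_REPORTED
        -> CANDIDATE_INSIDE_RING                timeout raised / touched
    AGED_OUT_REPORTED -> CANDIDATE              timeout raised / touched
    CANDIDATE_INSIDE_RING -> CANDIDATE          dequeued, no longer aged-out
    any state -> FREE                           action destroy
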
+
+/* HWS counter age parameter. */
+struct mlx5_hws_age_param {
+ uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
+ uint32_t sec_since_last_hit;
+ /* Time in seconds since last hit (atomically accessed). */
+ uint16_t state; /* AGE state (atomically accessed). */
+ uint64_t accumulator_last_hits;
+ /* Last total value of hits for comparing. */
+ uint64_t accumulator_hits;
+ /* Accumulator for hits coming from several counters. */
+ uint32_t accumulator_cnt;
+ /* Number of counters that already updated the accumulator this second. */
+ uint32_t nb_cnts; /* Number of counters used by this AGE. */
+ uint32_t queue_id; /* Queue id of the counter. */
+ cnt_id_t own_cnt_index;
+ /* Counter action created specifically for this AGE action. */
+ void *context; /* Flow AGE context. */
+} __rte_packed __rte_cache_aligned;
+
/**
* Translate counter id into internal index (start from 0), which can be used
* as index of raw/cnt pool.
@@ -107,7 +157,7 @@ struct mlx5_hws_cnt_pool {
* @return
* Internal index
*/
-static __rte_always_inline cnt_id_t
+static __rte_always_inline uint32_t
mlx5_hws_cnt_iidx(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
{
uint8_t dcs_idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;
@@ -139,7 +189,7 @@ mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)
* Counter id
*/
static __rte_always_inline cnt_id_t
-mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, cnt_id_t iidx)
+mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, uint32_t iidx)
{
struct mlx5_hws_cnt_dcs_mng *dcs_mng = &cpool->dcs_mng;
uint32_t idx;
@@ -344,9 +394,10 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool,
struct rte_ring_zc_data zcdr = {0};
struct rte_ring *qcache = NULL;
unsigned int wb_num = 0; /* cache write-back number. */
- cnt_id_t iidx;
+ uint32_t iidx;
iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
+ cpool->pool[iidx].in_used = false;
cpool->pool[iidx].query_gen_when_free =
__atomic_load_n(&cpool->query_gen, __ATOMIC_RELAXED);
if (likely(queue != NULL))
@@ -388,20 +439,23 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool,
* A pointer to HWS queue. If null, it means fetch from common pool.
* @param cnt_id
* A pointer to a cnt_id_t * pointer (counter id) that will be filled.
+ * @param age_idx
+ * Index of AGE parameter using this counter, zero means there is no such AGE.
+ *
* @return
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
* - -EAGAIN: counter is not ready; try again.
*/
static __rte_always_inline int
-mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool,
- uint32_t *queue, cnt_id_t *cnt_id)
+mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
+ cnt_id_t *cnt_id, uint32_t age_idx)
{
unsigned int ret;
struct rte_ring_zc_data zcdc = {0};
struct rte_ring *qcache = NULL;
- uint32_t query_gen = 0;
- cnt_id_t iidx, tmp_cid = 0;
+ uint32_t iidx, query_gen = 0;
+ cnt_id_t tmp_cid = 0;
if (likely(queue != NULL))
qcache = cpool->cache->qcache[*queue];
@@ -422,6 +476,8 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool,
__hws_cnt_query_raw(cpool, *cnt_id,
&cpool->pool[iidx].reset.hits,
&cpool->pool[iidx].reset.bytes);
+ cpool->pool[iidx].in_used = true;
+ cpool->pool[iidx].age_idx = age_idx;
return 0;
}
ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t), 1,
@@ -455,6 +511,8 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool,
&cpool->pool[iidx].reset.bytes);
rte_ring_dequeue_zc_elem_finish(qcache, 1);
cpool->pool[iidx].share = 0;
+ cpool->pool[iidx].in_used = true;
+ cpool->pool[iidx].age_idx = age_idx;
return 0;
}
@@ -478,16 +536,16 @@ mlx5_hws_cnt_pool_get_action_offset(struct mlx5_hws_cnt_pool *cpool,
}
static __rte_always_inline int
-mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)
+mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,
+ uint32_t age_idx)
{
int ret;
uint32_t iidx;
- ret = mlx5_hws_cnt_pool_get(cpool, NULL, cnt_id);
+ ret = mlx5_hws_cnt_pool_get(cpool, NULL, cnt_id, age_idx);
if (ret != 0)
return ret;
iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
- MLX5_ASSERT(cpool->pool[iidx].share == 0);
cpool->pool[iidx].share = 1;
return 0;
}
@@ -513,10 +571,73 @@ mlx5_hws_cnt_is_shared(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
return cpool->pool[iidx].share ? true : false;
}
+static __rte_always_inline void
+mlx5_hws_cnt_age_set(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
+ uint32_t age_idx)
+{
+ uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);
+
+ MLX5_ASSERT(cpool->pool[iidx].share);
+ cpool->pool[iidx].age_idx = age_idx;
+}
+
+static __rte_always_inline uint32_t
+mlx5_hws_cnt_age_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
+{
+ uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);
+
+ MLX5_ASSERT(cpool->pool[iidx].share);
+ return cpool->pool[iidx].age_idx;
+}
+
+static __rte_always_inline cnt_id_t
+mlx5_hws_age_cnt_get(struct mlx5_priv *priv, struct mlx5_hws_age_param *param,
+ uint32_t age_idx)
+{
+ if (!param->own_cnt_index) {
+ /* Create indirect counter one for internal usage. */
+ if (mlx5_hws_cnt_shared_get(priv->hws_cpool,
+ &param->own_cnt_index, age_idx) < 0)
+ return 0;
+ param->nb_cnts++;
+ }
+ return param->own_cnt_index;
+}
+
+static __rte_always_inline void
+mlx5_hws_age_nb_cnt_increase(struct mlx5_priv *priv, uint32_t age_idx)
+{
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
+ struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
+
+ MLX5_ASSERT(param != NULL);
+ param->nb_cnts++;
+}
+
+static __rte_always_inline void
+mlx5_hws_age_nb_cnt_decrease(struct mlx5_priv *priv, uint32_t age_idx)
+{
+ struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
+ struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
+ struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
+
+ if (param != NULL)
+ param->nb_cnts--;
+}
+
+static __rte_always_inline bool
+mlx5_hws_age_is_indirect(uint32_t age_idx)
+{
+ return (age_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
+ MLX5_INDIRECT_ACTION_TYPE_AGE ? true : false;
+}
+
/* init HWS counter pool. */
struct mlx5_hws_cnt_pool *
-mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
- const struct mlx5_hws_cache_param *ccfg);
+mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
+ const struct mlx5_hws_cnt_pool_cfg *pcfg,
+ const struct mlx5_hws_cache_param *ccfg);
void
mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool *cntp);
@@ -555,4 +676,28 @@ mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh);
void
mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh);
+int
+mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,
+ struct rte_flow_error *error);
+
+uint32_t
+mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,
+ bool shared, const struct rte_flow_action_age *age,
+ uint32_t flow_idx, struct rte_flow_error *error);
+
+int
+mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,
+ const void *update, struct rte_flow_error *error);
+
+void *
+mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx);
+
+int
+mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
+ const struct rte_flow_port_attr *attr,
+ uint16_t nb_queues);
+
+void
+mlx5_hws_age_pool_destroy(struct mlx5_priv *priv);
+
#endif /* _MLX5_HWS_CNT_H_ */