[6/8] net/mlx5: support flow hit action for aging

Message ID 1604008681-414157-7-git-send-email-matan@nvidia.com
State Superseded, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5: support flow hit steering action

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Matan Azrad Oct. 29, 2020, 9:57 p.m. UTC
From: Dekel Peled <dekelp@nvidia.com>

A new ASO (Advanced Steering Operation) feature was added in the latest
mlx5 adapters to support flow hit detection.

Using this new steering action, the driver can detect whether a flow was
hit by traffic and can reset the indication at any time.

Add support for the flow aging action in rte_flow using this new feature.

The counter-based aging mode is used only when the ASO feature is not
supported.
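
For reference, a minimal sketch of how an application requests aging
through the generic rte_flow API (the AGE action and its configuration
are standard DPDK API; the helper name and the QUEUE fate action are
illustrative only, not part of this patch):

#include <rte_flow.h>

/* Hypothetical helper: create a flow that ages out after 10 seconds
 * without traffic hitting it.
 */
static struct rte_flow *
create_aging_flow(uint16_t port_id, const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[], void *user_ctx,
		  struct rte_flow_error *error)
{
	struct rte_flow_action_age age = {
		.timeout = 10, /* Aging timeout in seconds. */
		.context = user_ctx, /* Reported by rte_flow_get_aged_flows(). */
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, attr, pattern, actions, error);
}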

Signed-off-by: Dekel Peled <dekelp@nvidia.com>
Signed-off-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/mlx5_prm.h   |   9 +-
 drivers/net/mlx5/linux/mlx5_os.c |  11 +
 drivers/net/mlx5/meson.build     |   1 +
 drivers/net/mlx5/mlx5.c          |  70 ++++
 drivers/net/mlx5/mlx5.h          |  98 +++++-
 drivers/net/mlx5/mlx5_flow.h     |   1 +
 drivers/net/mlx5/mlx5_flow_age.c | 675 +++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow_dv.c  | 338 ++++++++++++++++++--
 8 files changed, 1171 insertions(+), 32 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_flow_age.c
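
The per-flow ASO age handle added below (the new "age" field in struct
rte_flow) packs the pool index and the intra-pool action offset into one
32-bit value. A sketch of the encoding implemented in the patch by
flow_dv_aso_age_alloc() and flow_dv_aso_age_get_by_idx() (these helper
names are illustrative, not part of the patch):

#include <stdint.h>

/* Bits 0..15 hold the pool index; bits 16..31 hold the action offset
 * plus one, so that an age index of 0 can mean "no age action attached".
 */
static inline uint32_t
aso_age_idx_encode(uint16_t pool_idx, uint16_t offset)
{
	return (uint32_t)pool_idx | (((uint32_t)offset + 1) << 16);
}

static inline void
aso_age_idx_decode(uint32_t age_idx, uint16_t *pool_idx, uint16_t *offset)
{
	*pool_idx = age_idx & UINT16_MAX;
	*offset = ((age_idx >> 16) & UINT16_MAX) - 1;
}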

Patch

diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index cd50d13..bf5b6d9 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2360,12 +2360,17 @@  enum mlx5_access_aso_op_mod {
 	ASO_OP_MOD_FLOW_HIT = 0x4,
 };
 
+#define ASO_CSEG_DATA_MASK_MODE_OFFSET	30
+
 enum mlx5_aso_data_mask_mode {
 	BITWISE_64BIT = 0x0,
 	BYTEWISE_64BYTE = 0x1,
 	CALCULATED_64BYTE = 0x2,
 };
 
+#define ASO_CSEG_COND_0_OPER_OFFSET	20
+#define ASO_CSEG_COND_1_OPER_OFFSET	16
+
 enum mlx5_aso_pre_cond_op {
 	ASO_OP_ALWAYS_FALSE = 0x0,
 	ASO_OP_ALWAYS_TRUE = 0x1,
@@ -2379,6 +2384,8 @@  enum mlx5_aso_pre_cond_op {
 	ASO_OP_CYCLIC_LESSER = 0x9,
 };
 
+#define ASO_CSEG_COND_OPER_OFFSET	6
+
 enum mlx5_aso_op {
 	ASO_OPER_LOGICAL_AND = 0x0,
 	ASO_OPER_LOGICAL_OR = 0x1,
@@ -2387,7 +2394,7 @@  enum mlx5_aso_op {
 /* ASO WQE CTRL segment. */
 struct mlx5_aso_cseg {
 	uint32_t va_h;
-	uint32_t va_l_ro;
+	uint32_t va_l_r;
 	uint32_t lkey;
 	uint32_t operand_masks;
 	uint32_t condition_0_data;
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index d4f2194..79dc65d 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1129,6 +1129,17 @@ 
 			err = -err;
 			goto error;
 		}
+#ifdef HAVE_MLX5DV_DR_ACTION_FLOW_HIT
+		if (config->hca_attr.flow_hit_aso) {
+			sh->flow_hit_aso_en = 1;
+			err = mlx5_flow_aso_age_mng_init(sh);
+			if (err) {
+				err = -err;
+				goto error;
+			}
+			DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
+		}
+#endif /* HAVE_MLX5DV_DR_ACTION_FLOW_HIT */
 		/* Check relax ordering support. */
 		if (config->hca_attr.relaxed_ordering_write &&
 		    config->hca_attr.relaxed_ordering_read  &&
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index 9a97bb9..e7495a7 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -16,6 +16,7 @@  sources = files(
 	'mlx5_flow_meter.c',
 	'mlx5_flow_dv.c',
 	'mlx5_flow_verbs.c',
+	'mlx5_flow_age.c',
 	'mlx5_mac.c',
 	'mlx5_mr.c',
 	'mlx5_rss.c',
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 862bd40..a5c50ff 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -345,6 +345,72 @@  static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 #define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096
 
 /**
+ * Initialize the ASO aging management structure.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to initialize.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
+{
+	int err;
+
+	if (sh->aso_age_mng)
+		return 0;
+	sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
+				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (!sh->aso_age_mng) {
+		DRV_LOG(ERR, "aso_age_mng allocation was failed.");
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	err = mlx5_aso_queue_init(sh);
+	if (err) {
+		mlx5_free(sh->aso_age_mng);
+		sh->aso_age_mng = NULL;
+		return -1;
+	}
+	rte_spinlock_init(&sh->aso_age_mng->resize_sl);
+	rte_spinlock_init(&sh->aso_age_mng->free_sl);
+	LIST_INIT(&sh->aso_age_mng->free);
+	return 0;
+}
+
+/**
+ * Close and release all the resources of the ASO aging management structure.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to free.
+ */
+static void
+mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
+{
+	int i, j;
+
+	mlx5_aso_queue_stop(sh);
+	mlx5_aso_queue_uninit(sh);
+	if (sh->aso_age_mng->pools) {
+		struct mlx5_aso_age_pool *pool;
+
+		for (i = 0; i < sh->aso_age_mng->next; ++i) {
+			pool = sh->aso_age_mng->pools[i];
+			claim_zero(mlx5_devx_cmd_destroy
+						(pool->flow_hit_aso_obj));
+			for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; ++j)
+				if (pool->actions[j].dr_action)
+					claim_zero
+						(mlx5_glue->destroy_flow_action
+						  (pool->actions[j].dr_action));
+			mlx5_free(pool);
+		}
+		mlx5_free(sh->aso_age_mng->pools);
+	}
+	mlx5_free(sh->aso_age_mng);
+}
+
+/**
  * Initialize the shared aging list information per port.
  *
  * @param[in] sh
@@ -984,6 +1050,10 @@  struct mlx5_dev_ctx_shared *
 	 *  Only primary process handles async device events.
 	 **/
 	mlx5_flow_counters_mng_close(sh);
+	if (sh->aso_age_mng) {
+		mlx5_flow_aso_age_mng_close(sh);
+		sh->aso_age_mng = NULL;
+	}
 	mlx5_flow_ipool_destroy(sh);
 	mlx5_os_dev_shared_handler_uninstall(sh);
 	if (sh->cnt_id_tbl) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index b080426..cf6975d 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -471,6 +471,84 @@  struct mlx5_flow_counter_mng {
 	LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
 };
 
+/* ASO structures. */
+#define MLX5_ASO_QUEUE_LOG_DESC 10
+
+struct mlx5_aso_cq {
+	uint16_t log_desc_n;
+	uint32_t cq_ci:24;
+	struct mlx5_devx_obj *cq;
+	struct mlx5dv_devx_umem *umem_obj;
+	union {
+		volatile void *umem_buf;
+		volatile struct mlx5_cqe *cqes;
+	};
+	volatile uint32_t *db_rec;
+	uint64_t errors;
+};
+
+struct mlx5_aso_devx_mr {
+	void *buf;
+	uint64_t length;
+	struct mlx5dv_devx_umem *umem;
+	struct mlx5_devx_obj *mkey;
+	bool is_indirect;
+};
+
+struct mlx5_aso_sq_elem {
+	struct mlx5_aso_age_pool *pool;
+	uint16_t burst_size;
+};
+
+struct mlx5_aso_sq {
+	uint16_t log_desc_n;
+	struct mlx5_aso_cq cq;
+	struct mlx5_devx_obj *sq;
+	struct mlx5dv_devx_umem *wqe_umem; /* SQ buffer umem. */
+	union {
+		volatile void *umem_buf;
+		volatile struct mlx5_aso_wqe *wqes;
+	};
+	volatile uint32_t *db_rec;
+	struct mlx5dv_devx_uar *uar_obj;
+	volatile uint64_t *uar_addr;
+	struct mlx5_aso_devx_mr mr;
+	uint16_t pi;
+	uint16_t ci;
+	uint32_t sqn;
+	struct mlx5_aso_sq_elem elts[1 << MLX5_ASO_QUEUE_LOG_DESC];
+	uint16_t next; /* Pool index of the next pool to query. */
+};
+
+struct mlx5_aso_age_action {
+	LIST_ENTRY(mlx5_aso_age_action) next;
+	void *dr_action;
+	/* The following fields are relevant only when the action is active. */
+	uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
+	struct mlx5_age_param age_params;
+};
+
+#define MLX5_ASO_AGE_ACTIONS_PER_POOL 512
+
+struct mlx5_aso_age_pool {
+	struct mlx5_devx_obj *flow_hit_aso_obj;
+	uint16_t index; /* Pool index in pools array. */
+	uint64_t time_of_last_age_check; /* In seconds. */
+	struct mlx5_aso_age_action actions[MLX5_ASO_AGE_ACTIONS_PER_POOL];
+};
+
+LIST_HEAD(aso_age_list, mlx5_aso_age_action);
+
+struct mlx5_aso_age_mng {
+	struct mlx5_aso_age_pool **pools;
+	uint16_t n; /* Total number of pools. */
+	uint16_t next; /* Number of pools in use, index of next free pool. */
+	rte_spinlock_t resize_sl; /* Lock for resize objects. */
+	rte_spinlock_t free_sl; /* Lock for free list access. */
+	struct aso_age_list free; /* Free age actions list - ready to use. */
+	struct mlx5_aso_sq aso_sq; /* ASO queue objects. */
+};
+
 #define MLX5_AGE_EVENT_NEW		1
 #define MLX5_AGE_TRIGGER		2
 #define MLX5_AGE_SET(age_info, BIT) \
@@ -485,8 +563,11 @@  struct mlx5_flow_counter_mng {
 /* Aging information for per port. */
 struct mlx5_age_info {
 	uint8_t flags; /* Indicate if is new event or need to be triggered. */
-	struct mlx5_counters aged_counters; /* Aged flow counter list. */
-	rte_spinlock_t aged_sl; /* Aged flow counter list lock. */
+	union {
+		struct mlx5_counters aged_counters; /* Aged counter list. */
+		struct aso_age_list aged_aso; /* Aged ASO actions list. */
+	};
+	rte_spinlock_t aged_sl; /* Aged flow list lock. */
 };
 
 /* Per port data of shared IB device. */
@@ -623,6 +704,7 @@  struct mlx5_dev_ctx_shared {
 	LIST_ENTRY(mlx5_dev_ctx_shared) next;
 	uint32_t refcnt;
 	uint32_t devx:1; /* Opened with DV. */
+	uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
 	uint32_t eqn; /* Event Queue number. */
 	uint32_t max_port; /* Maximal IB device port index. */
 	void *ctx; /* Verbs/DV/DevX context. */
@@ -678,6 +760,8 @@  struct mlx5_dev_ctx_shared {
 	struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
 	/* Flex parser profiles information. */
 	void *devx_rx_uar; /* DevX UAR for Rx. */
+	struct mlx5_aso_age_mng *aso_age_mng;
+	/* Management data for aging mechanism using ASO Flow Hit. */
 	struct mlx5_dev_shared_port port[]; /* per device port data array. */
 };
 
@@ -811,6 +895,8 @@  enum mlx5_txq_modify_type {
 	MLX5_TXQ_MOD_ERR2RDY, /* modify state from error to ready. */
 };
 
 /* HW objects operations structure. */
 struct mlx5_obj_ops {
 	int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);
@@ -961,6 +1047,7 @@  int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
 			 struct rte_eth_hairpin_cap *cap);
 bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
 int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
+int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh);
 
 /* mlx5_ethdev.c */
 
@@ -1219,4 +1306,11 @@  int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev,
 
 eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
 
+/* mlx5_flow_age.c */
+
+int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh);
+void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh);
+
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8ef2a85..1b4a9d1 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1035,6 +1035,7 @@  struct rte_flow {
 	/**< Index to metadata register copy table resource. */
 	uint32_t counter; /**< Holds flow counter. */
 	uint32_t tunnel_id;  /**< Tunnel id */
+	uint32_t age; /**< Holds ASO age bit index. */
 } __rte_packed;
 
 /*
diff --git a/drivers/net/mlx5/mlx5_flow_age.c b/drivers/net/mlx5/mlx5_flow_age.c
new file mode 100644
index 0000000..0d47d8e
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_age.c
@@ -0,0 +1,675 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2020 Mellanox Technologies, Ltd
+ */
+#include <mlx5_prm.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+
+#include <mlx5_malloc.h>
+
+#include "mlx5.h"
+#include "mlx5_flow.h"
+
+/**
+ * Destroy Completion Queue used for ASO access.
+ *
+ * @param[in] cq
+ *   ASO CQ to destroy.
+ */
+static void
+mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
+{
+	if (cq->cq)
+		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
+	if (cq->umem_obj)
+		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
+	if (cq->umem_buf)
+		mlx5_free((void *)(uintptr_t)cq->umem_buf);
+	memset(cq, 0, sizeof(*cq));
+}
+
+/**
+ * Create Completion Queue used for ASO access.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in,out] cq
+ *   Pointer to CQ to create.
+ * @param[in] log_desc_n
+ *   Log of number of descriptors in queue.
+ * @param[in] socket
+ *   Socket to use for allocation.
+ * @param[in] uar_page_id
+ *   UAR page ID to use.
+ * @param[in] eqn
+ *   EQ number.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
+		   int socket, int uar_page_id, uint32_t eqn)
+{
+	struct mlx5_devx_cq_attr attr = { 0 };
+	size_t pgsize = sysconf(_SC_PAGESIZE);
+	uint32_t umem_size;
+	uint16_t cq_size = 1 << log_desc_n;
+
+	cq->log_desc_n = log_desc_n;
+	umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
+	cq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+				   4096, socket);
+	if (!cq->umem_buf) {
+		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	cq->umem_obj = mlx5_glue->devx_umem_reg(ctx,
+						(void *)(uintptr_t)cq->umem_buf,
+						umem_size,
+						IBV_ACCESS_LOCAL_WRITE);
+	if (!cq->umem_obj) {
+		DRV_LOG(ERR, "Failed to register umem for aso CQ.");
+		goto error;
+	}
+	attr.q_umem_valid = 1;
+	attr.db_umem_valid = 1;
+	attr.use_first_only = 0;
+	attr.overrun_ignore = 0;
+	attr.uar_page_id = uar_page_id;
+	attr.q_umem_id = cq->umem_obj->umem_id;
+	attr.q_umem_offset = 0;
+	attr.db_umem_id = attr.q_umem_id;
+	attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
+	attr.eqn = eqn;
+	attr.log_cq_size = log_desc_n;
+	attr.log_page_size = rte_log2_u32(pgsize);
+	cq->cq = mlx5_devx_cmd_create_cq(ctx, &attr);
+	if (!cq->cq)
+		goto error;
+	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
+	cq->cq_ci = 0;
+	memset((void *)(uintptr_t)cq->umem_buf, 0xFF, attr.db_umem_offset);
+	return 0;
+error:
+	mlx5_aso_cq_destroy(cq);
+	return -1;
+}
+
+/**
+ * Free MR resources.
+ *
+ * @param[in] mr
+ *   MR to free.
+ */
+static void
+mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
+{
+	claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
+	if (!mr->is_indirect && mr->umem)
+		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
+	mlx5_free(mr->buf);
+	memset(mr, 0, sizeof(*mr));
+}
+
+/**
+ * Register Memory Region.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in] length
+ *   Size of MR buffer.
+ * @param[in,out] mr
+ *   Pointer to MR to create.
+ * @param[in] socket
+ *   Socket to use for allocation.
+ * @param[in] pdn
+ *   Protection Domain number to use.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
+		     int socket, int pdn)
+{
+	struct mlx5_devx_mkey_attr mkey_attr;
+
+	mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
+			      socket);
+	if (!mr->buf) {
+		DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
+		return -1;
+	}
+	mr->umem = mlx5_glue->devx_umem_reg(ctx, mr->buf, length,
+						 IBV_ACCESS_LOCAL_WRITE);
+	if (!mr->umem) {
+		DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
+		goto error;
+	}
+	mkey_attr.addr = (uintptr_t)mr->buf;
+	mkey_attr.size = length;
+	mkey_attr.umem_id = mr->umem->umem_id;
+	mkey_attr.pd = pdn;
+	mkey_attr.pg_access = 1;
+	mkey_attr.klm_array = NULL;
+	mkey_attr.klm_num = 0;
+	mkey_attr.relaxed_ordering = 0;
+	mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
+	if (!mr->mkey) {
+		DRV_LOG(ERR, "Failed to create direct Mkey.");
+		goto error;
+	}
+	mr->length = length;
+	mr->is_indirect = false;
+	return 0;
+error:
+	if (mr->umem)
+		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
+	mlx5_free(mr->buf);
+	return -1;
+}
+
+/**
+ * Destroy Send Queue used for ASO access.
+ *
+ * @param[in] sq
+ *   ASO SQ to destroy.
+ */
+static void
+mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
+{
+	if (sq->wqe_umem) {
+		mlx5_glue->devx_umem_dereg(sq->wqe_umem);
+		sq->wqe_umem = NULL;
+	}
+	if (sq->umem_buf) {
+		mlx5_free((void *)(uintptr_t)sq->umem_buf);
+		sq->umem_buf = NULL;
+	}
+	if (sq->sq) {
+		mlx5_devx_cmd_destroy(sq->sq);
+		sq->sq = NULL;
+	}
+	if (sq->cq.cq)
+		mlx5_aso_cq_destroy(&sq->cq);
+	if (sq->uar_obj)
+		mlx5_glue->devx_free_uar(sq->uar_obj);
+	mlx5_aso_devx_dereg_mr(&sq->mr);
+	memset(sq, 0, sizeof(*sq));
+}
+
+/**
+ * Initialize Send Queue used for ASO access.
+ *
+ * @param[in] sq
+ *   ASO SQ to initialize.
+ */
+static void
+mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
+{
+	volatile struct mlx5_aso_wqe *restrict wqe;
+	int i;
+	int size = 1 << sq->log_desc_n;
+	uint64_t addr;
+
+	/* All the WQE fields initialized below stay constant for the SQ lifetime. */
+	for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) {
+		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
+							  (sizeof(*wqe) >> 4));
+		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
+		addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
+					    MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
+		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
+		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
+		wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
+			(0u |
+			 (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
+			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
+			 (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
+			 (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
+		wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
+	}
+}
+
+/**
+ * Create Send Queue used for ASO access.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in,out] sq
+ *   Pointer to SQ to create.
+ * @param[in] socket
+ *   Socket to use for allocation.
+ * @param[in] pdn
+ *   Protection Domain number to use.
+ * @param[in] eqn
+ *   EQ number.
+ * @param[in] log_desc_n
+ *   Log of number of descriptors in queue.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
+		   uint32_t pdn, uint32_t eqn,  uint16_t log_desc_n)
+{
+	struct mlx5_devx_create_sq_attr attr = { 0 };
+	struct mlx5_devx_modify_sq_attr modify_attr = { 0 };
+	size_t pgsize = sysconf(_SC_PAGESIZE);
+	struct mlx5_devx_wq_attr *wq_attr = &attr.wq_attr;
+	uint32_t sq_desc_n = 1 << log_desc_n;
+	uint32_t wq_size = sizeof(struct mlx5_aso_wqe) * sq_desc_n;
+	int ret;
+
+	if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
+				 sq_desc_n, &sq->mr, socket, pdn))
+		return -1;
+	sq->uar_obj = mlx5_glue->devx_alloc_uar(ctx, 0);
+	if (!sq->uar_obj)
+		goto error;
+	if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
+			       sq->uar_obj->page_id, eqn))
+		goto error;
+	sq->log_desc_n = log_desc_n;
+	sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size +
+				   sizeof(*sq->db_rec) * 2, 4096, socket);
+	if (!sq->umem_buf) {
+		DRV_LOG(ERR, "Can't allocate wqe buffer.");
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	sq->wqe_umem = mlx5_glue->devx_umem_reg(ctx,
+						(void *)(uintptr_t)sq->umem_buf,
+						wq_size +
+						sizeof(*sq->db_rec) * 2,
+						IBV_ACCESS_LOCAL_WRITE);
+	if (!sq->wqe_umem) {
+		DRV_LOG(ERR, "Failed to register umem for SQ.");
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	attr.state = MLX5_SQC_STATE_RST;
+	attr.tis_lst_sz = 0;
+	attr.tis_num = 0;
+	attr.user_index = 0xFFFF;
+	attr.cqn = sq->cq.cq->id;
+	wq_attr->uar_page = sq->uar_obj->page_id;
+	wq_attr->pd = pdn;
+	wq_attr->wq_type = MLX5_WQ_TYPE_CYCLIC;
+	wq_attr->log_wq_pg_sz = rte_log2_u32(pgsize);
+	wq_attr->wq_umem_id = sq->wqe_umem->umem_id;
+	wq_attr->wq_umem_offset = 0;
+	wq_attr->wq_umem_valid = 1;
+	wq_attr->log_wq_stride = 6;
+	wq_attr->log_wq_sz = rte_log2_u32(wq_size) - 6;
+	wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
+	wq_attr->dbr_addr = wq_size;
+	wq_attr->dbr_umem_valid = 1;
+	sq->sq = mlx5_devx_cmd_create_sq(ctx, &attr);
+	if (!sq->sq) {
+		DRV_LOG(ERR, "Can't create sq object.");
+		rte_errno  = ENOMEM;
+		goto error;
+	}
+	modify_attr.state = MLX5_SQC_STATE_RDY;
+	ret = mlx5_devx_cmd_modify_sq(sq->sq, &modify_attr);
+	if (ret) {
+		DRV_LOG(ERR, "Can't change sq state to ready.");
+		rte_errno  = ENOMEM;
+		goto error;
+	}
+	sq->ci = 0;
+	sq->pi = 0;
+	sq->sqn = sq->sq->id;
+	sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
+	sq->uar_addr = (volatile uint64_t *)((uint8_t *)sq->uar_obj->base_addr +
+									 0x800);
+	mlx5_aso_init_sq(sq);
+	return 0;
+error:
+	mlx5_aso_destroy_sq(sq);
+	return -1;
+}
+
+/**
+ * API to create and initialize Send Queue used for ASO access.
+ *
+ * @param[in] sh
+ *   Pointer to shared device context.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
+{
+	return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0, sh->pdn,
+				  sh->eqn, MLX5_ASO_QUEUE_LOG_DESC);
+}
+
+/**
+ * API to destroy Send Queue used for ASO access.
+ *
+ * @param[in] sh
+ *   Pointer to shared device context.
+ */
+void
+mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
+{
+	mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
+}
+
+/**
+ * Write a burst of WQEs to ASO SQ.
+ *
+ * @param[in] mng
+ *   ASO management data, contains the SQ.
+ * @param[in] n
+ *   Index of the last valid pool.
+ *
+ * @return
+ *   Number of WQEs in burst.
+ */
+static uint16_t
+mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
+{
+	volatile struct mlx5_aso_wqe *wqe;
+	struct mlx5_aso_sq *sq = &mng->aso_sq;
+	struct mlx5_aso_age_pool *pool;
+	uint16_t size = 1 << sq->log_desc_n;
+	uint16_t mask = size - 1;
+	uint16_t max;
+	uint16_t start_pi = sq->pi;
+
+	max = RTE_MIN(size - (uint16_t)(sq->pi - sq->ci), n - sq->next);
+	if (unlikely(!max))
+		return 0;
+	sq->elts[start_pi & mask].burst_size = max;
+	do {
+		wqe = &sq->wqes[sq->pi & mask];
+		rte_prefetch0(&sq->wqes[(sq->pi + 1) & mask]);
+		/* Fill next WQE. */
+		rte_spinlock_lock(&mng->resize_sl);
+		pool = mng->pools[sq->next];
+		rte_spinlock_unlock(&mng->resize_sl);
+		sq->elts[sq->pi & mask].pool = pool;
+		wqe->general_cseg.misc =
+				rte_cpu_to_be_32(((struct mlx5_devx_obj *)
+						 (pool->flow_hit_aso_obj))->id);
+		wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
+							 MLX5_COMP_MODE_OFFSET);
+		wqe->general_cseg.opcode = rte_cpu_to_be_32
+						(MLX5_OPCODE_ACCESS_ASO |
+						 ASO_OP_MOD_FLOW_HIT << 24 |
+						 sq->pi << 9);
+		sq->pi++;
+		sq->next++;
+		max--;
+	} while (max);
+	wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+							 MLX5_COMP_MODE_OFFSET);
+	rte_io_wmb();
+	sq->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi << 1);
+	rte_wmb();
+	*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64-bit arch. */
+	rte_wmb();
+	return sq->elts[start_pi & mask].burst_size;
+}
+
+/**
+ * Debug utility function. Dump contents of error CQE and WQE.
+ *
+ * @param[in] cqe
+ *   Error CQE to dump.
+ * @param[in] wqe
+ *   Error WQE to dump.
+ */
+static void
+mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
+{
+	int i;
+
+	DRV_LOG(ERR, "Error cqe:");
+	for (i = 0; i < 16; i += 4)
+		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
+			cqe[i + 2], cqe[i + 3]);
+	DRV_LOG(ERR, "\nError wqe:");
+	for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
+		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
+			wqe[i + 2], wqe[i + 3]);
+}
+
+/**
+ * Handle case of error CQE.
+ *
+ * @param[in] sq
+ *   ASO SQ to use.
+ */
+static void
+mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
+{
+	struct mlx5_aso_cq *cq = &sq->cq;
+	uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
+	volatile struct mlx5_err_cqe *cqe =
+				(volatile struct mlx5_err_cqe *)&cq->cqes[idx];
+
+	cq->errors++;
+	idx = rte_be_to_cpu_16(cqe->wqe_counter) &
+					((1u << sq->log_desc_n) - 1);
+	mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
+				 (volatile uint32_t *)&sq->wqes[idx]);
+}
+
+/**
+ * Update ASO objects upon completion.
+ *
+ * @param[in] sh
+ *   Shared device context.
+ * @param[in] n
+ *   Number of completed ASO objects.
+ */
+static void
+mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
+{
+	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
+	struct mlx5_aso_sq *sq = &mng->aso_sq;
+	struct mlx5_age_info *age_info;
+	const uint16_t size = 1 << sq->log_desc_n;
+	const uint16_t mask = size - 1;
+	const uint64_t curr = MLX5_CURR_TIME_SEC;
+	uint16_t i;
+
+	for (i = 0; i < n; ++i) {
+		uint16_t idx = (sq->ci + i) & mask;
+		struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
+		uint64_t diff = curr - pool->time_of_last_age_check;
+		uint64_t *addr = sq->mr.buf;
+		int j;
+
+		addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL  / 64;
+		pool->time_of_last_age_check = curr;
+		for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
+			struct mlx5_aso_age_action *act = &pool->actions[j];
+			struct mlx5_age_param *ap = &act->age_params;
+			uint8_t byte;
+			uint8_t offset;
+			uint8_t *u8addr;
+			uint8_t hit;
+
+			if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+					    AGE_CANDIDATE)
+				continue;
+			byte = 63 - (j / 8);
+			offset = j % 8;
+			u8addr = (uint8_t *)addr;
+			hit = (u8addr[byte] >> offset) & 0x1;
+			if (hit) {
+				__atomic_store_n(&ap->sec_since_last_hit, 0,
+						 __ATOMIC_RELAXED);
+			} else {
+				struct mlx5_priv *priv;
+				uint16_t expected = AGE_CANDIDATE;
+
+				__atomic_fetch_add(&ap->sec_since_last_hit,
+						   diff, __ATOMIC_RELAXED);
+				/* If timeout passed add to aged-out list. */
+				if (ap->sec_since_last_hit <= ap->timeout)
+					continue;
+				priv =
+				rte_eth_devices[ap->port_id].data->dev_private;
+				age_info = GET_PORT_AGE_INFO(priv);
+				rte_spinlock_lock(&age_info->aged_sl);
+				if (__atomic_compare_exchange_n(&ap->state,
+								&expected,
+								AGE_TMOUT,
+								false,
+							       __ATOMIC_RELAXED,
+							    __ATOMIC_RELAXED)) {
+					LIST_INSERT_HEAD(&age_info->aged_aso,
+							 act, next);
+					MLX5_AGE_SET(age_info,
+						     MLX5_AGE_EVENT_NEW);
+				}
+				rte_spinlock_unlock(&age_info->aged_sl);
+			}
+		}
+	}
+	for (i = 0; i < sh->max_port; i++) {
+		age_info = &sh->port[i].age_info;
+		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
+			continue;
+		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
+			rte_eth_dev_callback_process
+				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
+				RTE_ETH_EVENT_FLOW_AGED, NULL);
+		age_info->flags = 0;
+	}
+}
+
+/**
+ * Handle completions from WQEs sent to ASO SQ.
+ *
+ * @param[in] sh
+ *   Shared device context.
+ *
+ * @return
+ *   Number of CQEs handled.
+ */
+static uint16_t
+mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
+	struct mlx5_aso_sq *sq = &mng->aso_sq;
+	struct mlx5_aso_cq *cq = &sq->cq;
+	volatile struct mlx5_cqe *restrict cqe;
+	const unsigned int cq_size = 1 << cq->log_desc_n;
+	const unsigned int mask = cq_size - 1;
+	uint32_t idx;
+	uint32_t next_idx = cq->cq_ci & mask;
+	const uint16_t max = (uint16_t)(sq->pi - sq->ci);
+	uint16_t i = 0;
+	int ret;
+	if (unlikely(!max))
+		return 0;
+	do {
+		idx = next_idx;
+		next_idx = (cq->cq_ci + 1) & mask;
+		rte_prefetch0(&cq->cqes[next_idx]);
+		cqe = &cq->cqes[idx];
+		ret = check_cqe(cqe, cq_size, cq->cq_ci);
+		/*
+		 * Be sure owner read is done before any other cookie field or
+		 * opaque field.
+		 */
+		rte_io_rmb();
+		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
+			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
+				break;
+			mlx5_aso_cqe_err_handle(sq);
+		} else {
+			i += sq->elts[(sq->ci + i) & mask].burst_size;
+		}
+		cq->cq_ci++;
+	} while (1);
+	if (likely(i)) {
+		mlx5_aso_age_action_update(sh, i);
+		sq->ci += i;
+		rte_io_wmb();
+		cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
+	}
+	return i;
+}
+
+/**
+ * Periodically read CQEs and send WQEs to ASO SQ.
+ *
+ * @param[in] arg
+ *   Shared device context containing the ASO SQ.
+ */
+static void
+mlx5_flow_aso_alarm(void *arg)
+{
+	struct mlx5_dev_ctx_shared *sh = arg;
+	struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
+	uint32_t us = 100u;
+	uint16_t n;
+
+	rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
+	n = sh->aso_age_mng->next;
+	rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
+	mlx5_aso_completion_handle(sh);
+	if (sq->next == n) {
+		/* End of loop: wait 1 second. */
+		us = 1000000u;
+		sq->next = 0;
+	}
+	mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
+	if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
+		DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
+}
+
+/**
+ * API to start ASO access using ASO SQ.
+ *
+ * @param[in] sh
+ *   Pointer to shared device context.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
+{
+	if (rte_eal_alarm_set(1000000, mlx5_flow_aso_alarm, sh)) {
+		DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
+		return -rte_errno;
+	}
+	return 0;
+}
+
+/**
+ * API to stop ASO access using ASO SQ.
+ *
+ * @param[in] sh
+ *   Pointer to shared device context.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
+{
+	int retries = 1024;
+
+	if (!sh->aso_age_mng->aso_sq.sq)
+		return -EINVAL;
+	rte_errno = 0;
+	while (--retries) {
+		rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
+		if (rte_errno != EINPROGRESS)
+			break;
+		rte_pause();
+	}
+	return -rte_errno;
+}
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 01b6e7c..dcc397d 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4127,7 +4127,8 @@  struct mlx5_cache_entry *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_action_age *age = action->conf;
 
-	if (!priv->config.devx || priv->sh->cmng.counter_fallback)
+	if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
+	    !priv->sh->aso_age_mng))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
@@ -8400,6 +8401,7 @@  struct mlx5_hlist_entry *
 	__atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
 	return counter;
 }
+
 /**
  * Add Tx queue matcher
  *
@@ -9239,6 +9241,253 @@  struct mlx5_cache_entry *
 }
 
 /**
+ * Get ASO age action by index.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] age_idx
+ *   Index to the ASO age action.
+ *
+ * @return
+ *   The specified ASO age action.
+ */
+static struct mlx5_aso_age_action*
+flow_dv_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
+{
+	uint16_t pool_idx = age_idx & UINT16_MAX;
+	uint16_t offset = (age_idx >> 16) & UINT16_MAX;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+	struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
+
+	return &pool->actions[offset - 1];
+}
+
+/**
+ * Remove a flow counter from aged counter list.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] age
+ *   Pointer to the aso age action handler.
+ */
+static void
+flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
+				struct mlx5_aso_age_action *age)
+{
+	struct mlx5_age_info *age_info;
+	struct mlx5_age_param *age_param = &age->age_params;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint16_t expected = AGE_CANDIDATE;
+
+	age_info = GET_PORT_AGE_INFO(priv);
+	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
+					 AGE_FREE, false, __ATOMIC_RELAXED,
+					 __ATOMIC_RELAXED)) {
+		/*
+		 * Take the lock even if the action has aged out, since
+		 * the aging handler may still be processing it.
+		 */
+		rte_spinlock_lock(&age_info->aged_sl);
+		LIST_REMOVE(age, next);
+		rte_spinlock_unlock(&age_info->aged_sl);
+		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+	}
+}
+
+static void
+flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+	struct mlx5_aso_age_action *age = flow_dv_aso_age_get_by_idx(dev,
+								     age_idx);
+
+	flow_dv_aso_age_remove_from_age(dev, age);
+	rte_spinlock_lock(&mng->free_sl);
+	LIST_INSERT_HEAD(&mng->free, age, next);
+	rte_spinlock_unlock(&mng->free_sl);
+}
+
+/**
+ * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int
+flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+	void *old_pools = mng->pools;
+	uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
+	uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
+	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
+
+	if (!pools) {
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	if (old_pools) {
+		memcpy(pools, old_pools,
+		       mng->n * sizeof(struct mlx5_aso_age_pool *));
+		mlx5_free(old_pools);
+	} else {
+		/* First ASO flow hit allocation - starting ASO data-path. */
+		int ret = mlx5_aso_queue_start(priv->sh);
+
+		if (ret) {
+			mlx5_free(pools);
+			return ret;
+		}
+	}
+	mng->n = resize;
+	mng->pools = pools;
+	return 0;
+}
+
+/**
+ * Create and initialize a new ASO aging pool.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] age_free
+ *   Where to put the pointer of a new age action.
+ *
+ * @return
+ *   The age actions pool pointer and @p age_free is set on success,
+ *   NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_aso_age_pool *
+flow_dv_age_pool_create(struct rte_eth_dev *dev,
+			struct mlx5_aso_age_action **age_free)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+	struct mlx5_aso_age_pool *pool = NULL;
+	struct mlx5_devx_obj *obj = NULL;
+	uint32_t i;
+
+	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
+						    priv->sh->pdn);
+	if (!obj) {
+		rte_errno = ENODATA;
+		DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
+		return NULL;
+	}
+	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+	if (!pool) {
+		claim_zero(mlx5_devx_cmd_destroy(obj));
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	pool->flow_hit_aso_obj = obj;
+	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
+	rte_spinlock_lock(&mng->resize_sl);
+	pool->index = mng->next;
+	/* Resize pools array if there is no room for the new pool in it. */
+	if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
+		claim_zero(mlx5_devx_cmd_destroy(obj));
+		mlx5_free(pool);
+		rte_spinlock_unlock(&mng->resize_sl);
+		return NULL;
+	}
+	mng->pools[pool->index] = pool;
+	mng->next++;
+	rte_spinlock_unlock(&mng->resize_sl);
+	/* Assign the first action in the new pool, the rest go to free list. */
+	*age_free = &pool->actions[0];
+	for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
+		pool->actions[i].offset = i;
+		LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
+	}
+	return pool;
+}
+
+/**
+ * Allocate an ASO aging bit.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_age_pool *pool;
+	struct mlx5_aso_age_action *age_free = NULL;
+	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
+
+	MLX5_ASSERT(mng);
+	/* Try to get the next free age action bit. */
+	rte_spinlock_lock(&mng->free_sl);
+	age_free = LIST_FIRST(&mng->free);
+	if (age_free) {
+		LIST_REMOVE(age_free, next);
+	} else if (!flow_dv_age_pool_create(dev, &age_free)) {
+		rte_spinlock_unlock(&mng->free_sl);
+		goto err;
+	}
+	rte_spinlock_unlock(&mng->free_sl);
+	pool = container_of
+	  ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
+			(age_free - age_free->offset), struct mlx5_aso_age_pool,
+								       actions);
+	if (!age_free->dr_action) {
+		age_free->dr_action = mlx5_glue->dr_action_create_flow_hit
+						(pool->flow_hit_aso_obj->obj,
+						 age_free->offset, REG_C_5);
+		if (!age_free->dr_action)
+			goto err;
+	}
+	return pool->index | ((age_free->offset + 1) << 16);
+err:
+	if (age_free) {
+		rte_spinlock_lock(&mng->free_sl);
+		LIST_INSERT_HEAD(&mng->free, age_free, next);
+		rte_spinlock_unlock(&mng->free_sl);
+	}
+	return 0;
+}
+
+/**
+ * Create an age action using the ASO mechanism.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] age
+ *   Pointer to the aging action configuration.
+ *
+ * @return
+ *   Index to the ASO age action on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
+				 const struct rte_flow_action_age *age)
+{
+	uint32_t age_idx = 0;
+	struct mlx5_aso_age_action *aso_age = NULL;
+
+	age_idx = flow_dv_aso_age_alloc(dev);
+	if (!age_idx)
+		return 0;
+	aso_age = flow_dv_aso_age_get_by_idx(dev, age_idx);
+	aso_age->age_params.context = age->context;
+	aso_age->age_params.timeout = age->timeout;
+	aso_age->age_params.port_id = dev->data->port_id;
+	__atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
+			 __ATOMIC_RELAXED);
+	__atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
+			 __ATOMIC_RELAXED);
+	return age_idx;
+}
+
+/**
  * Fill the flow with DV spec, lock free
  * (mutex should be acquired by caller).
  *
@@ -9528,6 +9777,22 @@  struct mlx5_cache_entry *
 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
 			break;
 		case RTE_FLOW_ACTION_TYPE_AGE:
+			if (priv->sh->flow_hit_aso_en) {
+				flow->age = flow_dv_translate_create_aso_age
+						(dev, action->conf);
+				if (!flow->age)
+					return rte_flow_error_set
+						(error, rte_errno,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 NULL,
+						 "can't create age action");
+				dev_flow->dv.actions[actions_n++] =
+					  (flow_dv_aso_age_get_by_idx
+						(dev, flow->age))->dr_action;
+				action_flags |= MLX5_FLOW_ACTION_AGE;
+				break;
+			}
+			/* Fall-through */
 		case RTE_FLOW_ACTION_TYPE_COUNT:
 			if (!dev_conf->devx) {
 				return rte_flow_error_set
@@ -10859,6 +11124,8 @@  struct mlx5_cache_entry *
 			mlx5_flow_meter_detach(fm);
 		flow->meter = 0;
 	}
+	if (flow->age)
+		flow_dv_aso_age_release(dev, flow->age);
 	while (flow->dev_handles) {
 		uint32_t tmp_idx = flow->dev_handles;
 
@@ -11391,30 +11658,33 @@  struct mlx5_cache_entry *
 		  void *data, struct rte_flow_error *error)
 {
 	struct rte_flow_query_age *resp = data;
+	struct mlx5_age_param *age_param;
 
-	if (flow->counter) {
-		struct mlx5_age_param *age_param =
-				flow_dv_counter_idx_get_age(dev, flow->counter);
+	if (flow->age) {
+		struct mlx5_aso_age_action *act =
+				     flow_dv_aso_age_get_by_idx(dev, flow->age);
+
+		age_param = &act->age_params;
+	} else if (flow->counter) {
+		age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
 
 		if (!age_param || !age_param->timeout)
 			return rte_flow_error_set
 					(error, EINVAL,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					 NULL, "cannot read age data");
-		resp->aged = __atomic_load_n(&age_param->state,
-					     __ATOMIC_RELAXED) ==
-							AGE_TMOUT ? 1 : 0;
-		resp->sec_since_last_hit_valid = !resp->aged;
-		if (resp->sec_since_last_hit_valid)
-			resp->sec_since_last_hit =
-				__atomic_load_n(&age_param->sec_since_last_hit,
-						__ATOMIC_RELAXED);
-		return 0;
-	}
-	return rte_flow_error_set(error, EINVAL,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL,
-				  "age data not available");
+	} else {
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "age data not available");
+	}
+	resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+				     AGE_TMOUT ? 1 : 0;
+	resp->sec_since_last_hit_valid = !resp->aged;
+	if (resp->sec_since_last_hit_valid)
+		resp->sec_since_last_hit = __atomic_load_n
+			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+	return 0;
 }
 
 /**
@@ -12038,25 +12308,35 @@  struct mlx5_cache_entry *
 	struct mlx5_age_info *age_info;
 	struct mlx5_age_param *age_param;
 	struct mlx5_flow_counter *counter;
+	struct mlx5_aso_age_action *act;
 	int nb_flows = 0;
 
 	if (nb_contexts && !context)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					  NULL,
-					  "Should assign at least one flow or"
-					  " context to get if nb_contexts != 0");
+					  NULL, "empty context");
 	age_info = GET_PORT_AGE_INFO(priv);
 	rte_spinlock_lock(&age_info->aged_sl);
-	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
-		nb_flows++;
-		if (nb_contexts) {
-			age_param = MLX5_CNT_TO_AGE(counter);
-			context[nb_flows - 1] = age_param->context;
-			if (!(--nb_contexts))
-				break;
+	if (priv->sh->flow_hit_aso_en)
+		LIST_FOREACH(act, &age_info->aged_aso, next) {
+			nb_flows++;
+			if (nb_contexts) {
+				context[nb_flows - 1] =
+							act->age_params.context;
+				if (!(--nb_contexts))
+					break;
+			}
+		}
+	else
+		TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
+			nb_flows++;
+			if (nb_contexts) {
+				age_param = MLX5_CNT_TO_AGE(counter);
+				context[nb_flows - 1] = age_param->context;
+				if (!(--nb_contexts))
+					break;
+			}
 		}
-	}
 	rte_spinlock_unlock(&age_info->aged_sl);
 	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
 	return nb_flows;
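
For completeness, a hedged sketch of the application side that pairs with
the flow_dv_get_aged_flows() hunk above: once the PMD raises
RTE_ETH_EVENT_FLOW_AGED (see mlx5_aso_age_action_update() in
mlx5_flow_age.c), the application drains the aged contexts with the
standard rte_flow_get_aged_flows() API. handle_aged_flow() below is a
placeholder for application logic:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static void handle_aged_flow(void *ctx); /* Application-defined. */

/* Callback registered with:
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_FLOW_AGED,
 *                                 flow_aged_event_cb, NULL);
 */
static int
flow_aged_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		   void *cb_arg, void *ret_param)
{
	void *contexts[64];
	struct rte_flow_error error;
	int i, n;

	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	/* Drain in batches; each context is the one set in the AGE action. */
	do {
		n = rte_flow_get_aged_flows(port_id, contexts,
					    RTE_DIM(contexts), &error);
		for (i = 0; i < n; i++)
			handle_aged_flow(contexts[i]);
	} while (n == (int)RTE_DIM(contexts));
	return 0;
}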