get:
Show a patch.

patch:
Partially update a patch; only the fields included in the request body are changed.

put:
Update a patch, replacing all writable fields.
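
For illustration, a minimal sketch of driving these endpoints from Python with the requests library. The format=json query parameter, the placeholder token MY_TOKEN, and the example state value are assumptions (this page itself was fetched with ?format=api, and updating a patch requires an account with maintainer rights on the project):

import requests

BASE = "http://patches.dpdk.org/api"

# GET: read a single patch as JSON (no authentication needed).
resp = requests.get(f"{BASE}/patches/117010/", params={"format": "json"})
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])

# PATCH: partial update of writable fields (e.g. state); requires an
# API token, passed in the Authorization header as "Token <key>".
resp = requests.patch(
    f"{BASE}/patches/117010/",
    headers={"Authorization": "Token MY_TOKEN"},  # MY_TOKEN is a placeholder
    json={"state": "superseded"},
)
resp.raise_for_status()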

GET /api/patches/117010/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117010,
    "url": "http://patches.dpdk.org/api/patches/117010/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220928033130.9106-14-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220928033130.9106-14-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220928033130.9106-14-suanmingm@nvidia.com",
    "date": "2022-09-28T03:31:26",
    "name": "[v2,13/17] net/mlx5: add HWS AGE action support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "25dbb152fb20dca50798df2adfc8e7bdb83c4a14",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220928033130.9106-14-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 24870,
            "url": "http://patches.dpdk.org/api/series/24870/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24870",
            "date": "2022-09-28T03:31:15",
            "name": "net/mlx5: HW steering PMD update",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/24870/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/117010/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/117010/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5264FA00C2;\n\tWed, 28 Sep 2022 05:33:54 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D8F7F4284D;\n\tWed, 28 Sep 2022 05:32:35 +0200 (CEST)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2040.outbound.protection.outlook.com [40.107.236.40])\n by mails.dpdk.org (Postfix) with ESMTP id 501DE42B83\n for <dev@dpdk.org>; Wed, 28 Sep 2022 05:32:33 +0200 (CEST)",
            "from DS7PR03CA0124.namprd03.prod.outlook.com (2603:10b6:5:3b4::9) by\n MW4PR12MB6921.namprd12.prod.outlook.com (2603:10b6:303:208::8) with\n Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5654.26; Wed, 28 Sep 2022 03:32:29 +0000",
            "from DM6NAM11FT063.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:3b4:cafe::bc) by DS7PR03CA0124.outlook.office365.com\n (2603:10b6:5:3b4::9) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5654.25 via Frontend\n Transport; Wed, 28 Sep 2022 03:32:29 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n DM6NAM11FT063.mail.protection.outlook.com (10.13.172.219) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5676.17 via Frontend Transport; Wed, 28 Sep 2022 03:32:29 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Tue, 27 Sep\n 2022 20:32:17 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Tue, 27 Sep\n 2022 20:32:15 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=NACCsEspcTofKX6BN7wvWop0zzmuIrzpfsmn/YIpR1Vy6kKEQq45Y5jF35iBg64NsItyONibIlZ5mDysnbLtmcYqvXwhqQ/hZtpdokQXBYO4g2PAl+M/1Et5/1L4U3DbxoGaicTgiM0UrAjuCN1KD58EabRB/jtGlPP3jXG8KTboqy34C842agfbkqGFw4juByjjGJkw3wF/8E9MzZqRyQUFgm9mUHWhBA9wBhjgg2GDt4tUgHuofpAYoTmiQiBXjtYhJGt9TdsISjj02MO245OW5cwF1o38r6j7MDEe5HvDw93Sys745uB6zI61kYEb1mNF1Qx3r9rkTAUxzpm2QQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=VahIPcyt7cQAiSpIWJUJMT8EzVOkswKhaB8lOvlehG4=;\n b=fo1LG61TqOFpaLDnYYLv25LtZN+j5UbaESW+emDbmvlPv+6YJ9KK5Pg9uv6KK2gRzaOEFqbJtrwURpr2RloSPNSaxxfo9Ef6kh7dpHYiGfg08bN74Gp9MhHRYhtE9iNMqn4/Zu0m/3j/FLTqRIAC42KM4iahNi1qKk/DNgd9CdkcBR7wRTrUYyrQJpBZVITQ3eAx45dlo2TrXOiiziGHdwPwEINqkxgHEUiaM761clNT9tmNH2SH3ggyNy4c42SH4d5Gn9f4Zv6P4hkq8WdZqtd6Clc6Y7BRsZblbtlBtad7KLEQTbAV2zQnOzHOESjevsnOCNGr8jEvnz3YqjxlBA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=VahIPcyt7cQAiSpIWJUJMT8EzVOkswKhaB8lOvlehG4=;\n b=D1BXBwXcDMFE7tD+g6GUBc8MXoO/aOBasLnlXrquOtCbPfKUJsBfZvLbjL7n7DGCKXuM7zIA9XDAPynuGb8b4wWV2NRhKHztBiPp+skDnyXEj0kaTElYH0Jvchfkr/6OG79K6Hx2neCA92VmVxBR0hAPbOuhrMqU3xKTpRJvXY8dLe9TcGUvKyv+Zczt6LkYBh4Gd2JW/dnbvV6+QT76AjB3pfDFa8nj4s5xCzns3L3d1hn0RPSB3zn5RomrsXS6VY79wEmSiGuxeK2Vhv486xRG8qLHowZ3cv+aXnkwiMzGDHGq8r1Uzp6kWCnIKIlKPByTGG5NuljD1/vFrkR8dQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>",
        "CC": "<dev@dpdk.org>, <rasland@nvidia.com>, <orika@nvidia.com>, Michael Baum\n <michaelba@nvidia.com>",
        "Subject": "[PATCH v2 13/17] net/mlx5: add HWS AGE action support",
        "Date": "Wed, 28 Sep 2022 06:31:26 +0300",
        "Message-ID": "<20220928033130.9106-14-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220928033130.9106-1-suanmingm@nvidia.com>",
        "References": "<20220923144334.27736-1-suanmingm@nvidia.com>\n <20220928033130.9106-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DM6NAM11FT063:EE_|MW4PR12MB6921:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "b14cc8c4-9c5d-4c37-22d9-08daa1021255",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n viX5Jf2OjqXppuqSdQqRc3o3ne96Ut69yFCIwsjlUIGV9WVeZMsrqDc7r3dCgyLrET6j0XCOwue/D29ARbNrEkdRDEGhY2qGuD7HU2bJhqCgtALKB0ZxBh08JpkgjFBjEljKyh9khwHZtS6y7Lc4ntU63PNrCr9BWT1o2tQWIKjj/x7FWmT1OggbaRisYpFaNiyB5dhlielRZ4sRJDLTYJQCZChJUiZsMo+0ZIvoLUTjGfr7bjJyj84fJ3Z92Qcy/f7UEr4yf0pL6tG7c2NK/eoSuXZJi0elaU3MLPWBxO+HSybM+svIvRA861axJxmUWUxJkTXmoldzMrBuSLyLcpHeDKldiN0SGSlUcvFkKMFIT06CQNxcPFX04FKXEJmGEqnjQxpEUXc/5j/zdYmC/bU+4iIj7XO9MUpi5f9USOr8qhL+KjecMstQMLJ7D6fKGVVpiglnI0uFPc9zBUC2SvpB1uhYWtBFKzyIEaY0sYVdaHlqBI4h4Q5nvEXR25oxvaL/zzbuPgWVMaGh6qjW0yB4KUYi3fHYw30raV6lCtBlAkrW5MKse1NzwOXvNyOkrU0Fhz7eYuQ9RnR5nlnwMvaGZAENjNT7zyCNTTqcBIrAvSrB/uhNIzkJMBK9LwEMG0WS5vKvjG/WtfMDYr73O9GUrm6mkRzLpdUnFd/PR8fMxFG7JaRh58jQZdIP7F3P8C7XdEQYZrLRLfupL+QeeSY4/8LXzCgjvRB3pgmT/j2nILBTzGmO9RFuGCA4hG5bWgfgnOhmvGRY93lKJVagkoidiC/oAmxTwtRuw2Y6Dw4=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(396003)(346002)(376002)(39860400002)(136003)(451199015)(40470700004)(46966006)(36840700001)(70586007)(8676002)(356005)(70206006)(186003)(5660300002)(4326008)(7636003)(2906002)(8936002)(30864003)(6286002)(1076003)(16526019)(2616005)(86362001)(83380400001)(7696005)(316002)(426003)(336012)(26005)(40460700003)(107886003)(6666004)(478600001)(6636002)(54906003)(41300700001)(82740400003)(110136005)(47076005)(36860700001)(36756003)(55016003)(40480700001)(82310400005)(559001)(579004)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Sep 2022 03:32:29.4360 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n b14cc8c4-9c5d-4c37-22d9-08daa1021255",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT063.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MW4PR12MB6921",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Michael Baum <michaelba@nvidia.com>\n\nAdd support for AGE action for HW steering.\nThis patch includes:\n\n 1. Add new structures to manage the aging.\n 2. Initialize all them in configure function.\n 3. Implement per second aging check using CNT background thread.\n 4. Enable AGE action in flow create/destroy operations.\n 5. Implement queue-based function to report aged flow rules.\n\nSigned-off-by: Michael Baum <michaelba@nvidia.com>\n---\n drivers/net/mlx5/mlx5.c            |   67 +-\n drivers/net/mlx5/mlx5.h            |   51 +-\n drivers/net/mlx5/mlx5_defs.h       |    3 +\n drivers/net/mlx5/mlx5_flow.c       |   89 ++-\n drivers/net/mlx5/mlx5_flow.h       |   33 +-\n drivers/net/mlx5/mlx5_flow_dv.c    |   30 +-\n drivers/net/mlx5/mlx5_flow_hw.c    | 1104 ++++++++++++++++++++++++----\n drivers/net/mlx5/mlx5_flow_verbs.c |    4 +-\n drivers/net/mlx5/mlx5_hws_cnt.c    |  704 +++++++++++++++++-\n drivers/net/mlx5/mlx5_hws_cnt.h    |  193 ++++-\n 10 files changed, 2013 insertions(+), 265 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 383a789dfa..742607509b 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -497,6 +497,12 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)\n \tuint32_t i;\n \tstruct mlx5_age_info *age_info;\n \n+\t/*\n+\t * In HW steering, aging information structure is initialized later\n+\t * during configure function.\n+\t */\n+\tif (sh->config.dv_flow_en == 2)\n+\t\treturn;\n \tfor (i = 0; i < sh->max_port; i++) {\n \t\tage_info = &sh->port[i].age_info;\n \t\tage_info->flags = 0;\n@@ -540,8 +546,8 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)\n \t\t\thca_attr->flow_counter_bulk_alloc_bitmap);\n \t/* Initialize fallback mode only on the port initializes sh. */\n \tif (sh->refcnt == 1)\n-\t\tsh->cmng.counter_fallback = fallback;\n-\telse if (fallback != sh->cmng.counter_fallback)\n+\t\tsh->sws_cmng.counter_fallback = fallback;\n+\telse if (fallback != sh->sws_cmng.counter_fallback)\n \t\tDRV_LOG(WARNING, \"Port %d in sh has different fallback mode \"\n \t\t\t\"with others:%d.\", PORT_ID(priv), fallback);\n #endif\n@@ -556,17 +562,38 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)\n static void\n mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)\n {\n-\tint i;\n+\tint i, j;\n+\n+\tif (sh->config.dv_flow_en < 2) {\n+\t\tmemset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));\n+\t\tTAILQ_INIT(&sh->sws_cmng.flow_counters);\n+\t\tsh->sws_cmng.min_id = MLX5_CNT_BATCH_OFFSET;\n+\t\tsh->sws_cmng.max_id = -1;\n+\t\tsh->sws_cmng.last_pool_idx = POOL_IDX_INVALID;\n+\t\trte_spinlock_init(&sh->sws_cmng.pool_update_sl);\n+\t\tfor (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {\n+\t\t\tTAILQ_INIT(&sh->sws_cmng.counters[i]);\n+\t\t\trte_spinlock_init(&sh->sws_cmng.csl[i]);\n+\t\t}\n+\t} else {\n+\t\tstruct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;\n+\t\tuint32_t fw_max_nb_cnts = attr->max_flow_counter;\n+\t\tuint8_t log_dcs = log2above(fw_max_nb_cnts) - 1;\n+\t\tuint32_t max_nb_cnts = 0;\n+\n+\t\tfor (i = 0, j = 0; j < MLX5_HWS_CNT_DCS_NUM; ++i) {\n+\t\t\tint log_dcs_i = log_dcs - i;\n \n-\tmemset(&sh->cmng, 0, sizeof(sh->cmng));\n-\tTAILQ_INIT(&sh->cmng.flow_counters);\n-\tsh->cmng.min_id = MLX5_CNT_BATCH_OFFSET;\n-\tsh->cmng.max_id = -1;\n-\tsh->cmng.last_pool_idx = POOL_IDX_INVALID;\n-\trte_spinlock_init(&sh->cmng.pool_update_sl);\n-\tfor (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {\n-\t\tTAILQ_INIT(&sh->cmng.counters[i]);\n-\t\trte_spinlock_init(&sh->cmng.csl[i]);\n+\t\t\tif (log_dcs_i < 0)\n+\t\t\t\tbreak;\n+\t\t\tif ((max_nb_cnts | RTE_BIT32(log_dcs_i)) >\n+\t\t\t    fw_max_nb_cnts)\n+\t\t\t\tcontinue;\n+\t\t\tmax_nb_cnts |= RTE_BIT32(log_dcs_i);\n+\t\t\tj++;\n+\t\t}\n+\t\tsh->hws_max_log_bulk_sz = log_dcs;\n+\t\tsh->hws_max_nb_counters = max_nb_cnts;\n \t}\n }\n \n@@ -607,13 +634,13 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)\n \t\trte_pause();\n \t}\n \n-\tif (sh->cmng.pools) {\n+\tif (sh->sws_cmng.pools) {\n \t\tstruct mlx5_flow_counter_pool *pool;\n-\t\tuint16_t n_valid = sh->cmng.n_valid;\n-\t\tbool fallback = sh->cmng.counter_fallback;\n+\t\tuint16_t n_valid = sh->sws_cmng.n_valid;\n+\t\tbool fallback = sh->sws_cmng.counter_fallback;\n \n \t\tfor (i = 0; i < n_valid; ++i) {\n-\t\t\tpool = sh->cmng.pools[i];\n+\t\t\tpool = sh->sws_cmng.pools[i];\n \t\t\tif (!fallback && pool->min_dcs)\n \t\t\t\tclaim_zero(mlx5_devx_cmd_destroy\n \t\t\t\t\t\t\t       (pool->min_dcs));\n@@ -632,14 +659,14 @@ mlx5_flow_counters_mng_close(struct 
mlx5_dev_ctx_shared *sh)\n \t\t\t}\n \t\t\tmlx5_free(pool);\n \t\t}\n-\t\tmlx5_free(sh->cmng.pools);\n+\t\tmlx5_free(sh->sws_cmng.pools);\n \t}\n-\tmng = LIST_FIRST(&sh->cmng.mem_mngs);\n+\tmng = LIST_FIRST(&sh->sws_cmng.mem_mngs);\n \twhile (mng) {\n \t\tmlx5_flow_destroy_counter_stat_mem_mng(mng);\n-\t\tmng = LIST_FIRST(&sh->cmng.mem_mngs);\n+\t\tmng = LIST_FIRST(&sh->sws_cmng.mem_mngs);\n \t}\n-\tmemset(&sh->cmng, 0, sizeof(sh->cmng));\n+\tmemset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));\n }\n \n /**\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex d85cb7adea..eca719f269 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -639,12 +639,45 @@ struct mlx5_geneve_tlv_option_resource {\n /* Current time in seconds. */\n #define MLX5_CURR_TIME_SEC\t(rte_rdtsc() / rte_get_tsc_hz())\n \n+/*\n+ * HW steering queue oriented AGE info.\n+ * It contains an array of rings, one for each HWS queue.\n+ */\n+struct mlx5_hws_q_age_info {\n+\tuint16_t nb_rings; /* Number of aged-out ring lists. */\n+\tstruct rte_ring *aged_lists[]; /* Aged-out lists. */\n+};\n+\n+/*\n+ * HW steering AGE info.\n+ * It has a ring list containing all aged out flow rules.\n+ */\n+struct mlx5_hws_age_info {\n+\tstruct rte_ring *aged_list; /* Aged out lists. */\n+};\n+\n /* Aging information for per port. */\n struct mlx5_age_info {\n \tuint8_t flags; /* Indicate if is new event or need to be triggered. */\n-\tstruct mlx5_counters aged_counters; /* Aged counter list. */\n-\tstruct aso_age_list aged_aso; /* Aged ASO actions list. */\n-\trte_spinlock_t aged_sl; /* Aged flow list lock. */\n+\tunion {\n+\t\t/* SW/FW steering AGE info. */\n+\t\tstruct {\n+\t\t\tstruct mlx5_counters aged_counters;\n+\t\t\t/* Aged counter list. */\n+\t\t\tstruct aso_age_list aged_aso;\n+\t\t\t/* Aged ASO actions list. */\n+\t\t\trte_spinlock_t aged_sl; /* Aged flow list lock. */\n+\t\t};\n+\t\tstruct {\n+\t\t\tstruct mlx5_indexed_pool *ages_ipool;\n+\t\t\tunion {\n+\t\t\t\tstruct mlx5_hws_age_info hw_age;\n+\t\t\t\t/* HW steering AGE info. */\n+\t\t\t\tstruct mlx5_hws_q_age_info *hw_q_age;\n+\t\t\t\t/* HW steering queue oriented AGE info. */\n+\t\t\t};\n+\t\t};\n+\t};\n };\n \n /* Per port data of shared IB device. */\n@@ -1302,6 +1335,9 @@ struct mlx5_dev_ctx_shared {\n \tuint32_t hws_tags:1; /* Check if tags info for HWS initialized. */\n \tuint32_t shared_mark_enabled:1;\n \t/* If mark action is enabled on Rxqs (shared E-Switch domain). */\n+\tuint32_t hws_max_log_bulk_sz:5;\n+\t/* Log of minimal HWS counters created hard coded. */\n+\tuint32_t hws_max_nb_counters; /* Maximal number for HWS counters. */\n \tuint32_t max_port; /* Maximal IB device port index. */\n \tstruct mlx5_bond_info bond; /* Bonding information. */\n \tstruct mlx5_common_device *cdev; /* Backend mlx5 device. */\n@@ -1342,7 +1378,8 @@ struct mlx5_dev_ctx_shared {\n \tstruct mlx5_list *dest_array_list;\n \tstruct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */\n \t/* List of destination array actions. */\n-\tstruct mlx5_flow_counter_mng cmng; /* Counters management structure. */\n+\tstruct mlx5_flow_counter_mng sws_cmng;\n+\t/* SW steering counters management structure. */\n \tvoid *default_miss_action; /* Default miss action. */\n \tstruct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];\n \tstruct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];\n@@ -1670,6 +1707,9 @@ struct mlx5_priv {\n \tLIST_HEAD(flow_hw_at, rte_flow_actions_template) flow_hw_at;\n \tstruct mlx5dr_context *dr_ctx; /**< HW steering DR context. 
*/\n \t/* HW steering queue polling mechanism job descriptor LIFO. */\n+\tuint32_t hws_strict_queue:1;\n+\t/**< Whether all operations strictly happen on the same HWS queue. */\n+\tuint32_t hws_age_req:1; /**< Whether this port has AGE indexed pool. */\n \tstruct mlx5_hw_q *hw_q;\n \t/* HW steering rte flow table list header. */\n \tLIST_HEAD(flow_hw_tbl, rte_flow_template_table) flow_hw_tbl;\n@@ -1985,6 +2025,9 @@ int mlx5_validate_action_ct(struct rte_eth_dev *dev,\n \t\t\t    const struct rte_flow_action_conntrack *conntrack,\n \t\t\t    struct rte_flow_error *error);\n \n+int mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t\t       void **contexts, uint32_t nb_contexts,\n+\t\t\t       struct rte_flow_error *error);\n \n /* mlx5_mp_os.c */\n \ndiff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h\nindex d064abfef3..2af8c731ef 100644\n--- a/drivers/net/mlx5/mlx5_defs.h\n+++ b/drivers/net/mlx5/mlx5_defs.h\n@@ -43,6 +43,9 @@\n #define MLX5_PMD_SOFT_COUNTERS 1\n #endif\n \n+/* Maximum number of DCS created per port. */\n+#define MLX5_HWS_CNT_DCS_NUM 4\n+\n /* Alarm timeout. */\n #define MLX5_ALARM_TIMEOUT_US 100000\n \ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 9627ffc979..4bfa604578 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -987,6 +987,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t.isolate = mlx5_flow_isolate,\n \t.query = mlx5_flow_query,\n \t.dev_dump = mlx5_flow_dev_dump,\n+\t.get_q_aged_flows = mlx5_flow_get_q_aged_flows,\n \t.get_aged_flows = mlx5_flow_get_aged_flows,\n \t.action_handle_create = mlx5_action_handle_create,\n \t.action_handle_destroy = mlx5_action_handle_destroy,\n@@ -8942,11 +8943,11 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)\n \t\tmem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;\n \t}\n \tfor (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)\n-\t\tLIST_INSERT_HEAD(&sh->cmng.free_stat_raws,\n+\t\tLIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws,\n \t\t\t\t mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,\n \t\t\t\t next);\n-\tLIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);\n-\tsh->cmng.mem_mng = mem_mng;\n+\tLIST_INSERT_HEAD(&sh->sws_cmng.mem_mngs, mem_mng, next);\n+\tsh->sws_cmng.mem_mng = mem_mng;\n \treturn 0;\n }\n \n@@ -8965,7 +8966,7 @@ static int\n mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,\n \t\t\t       struct mlx5_flow_counter_pool *pool)\n {\n-\tstruct mlx5_flow_counter_mng *cmng = &sh->cmng;\n+\tstruct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;\n \t/* Resize statistic memory once used out. 
*/\n \tif (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&\n \t    mlx5_flow_create_counter_stat_mem_mng(sh)) {\n@@ -8994,14 +8995,14 @@ mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)\n {\n \tuint32_t pools_n, us;\n \n-\tpools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);\n+\tpools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);\n \tus = MLX5_POOL_QUERY_FREQ_US / pools_n;\n \tDRV_LOG(DEBUG, \"Set alarm for %u pools each %u us\", pools_n, us);\n \tif (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {\n-\t\tsh->cmng.query_thread_on = 0;\n+\t\tsh->sws_cmng.query_thread_on = 0;\n \t\tDRV_LOG(ERR, \"Cannot reinitialize query alarm\");\n \t} else {\n-\t\tsh->cmng.query_thread_on = 1;\n+\t\tsh->sws_cmng.query_thread_on = 1;\n \t}\n }\n \n@@ -9017,12 +9018,12 @@ mlx5_flow_query_alarm(void *arg)\n {\n \tstruct mlx5_dev_ctx_shared *sh = arg;\n \tint ret;\n-\tuint16_t pool_index = sh->cmng.pool_index;\n-\tstruct mlx5_flow_counter_mng *cmng = &sh->cmng;\n+\tuint16_t pool_index = sh->sws_cmng.pool_index;\n+\tstruct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;\n \tstruct mlx5_flow_counter_pool *pool;\n \tuint16_t n_valid;\n \n-\tif (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)\n+\tif (sh->sws_cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)\n \t\tgoto set_alarm;\n \trte_spinlock_lock(&cmng->pool_update_sl);\n \tpool = cmng->pools[pool_index];\n@@ -9035,7 +9036,7 @@ mlx5_flow_query_alarm(void *arg)\n \t\t/* There is a pool query in progress. */\n \t\tgoto set_alarm;\n \tpool->raw_hw =\n-\t\tLIST_FIRST(&sh->cmng.free_stat_raws);\n+\t\tLIST_FIRST(&sh->sws_cmng.free_stat_raws);\n \tif (!pool->raw_hw)\n \t\t/* No free counter statistics raw memory. */\n \t\tgoto set_alarm;\n@@ -9061,12 +9062,12 @@ mlx5_flow_query_alarm(void *arg)\n \t\tgoto set_alarm;\n \t}\n \tLIST_REMOVE(pool->raw_hw, next);\n-\tsh->cmng.pending_queries++;\n+\tsh->sws_cmng.pending_queries++;\n \tpool_index++;\n \tif (pool_index >= n_valid)\n \t\tpool_index = 0;\n set_alarm:\n-\tsh->cmng.pool_index = pool_index;\n+\tsh->sws_cmng.pool_index = pool_index;\n \tmlx5_set_query_alarm(sh);\n }\n \n@@ -9149,7 +9150,7 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,\n \t\t(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;\n \tstruct mlx5_counter_stats_raw *raw_to_free;\n \tuint8_t query_gen = pool->query_gen ^ 1;\n-\tstruct mlx5_flow_counter_mng *cmng = &sh->cmng;\n+\tstruct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;\n \tenum mlx5_counter_type cnt_type =\n \t\tpool->is_aged ? 
MLX5_COUNTER_TYPE_AGE :\n \t\t\t\tMLX5_COUNTER_TYPE_ORIGIN;\n@@ -9172,9 +9173,9 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,\n \t\t\trte_spinlock_unlock(&cmng->csl[cnt_type]);\n \t\t}\n \t}\n-\tLIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);\n+\tLIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws, raw_to_free, next);\n \tpool->raw_hw = NULL;\n-\tsh->cmng.pending_queries--;\n+\tsh->sws_cmng.pending_queries--;\n }\n \n static int\n@@ -9534,7 +9535,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,\n \tstruct mlx5_list_inconst *l_inconst;\n \tstruct mlx5_list_entry *e;\n \tint lcore_index;\n-\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;\n+\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;\n \tuint32_t max;\n \tvoid *action;\n \n@@ -9705,18 +9706,58 @@ mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,\n {\n \tconst struct mlx5_flow_driver_ops *fops;\n \tstruct rte_flow_attr attr = { .transfer = 0 };\n+\tenum mlx5_flow_drv_type type = flow_get_drv_type(dev, &attr);\n \n-\tif (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {\n-\t\tfops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);\n-\t\treturn fops->get_aged_flows(dev, contexts, nb_contexts,\n-\t\t\t\t\t\t    error);\n+\tif (type == MLX5_FLOW_TYPE_DV || type == MLX5_FLOW_TYPE_HW) {\n+\t\tfops = flow_get_drv_ops(type);\n+\t\treturn fops->get_aged_flows(dev, contexts, nb_contexts, error);\n \t}\n-\tDRV_LOG(ERR,\n-\t\t\"port %u get aged flows is not supported.\",\n-\t\t dev->data->port_id);\n+\tDRV_LOG(ERR, \"port %u get aged flows is not supported.\",\n+\t\tdev->data->port_id);\n \treturn -ENOTSUP;\n }\n \n+/**\n+ * Get aged-out flows per HWS queue.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in] queue_id\n+ *   Flow queue to query.\n+ * @param[in] context\n+ *   The address of an array of pointers to the aged-out flows contexts.\n+ * @param[in] nb_countexts\n+ *   The length of context array pointers.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. 
Initialized in case of\n+ *   error only.\n+ *\n+ * @return\n+ *   how many contexts get in success, otherwise negative errno value.\n+ *   if nb_contexts is 0, return the amount of all aged contexts.\n+ *   if nb_contexts is not 0 , return the amount of aged flows reported\n+ *   in the context array.\n+ */\n+int\n+mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t\t   void **contexts, uint32_t nb_contexts,\n+\t\t\t   struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\tstruct rte_flow_attr attr = { 0 };\n+\n+\tif (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_HW) {\n+\t\tfops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);\n+\t\treturn fops->get_q_aged_flows(dev, queue_id, contexts,\n+\t\t\t\t\t      nb_contexts, error);\n+\t}\n+\tDRV_LOG(ERR, \"port %u queue %u get aged flows is not supported.\",\n+\t\tdev->data->port_id, queue_id);\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t  \"get Q aged flows with incorrect steering mode\");\n+}\n+\n /* Wrapper for driver action_validate op callback */\n static int\n flow_drv_action_validate(struct rte_eth_dev *dev,\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex ffa4f28255..30a18ea35e 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -293,6 +293,8 @@ enum mlx5_feature_name {\n #define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)\n #define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)\n #define MLX5_FLOW_ACTION_CT (1ull << 41)\n+#define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 42)\n+#define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 43)\n \n #define MLX5_FLOW_FATE_ACTIONS \\\n \t(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \\\n@@ -1099,6 +1101,22 @@ struct rte_flow {\n \tuint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. > */\n } __rte_packed;\n \n+/*\n+ * HWS COUNTER ID's layout\n+ *       3                   2                   1                   0\n+ *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0\n+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+ *    |  T  |     | D |                                               |\n+ *    ~  Y  |     | C |                    IDX                        ~\n+ *    |  P  |     | S |                                               |\n+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n+ *\n+ *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10\n+ *    Bit 25:24 = DCS index\n+ *    Bit 23:00 = IDX in this counter belonged DCS bulk.\n+ */\n+typedef uint32_t cnt_id_t;\n+\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \n /* HWS flow struct. */\n@@ -1112,7 +1130,8 @@ struct rte_flow_hw {\n \t};\n \tstruct rte_flow_template_table *table; /* The table flow allcated from. */\n \tstruct mlx5dr_rule rule; /* HWS layer data struct. */\n-\tuint32_t cnt_id;\n+\tuint32_t age_idx;\n+\tcnt_id_t cnt_id;\n \tuint32_t mtr_id;\n } __rte_packed;\n \n@@ -1158,7 +1177,7 @@ struct mlx5_action_construct_data {\n \t\t\tuint32_t idx; /* Shared action index. */\n \t\t} shared_rss;\n \t\tstruct {\n-\t\t\tuint32_t id;\n+\t\t\tcnt_id_t id;\n \t\t} shared_counter;\n \t\tstruct {\n \t\t\tuint32_t id;\n@@ -1189,6 +1208,7 @@ struct rte_flow_actions_template {\n \tstruct rte_flow_action *actions; /* Cached flow actions. 
*/\n \tstruct rte_flow_action *masks; /* Cached action masks.*/\n \tstruct mlx5dr_action_template *tmpl; /* mlx5dr action template. */\n+\tuint64_t action_flags; /* Bit-map of all valid action in template. */\n \tuint16_t dr_actions_num; /* Amount of DR rules actions. */\n \tuint16_t actions_num; /* Amount of flow actions */\n \tuint16_t *actions_off; /* DR action offset for given rte action offset. */\n@@ -1245,7 +1265,7 @@ struct mlx5_hw_actions {\n \tstruct mlx5_hw_encap_decap_action *encap_decap;\n \tuint16_t encap_decap_pos; /* Encap/Decap action position. */\n \tuint32_t mark:1; /* Indicate the mark action. */\n-\tuint32_t cnt_id; /* Counter id. */\n+\tcnt_id_t cnt_id; /* Counter id. */\n \tuint32_t mtr_id; /* Meter id. */\n \t/* Translated DR action array from action template. */\n \tstruct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];\n@@ -1619,6 +1639,12 @@ typedef int (*mlx5_flow_get_aged_flows_t)\n \t\t\t\t\t void **context,\n \t\t\t\t\t uint32_t nb_contexts,\n \t\t\t\t\t struct rte_flow_error *error);\n+typedef int (*mlx5_flow_get_q_aged_flows_t)\n+\t\t\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t\t\t uint32_t queue_id,\n+\t\t\t\t\t void **context,\n+\t\t\t\t\t uint32_t nb_contexts,\n+\t\t\t\t\t struct rte_flow_error *error);\n typedef int (*mlx5_flow_action_validate_t)\n \t\t\t\t(struct rte_eth_dev *dev,\n \t\t\t\t const struct rte_flow_indir_action_conf *conf,\n@@ -1825,6 +1851,7 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_counter_free_t counter_free;\n \tmlx5_flow_counter_query_t counter_query;\n \tmlx5_flow_get_aged_flows_t get_aged_flows;\n+\tmlx5_flow_get_q_aged_flows_t get_q_aged_flows;\n \tmlx5_flow_action_validate_t action_validate;\n \tmlx5_flow_action_create_t action_create;\n \tmlx5_flow_action_destroy_t action_destroy;\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex a50a600024..58a7e94ee0 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -5524,7 +5524,7 @@ flow_dv_validate_action_age(uint64_t action_flags,\n \tconst struct rte_flow_action_age *age = action->conf;\n \n \tif (!priv->sh->cdev->config.devx ||\n-\t    (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))\n+\t    (priv->sh->sws_cmng.counter_fallback && !priv->sh->aso_age_mng))\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\t  NULL,\n@@ -6085,7 +6085,7 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,\n \t\t\t   struct mlx5_flow_counter_pool **ppool)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;\n+\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;\n \tstruct mlx5_flow_counter_pool *pool;\n \n \t/* Decrease to original index and clear shared bit. 
*/\n@@ -6179,7 +6179,7 @@ static int\n flow_dv_container_resize(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;\n+\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;\n \tvoid *old_pools = cmng->pools;\n \tuint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;\n \tuint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;\n@@ -6225,7 +6225,7 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,\n \n \tcnt = flow_dv_counter_get_by_idx(dev, counter, &pool);\n \tMLX5_ASSERT(pool);\n-\tif (priv->sh->cmng.counter_fallback)\n+\tif (priv->sh->sws_cmng.counter_fallback)\n \t\treturn mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,\n \t\t\t\t\t0, pkts, bytes, 0, NULL, NULL, 0);\n \trte_spinlock_lock(&pool->sl);\n@@ -6262,8 +6262,8 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_flow_counter_pool *pool;\n-\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;\n-\tbool fallback = priv->sh->cmng.counter_fallback;\n+\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;\n+\tbool fallback = priv->sh->sws_cmng.counter_fallback;\n \tuint32_t size = sizeof(*pool);\n \n \tsize += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;\n@@ -6324,14 +6324,14 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,\n \t\t\t     uint32_t age)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;\n+\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;\n \tstruct mlx5_flow_counter_pool *pool;\n \tstruct mlx5_counters tmp_tq;\n \tstruct mlx5_devx_obj *dcs = NULL;\n \tstruct mlx5_flow_counter *cnt;\n \tenum mlx5_counter_type cnt_type =\n \t\t\tage ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;\n-\tbool fallback = priv->sh->cmng.counter_fallback;\n+\tbool fallback = priv->sh->sws_cmng.counter_fallback;\n \tuint32_t i;\n \n \tif (fallback) {\n@@ -6395,8 +6395,8 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_flow_counter_pool *pool = NULL;\n \tstruct mlx5_flow_counter *cnt_free = NULL;\n-\tbool fallback = priv->sh->cmng.counter_fallback;\n-\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;\n+\tbool fallback = priv->sh->sws_cmng.counter_fallback;\n+\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;\n \tenum mlx5_counter_type cnt_type =\n \t\t\tage ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;\n \tuint32_t cnt_idx;\n@@ -6442,7 +6442,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)\n \tif (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,\n \t\t\t\t &cnt_free->bytes))\n \t\tgoto err;\n-\tif (!fallback && !priv->sh->cmng.query_thread_on)\n+\tif (!fallback && !priv->sh->sws_cmng.query_thread_on)\n \t\t/* Start the asynchronous batch query by the host thread. 
*/\n \t\tmlx5_set_query_alarm(priv->sh);\n \t/*\n@@ -6570,7 +6570,7 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)\n \t * this case, lock will not be needed as query callback and release\n \t * function both operate with the different list.\n \t */\n-\tif (!priv->sh->cmng.counter_fallback) {\n+\tif (!priv->sh->sws_cmng.counter_fallback) {\n \t\trte_spinlock_lock(&pool->csl);\n \t\tTAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);\n \t\trte_spinlock_unlock(&pool->csl);\n@@ -6578,10 +6578,10 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)\n \t\tcnt->dcs_when_free = cnt->dcs_when_active;\n \t\tcnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :\n \t\t\t\t\t   MLX5_COUNTER_TYPE_ORIGIN;\n-\t\trte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);\n-\t\tTAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],\n+\t\trte_spinlock_lock(&priv->sh->sws_cmng.csl[cnt_type]);\n+\t\tTAILQ_INSERT_TAIL(&priv->sh->sws_cmng.counters[cnt_type],\n \t\t\t\t  cnt, next);\n-\t\trte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);\n+\t\trte_spinlock_unlock(&priv->sh->sws_cmng.csl[cnt_type]);\n \t}\n }\n \ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex d4ce2f185a..5c0981d385 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -460,7 +460,8 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,\n \t\t\t\t  enum rte_flow_action_type type,\n \t\t\t\t  uint16_t action_src,\n \t\t\t\t  uint16_t action_dst)\n-{\tstruct mlx5_action_construct_data *act_data;\n+{\n+\tstruct mlx5_action_construct_data *act_data;\n \n \tact_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);\n \tif (!act_data)\n@@ -495,7 +496,8 @@ __flow_hw_act_data_encap_append(struct mlx5_priv *priv,\n \t\t\t\tuint16_t action_src,\n \t\t\t\tuint16_t action_dst,\n \t\t\t\tuint16_t len)\n-{\tstruct mlx5_action_construct_data *act_data;\n+{\n+\tstruct mlx5_action_construct_data *act_data;\n \n \tact_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);\n \tif (!act_data)\n@@ -565,7 +567,8 @@ __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,\n \t\t\t\t     uint16_t action_dst,\n \t\t\t\t     uint32_t idx,\n \t\t\t\t     struct mlx5_shared_action_rss *rss)\n-{\tstruct mlx5_action_construct_data *act_data;\n+{\n+\tstruct mlx5_action_construct_data *act_data;\n \n \tact_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);\n \tif (!act_data)\n@@ -604,7 +607,8 @@ __flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,\n \t\t\t\t     uint16_t action_src,\n \t\t\t\t     uint16_t action_dst,\n \t\t\t\t     cnt_id_t cnt_id)\n-{\tstruct mlx5_action_construct_data *act_data;\n+{\n+\tstruct mlx5_action_construct_data *act_data;\n \n \tact_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);\n \tif (!act_data)\n@@ -700,6 +704,10 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,\n \t\t\taction_src, action_dst, act_idx))\n \t\t\treturn -1;\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n+\t\t/* Not supported, prevent by validate function. 
*/\n+\t\tMLX5_ASSERT(0);\n+\t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n \t\tif (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,\n \t\t\t\t       idx, &acts->rule_acts[action_dst]))\n@@ -1092,7 +1100,7 @@ flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t  start_pos,\n \tcnt_id_t cnt_id;\n \tint ret;\n \n-\tret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id);\n+\tret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);\n \tif (ret != 0)\n \t\treturn ret;\n \tret = mlx5_hws_cnt_pool_get_action_offset\n@@ -1233,8 +1241,6 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,\n  *   Pointer to the rte_eth_dev structure.\n  * @param[in] cfg\n  *   Pointer to the table configuration.\n- * @param[in] item_templates\n- *   Item template array to be binded to the table.\n  * @param[in/out] acts\n  *   Pointer to the template HW steering DR actions.\n  * @param[in] at\n@@ -1243,7 +1249,7 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,\n  *   Pointer to error structure.\n  *\n  * @return\n- *    Table on success, NULL otherwise and rte_errno is set.\n+ *   0 on success, a negative errno otherwise and rte_errno is set.\n  */\n static int\n __flow_hw_actions_translate(struct rte_eth_dev *dev,\n@@ -1272,6 +1278,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \tuint16_t jump_pos;\n \tuint32_t ct_idx;\n \tint err;\n+\tuint32_t target_grp = 0;\n \n \tflow_hw_modify_field_init(&mhdr, at);\n \tif (attr->transfer)\n@@ -1499,8 +1506,42 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\taction_pos))\n \t\t\t\tgoto err;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_AGE:\n+\t\t\tflow_hw_translate_group(dev, cfg, attr->group,\n+\t\t\t\t\t\t&target_grp, error);\n+\t\t\tif (target_grp == 0) {\n+\t\t\t\t__flow_hw_action_template_destroy(dev, acts);\n+\t\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\tNULL,\n+\t\t\t\t\t\t\"Age action on root table is not supported in HW steering mode\");\n+\t\t\t}\n+\t\t\taction_pos = at->actions_off[actions - at->actions];\n+\t\t\tif (__flow_hw_act_data_general_append(priv, acts,\n+\t\t\t\t\t\t\t actions->type,\n+\t\t\t\t\t\t\t actions - action_start,\n+\t\t\t\t\t\t\t action_pos))\n+\t\t\t\tgoto err;\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\t\taction_pos = at->actions_off[actions - action_start];\n+\t\t\tflow_hw_translate_group(dev, cfg, attr->group,\n+\t\t\t\t\t\t&target_grp, error);\n+\t\t\tif (target_grp == 0) {\n+\t\t\t\t__flow_hw_action_template_destroy(dev, acts);\n+\t\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\tNULL,\n+\t\t\t\t\t\t\"Counter action on root table is not supported in HW steering mode\");\n+\t\t\t}\n+\t\t\tif ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||\n+\t\t\t    (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))\n+\t\t\t\t/*\n+\t\t\t\t * When both COUNT and AGE are requested, it is\n+\t\t\t\t * saved as AGE action which creates also the\n+\t\t\t\t * counter.\n+\t\t\t\t */\n+\t\t\t\tbreak;\n+\t\t\taction_pos = at->actions_off[actions - at->actions];\n \t\t\tif (masks->conf &&\n \t\t\t    ((const struct rte_flow_action_count *)\n \t\t\t     masks->conf)->id) {\n@@ -1727,6 +1768,10 @@ flow_hw_shared_action_get(struct rte_eth_dev *dev,\n  *   Pointer to the flow table.\n  * @param[in] it_idx\n  *   Item template index the action template refer to.\n+ * @param[in] action_flags\n+ *   Actions bit-map detected in this template.\n+ * @param[in, out] flow\n+ *   Pointer to the flow 
containing the counter.\n  * @param[in] rule_act\n  *   Pointer to the shared action's destination rule DR action.\n  *\n@@ -1737,7 +1782,8 @@ static __rte_always_inline int\n flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\tconst struct rte_flow_action *action,\n \t\t\t\tstruct rte_flow_template_table *table,\n-\t\t\t\tconst uint8_t it_idx,\n+\t\t\t\tconst uint8_t it_idx, uint64_t action_flags,\n+\t\t\t\tstruct rte_flow_hw *flow,\n \t\t\t\tstruct mlx5dr_rule_action *rule_act)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -1745,11 +1791,14 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,\n \tstruct mlx5_action_construct_data act_data;\n \tstruct mlx5_shared_action_rss *shared_rss;\n \tstruct mlx5_aso_mtr *aso_mtr;\n+\tstruct mlx5_age_info *age_info;\n+\tstruct mlx5_hws_age_param *param;\n \tuint32_t act_idx = (uint32_t)(uintptr_t)action->conf;\n \tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n \tuint32_t idx = act_idx &\n \t\t       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n \tuint64_t item_flags;\n+\tcnt_id_t age_cnt;\n \n \tmemset(&act_data, 0, sizeof(act_data));\n \tswitch (type) {\n@@ -1775,6 +1824,44 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\t&rule_act->action,\n \t\t\t\t&rule_act->counter.offset))\n \t\t\treturn -1;\n+\t\tflow->cnt_id = act_idx;\n+\t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n+\t\t/*\n+\t\t * Save the index with the indirect type, to recognize\n+\t\t * it in flow destroy.\n+\t\t */\n+\t\tflow->age_idx = act_idx;\n+\t\tif (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)\n+\t\t\t/*\n+\t\t\t * The mutual update for idirect AGE & COUNT will be\n+\t\t\t * performed later after we have ID for both of them.\n+\t\t\t */\n+\t\t\tbreak;\n+\t\tage_info = GET_PORT_AGE_INFO(priv);\n+\t\tparam = mlx5_ipool_get(age_info->ages_ipool, idx);\n+\t\tif (param == NULL)\n+\t\t\treturn -1;\n+\t\tif (action_flags & MLX5_FLOW_ACTION_COUNT) {\n+\t\t\tif (mlx5_hws_cnt_pool_get(priv->hws_cpool,\n+\t\t\t\t\t\t  &param->queue_id, &age_cnt,\n+\t\t\t\t\t\t  idx) < 0)\n+\t\t\t\treturn -1;\n+\t\t\tflow->cnt_id = age_cnt;\n+\t\t\tparam->nb_cnts++;\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * Get the counter of this indirect AGE or create one\n+\t\t\t * if doesn't exist.\n+\t\t\t */\n+\t\t\tage_cnt = mlx5_hws_age_cnt_get(priv, param, idx);\n+\t\t\tif (age_cnt == 0)\n+\t\t\t\treturn -1;\n+\t\t}\n+\t\tif (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,\n+\t\t\t\t\t\t     age_cnt, &rule_act->action,\n+\t\t\t\t\t\t     &rule_act->counter.offset))\n+\t\t\treturn -1;\n \t\tbreak;\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n \t\tif (flow_hw_ct_compile(dev, queue, idx, rule_act))\n@@ -1935,7 +2022,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t  const uint8_t it_idx,\n \t\t\t  const struct rte_flow_action actions[],\n \t\t\t  struct mlx5dr_rule_action *rule_acts,\n-\t\t\t  uint32_t queue)\n+\t\t\t  uint32_t queue,\n+\t\t\t  struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n@@ -1948,6 +2036,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \tconst struct rte_flow_item *enc_item = NULL;\n \tconst struct rte_flow_action_ethdev *port_action = NULL;\n \tconst struct rte_flow_action_meter *meter = NULL;\n+\tconst struct rte_flow_action_age *age = NULL;\n \tuint8_t *buf = job->encap_data;\n \tstruct rte_flow_attr attr = {\n \t\t\t.ingress = 1,\n@@ -1955,6 +2044,7 
@@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \tuint32_t ft_flag;\n \tsize_t encap_len = 0;\n \tint ret;\n+\tuint32_t age_idx = 0;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \n \trte_memcpy(rule_acts, hw_acts->rule_acts, sizeof(*rule_acts) * at->dr_actions_num);\n@@ -2007,6 +2097,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n \t\t\tif (flow_hw_shared_action_construct\n \t\t\t\t\t(dev, queue, action, table, it_idx,\n+\t\t\t\t\t at->action_flags, job->flow,\n \t\t\t\t\t &rule_acts[act_data->action_dst]))\n \t\t\t\treturn -1;\n \t\t\tbreak;\n@@ -2115,9 +2206,32 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\tif (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr))\n \t\t\t\treturn -1;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_AGE:\n+\t\t\tage = action->conf;\n+\t\t\t/*\n+\t\t\t * First, create the AGE parameter, then create its\n+\t\t\t * counter later:\n+\t\t\t * Regular counter - in next case.\n+\t\t\t * Indirect counter - update it after the loop.\n+\t\t\t */\n+\t\t\tage_idx = mlx5_hws_age_action_create(priv, queue, 0,\n+\t\t\t\t\t\t\t     age,\n+\t\t\t\t\t\t\t     job->flow->idx,\n+\t\t\t\t\t\t\t     error);\n+\t\t\tif (age_idx == 0)\n+\t\t\t\treturn -rte_errno;\n+\t\t\tjob->flow->age_idx = age_idx;\n+\t\t\tif (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)\n+\t\t\t\t/*\n+\t\t\t\t * When AGE uses indirect counter, no need to\n+\t\t\t\t * create counter but need to update it with the\n+\t\t\t\t * AGE parameter, will be done after the loop.\n+\t\t\t\t */\n+\t\t\t\tbreak;\n+\t\t\t/* Fall-through. */\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n \t\t\tret = mlx5_hws_cnt_pool_get(priv->hws_cpool, &queue,\n-\t\t\t\t\t&cnt_id);\n+\t\t\t\t\t\t    &cnt_id, age_idx);\n \t\t\tif (ret != 0)\n \t\t\t\treturn ret;\n \t\t\tret = mlx5_hws_cnt_pool_get_action_offset\n@@ -2174,6 +2288,25 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\tbreak;\n \t\t}\n \t}\n+\tif (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {\n+\t\tif (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {\n+\t\t\tage_idx = job->flow->age_idx & MLX5_HWS_AGE_IDX_MASK;\n+\t\t\tif (mlx5_hws_cnt_age_get(priv->hws_cpool,\n+\t\t\t\t\t\t job->flow->cnt_id) != age_idx)\n+\t\t\t\t/*\n+\t\t\t\t * This is first use of this indirect counter\n+\t\t\t\t * for this indirect AGE, need to increase the\n+\t\t\t\t * number of counters.\n+\t\t\t\t */\n+\t\t\t\tmlx5_hws_age_nb_cnt_increase(priv, age_idx);\n+\t\t}\n+\t\t/*\n+\t\t * Update this indirect counter the indirect/direct AGE in which\n+\t\t * using it.\n+\t\t */\n+\t\tmlx5_hws_cnt_age_set(priv->hws_cpool, job->flow->cnt_id,\n+\t\t\t\t     age_idx);\n+\t}\n \tif (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {\n \t\trule_acts[hw_acts->encap_decap_pos].reformat.offset =\n \t\t\t\tjob->flow->idx - 1;\n@@ -2323,8 +2456,10 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \t * No need to copy and contrust a new \"actions\" list based on the\n \t * user's input, in order to save the cost.\n \t */\n-\tif (flow_hw_actions_construct(dev, job, &table->ats[action_template_index],\n-\t\t\t\t      pattern_template_index, actions, rule_acts, queue)) {\n+\tif (flow_hw_actions_construct(dev, job,\n+\t\t\t\t      &table->ats[action_template_index],\n+\t\t\t\t      pattern_template_index, actions,\n+\t\t\t\t      rule_acts, queue, error)) {\n \t\trte_errno = EINVAL;\n \t\tgoto free;\n \t}\n@@ -2409,6 +2544,49 @@ flow_hw_async_flow_destroy(struct rte_eth_dev *dev,\n \t\t\t\"fail to create rte flow\");\n }\n 
\n+/**\n+ * Release the AGE and counter for given flow.\n+ *\n+ * @param[in] priv\n+ *   Pointer to the port private data structure.\n+ * @param[in] queue\n+ *   The queue to release the counter.\n+ * @param[in, out] flow\n+ *   Pointer to the flow containing the counter.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ */\n+static void\n+flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,\n+\t\t\t  struct rte_flow_hw *flow,\n+\t\t\t  struct rte_flow_error *error)\n+{\n+\tif (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {\n+\t\tif (flow->age_idx && !mlx5_hws_age_is_indirect(flow->age_idx)) {\n+\t\t\t/* Remove this AGE parameter from indirect counter. */\n+\t\t\tmlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);\n+\t\t\t/* Release the AGE parameter. */\n+\t\t\tmlx5_hws_age_action_destroy(priv, flow->age_idx, error);\n+\t\t\tflow->age_idx = 0;\n+\t\t}\n+\t\treturn;\n+\t}\n+\t/* Put the counter first to reduce the race risk in BG thread. */\n+\tmlx5_hws_cnt_pool_put(priv->hws_cpool, &queue, &flow->cnt_id);\n+\tflow->cnt_id = 0;\n+\tif (flow->age_idx) {\n+\t\tif (mlx5_hws_age_is_indirect(flow->age_idx)) {\n+\t\t\tuint32_t idx = flow->age_idx & MLX5_HWS_AGE_IDX_MASK;\n+\n+\t\t\tmlx5_hws_age_nb_cnt_decrease(priv, idx);\n+\t\t} else {\n+\t\t\t/* Release the AGE parameter. */\n+\t\t\tmlx5_hws_age_action_destroy(priv, flow->age_idx, error);\n+\t\t}\n+\t\tflow->age_idx = 0;\n+\t}\n+}\n+\n /**\n  * Pull the enqueued flows.\n  *\n@@ -2455,13 +2633,9 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \t\t\t\tflow_hw_jump_release(dev, job->flow->jump);\n \t\t\telse if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)\n \t\t\t\tmlx5_hrxq_obj_release(dev, job->flow->hrxq);\n-\t\t\tif (mlx5_hws_cnt_id_valid(job->flow->cnt_id) &&\n-\t\t\t    mlx5_hws_cnt_is_shared\n-\t\t\t\t(priv->hws_cpool, job->flow->cnt_id) == false) {\n-\t\t\t\tmlx5_hws_cnt_pool_put(priv->hws_cpool, &queue,\n-\t\t\t\t\t\t&job->flow->cnt_id);\n-\t\t\t\tjob->flow->cnt_id = 0;\n-\t\t\t}\n+\t\t\tif (mlx5_hws_cnt_id_valid(job->flow->cnt_id))\n+\t\t\t\tflow_hw_age_count_release(priv, queue,\n+\t\t\t\t\t\t\t  job->flow, error);\n \t\t\tif (job->flow->mtr_id) {\n \t\t\t\tmlx5_ipool_free(pool->idx_pool,\tjob->flow->mtr_id);\n \t\t\t\tjob->flow->mtr_id = 0;\n@@ -3093,100 +3267,315 @@ flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-static inline int\n-flow_hw_action_meta_copy_insert(const struct rte_flow_action actions[],\n-\t\t\t\tconst struct rte_flow_action masks[],\n-\t\t\t\tconst struct rte_flow_action *ins_actions,\n-\t\t\t\tconst struct rte_flow_action *ins_masks,\n-\t\t\t\tstruct rte_flow_action *new_actions,\n-\t\t\t\tstruct rte_flow_action *new_masks,\n-\t\t\t\tuint16_t *ins_pos)\n+/**\n+ * Validate AGE action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to rte_eth_dev structure.\n+ * @param[in] action\n+ *   Pointer to the indirect action.\n+ * @param[in] action_flags\n+ *   Holds the actions detected until now.\n+ * @param[in] fixed_cnt\n+ *   Indicator if this list has a fixed COUNT action.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_validate_action_age(struct rte_eth_dev *dev,\n+\t\t\t    const struct rte_flow_action *action,\n+\t\t\t    uint64_t action_flags, bool fixed_cnt,\n+\t\t\t    struct rte_flow_error *error)\n {\n-\tuint16_t idx, total = 0;\n-\tuint16_t end_idx = UINT16_MAX;\n-\tbool act_end = false;\n-\tbool modify_field = 
false;\n-\tbool rss_or_queue = false;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n \n-\tMLX5_ASSERT(actions && masks);\n-\tMLX5_ASSERT(new_actions && new_masks);\n-\tMLX5_ASSERT(ins_actions && ins_masks);\n-\tfor (idx = 0; !act_end; idx++) {\n-\t\tswitch (actions[idx].type) {\n-\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n-\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n-\t\t\t/* It is assumed that application provided only single RSS/QUEUE action. */\n-\t\t\tMLX5_ASSERT(!rss_or_queue);\n-\t\t\trss_or_queue = true;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:\n-\t\t\tmodify_field = true;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_END:\n-\t\t\tend_idx = idx;\n-\t\t\tact_end = true;\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tbreak;\n+\tif (!priv->sh->cdev->config.devx)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL, \"AGE action not supported\");\n+\tif (age_info->ages_ipool == NULL)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"aging pool not initialized\");\n+\tif ((action_flags & MLX5_FLOW_ACTION_AGE) ||\n+\t    (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n+\t\t\t\t\t  \"duplicate AGE actions set\");\n+\tif (fixed_cnt)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n+\t\t\t\t\t  \"AGE and fixed COUNT combination is not supported\");\n+\treturn 0;\n+}\n+\n+/**\n+ * Validate count action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to rte_eth_dev structure.\n+ * @param[in] action\n+ *   Pointer to the indirect action.\n+ * @param[in] mask\n+ *   Pointer to the indirect action mask.\n+ * @param[in] action_flags\n+ *   Holds the actions detected until now.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_validate_action_count(struct rte_eth_dev *dev,\n+\t\t\t      const struct rte_flow_action *action,\n+\t\t\t      const struct rte_flow_action *mask,\n+\t\t\t      uint64_t action_flags,\n+\t\t\t      struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_count *count = mask->conf;\n+\n+\tif (!priv->sh->cdev->config.devx)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n+\t\t\t\t\t  \"count action not supported\");\n+\tif (!priv->hws_cpool)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n+\t\t\t\t\t  \"counters pool not initialized\");\n+\tif ((action_flags & MLX5_FLOW_ACTION_COUNT) ||\n+\t    (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n+\t\t\t\t\t  \"duplicate count actions set\");\n+\tif (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, mask,\n+\t\t\t\t\t  \"AGE and COUNT action shared by mask combination is not supported\");\n+\treturn 0;\n+}\n+\n+/**\n+ * Validate meter_mark action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to rte_eth_dev structure.\n+ * @param[in] action\n+ *   Pointer to the indirect action.\n+ * @param[out] error\n+ *   Pointer to error 
structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,\n+\t\t\t      const struct rte_flow_action *action,\n+\t\t\t      struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tRTE_SET_USED(action);\n+\n+\tif (!priv->sh->cdev->config.devx)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n+\t\t\t\t\t  \"meter_mark action not supported\");\n+\tif (!priv->hws_mpool)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n+\t\t\t\t\t  \"meter_mark pool not initialized\");\n+\treturn 0;\n+}\n+\n+/**\n+ * Validate indirect action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to rte_eth_dev structure.\n+ * @param[in] action\n+ *   Pointer to the indirect action.\n+ * @param[in] mask\n+ *   Pointer to the indirect action mask.\n+ * @param[in, out] action_flags\n+ *   Holds the actions detected until now.\n+ * @param[in, out] fixed_cnt\n+ *   Pointer to indicator if this list has a fixed COUNT action.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_validate_action_indirect(struct rte_eth_dev *dev,\n+\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t const struct rte_flow_action *mask,\n+\t\t\t\t uint64_t *action_flags, bool *fixed_cnt,\n+\t\t\t\t struct rte_flow_error *error)\n+{\n+\tuint32_t type;\n+\tint ret;\n+\n+\tif (!mask)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"Unable to determine indirect action type without a mask specified\");\n+\ttype = mask->type;\n+\tswitch (type) {\n+\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n+\t\tret = flow_hw_validate_action_meter_mark(dev, mask, error);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t\t*action_flags |= MLX5_FLOW_ACTION_METER;\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\t/* TODO: Validation logic (same as flow_hw_actions_validate) */\n+\t\t*action_flags |= MLX5_FLOW_ACTION_RSS;\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\t/* TODO: Validation logic (same as flow_hw_actions_validate) */\n+\t\t*action_flags |= MLX5_FLOW_ACTION_CT;\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n+\t\tif (action->conf && mask->conf) {\n+\t\t\tif ((*action_flags & MLX5_FLOW_ACTION_AGE) ||\n+\t\t\t    (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))\n+\t\t\t\t/*\n+\t\t\t\t * AGE cannot use an indirect counter which is\n+\t\t\t\t * shared with another flow rule.\n+\t\t\t\t */\n+\t\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  NULL,\n+\t\t\t\t\t\t  \"AGE and fixed COUNT combination is not supported\");\n+\t\t\t*fixed_cnt = true;\n \t\t}\n+\t\tret = flow_hw_validate_action_count(dev, action, mask,\n+\t\t\t\t\t\t    *action_flags, error);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t\t*action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;\n+\t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_AGE:\n+\t\tret = flow_hw_validate_action_age(dev, action, *action_flags,\n+\t\t\t\t\t\t  *fixed_cnt, error);\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t\t*action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;\n+\t\tbreak;\n+\tdefault:\n+\t\tDRV_LOG(WARNING, \"Unsupported shared action type: %d\", type);\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,\n+\t\t\t\t\t  \"Unsupported indirect action type\");\n \t}\n-\tif (!rss_or_queue)\n-\t\treturn 0;\n-\telse if (idx >= MLX5_HW_MAX_ACTS)\n-\t\treturn -1; /* No more space. */\n-\ttotal = idx;\n-\t/*\n-\t * If actions template contains MODIFY_FIELD action, then meta copy action can be inserted\n-\t * at the template's end. Position of MODIFY_HDR action is based on the position of the\n-\t * first MODIFY_FIELD flow action.\n-\t */\n-\tif (modify_field) {\n-\t\t*ins_pos = end_idx;\n-\t\tgoto insert_meta_copy;\n-\t}\n-\t/*\n-\t * If actions template does not contain MODIFY_FIELD action, then meta copy action must be\n-\t * inserted at aplace conforming with action order defined in steering/mlx5dr_action.c.\n+\treturn 0;\n+}\n+\n+/**\n+ * Validate raw_encap action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to rte_eth_dev structure.\n+ * @param[in] action\n+ *   Pointer to the raw_encap action.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,\n+\t\t\t\t  const struct rte_flow_action *action,\n+\t\t\t\t  struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_action_raw_encap *raw_encap_data = action->conf;\n+\n+\tif (!raw_encap_data || !raw_encap_data->size || !raw_encap_data->data)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\n+\t\t\t\t\t  \"invalid raw_encap_data\");\n+\treturn 0;\n+}\n+\n+static inline uint16_t\n+flow_hw_template_expand_modify_field(const struct rte_flow_action actions[],\n+\t\t\t\t     const struct rte_flow_action masks[],\n+\t\t\t\t     const struct rte_flow_action *mf_action,\n+\t\t\t\t     const struct rte_flow_action *mf_mask,\n+\t\t\t\t     struct rte_flow_action *new_actions,\n+\t\t\t\t     struct rte_flow_action *new_masks,\n+\t\t\t\t     uint64_t flags, uint32_t act_num)\n+{\n+\tuint32_t i, tail;\n+\n+\tMLX5_ASSERT(actions && masks);\n+\tMLX5_ASSERT(new_actions && new_masks);\n+\tMLX5_ASSERT(mf_action && mf_mask);\n+\tif (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {\n+\t\t/*\n+\t\t * Application action template already has Modify Field.\n+\t\t * Its location will be used in DR.\n+\t\t * Expanded MF action can be added before the END.\n+\t\t */\n+\t\ti = act_num - 1;\n+\t\tgoto insert;\n+\t}\n+\t/**\n+\t * Locate the first action positioned BEFORE the new MF.\n+\t *\n+\t * Search for a place to insert modify header\n+\t * from the END action backwards:\n+\t * 1. END is always present in actions array\n+\t * 2. END location is always at action[act_num - 1]\n+\t * 3. 
END always positioned AFTER modify field location\n+\t *\n+\t * Relative actions order is the same for RX, TX and FDB.\n+\t *\n+\t * Current actions order (draft-3)\n+\t * @see action_order_arr[]\n \t */\n-\tact_end = false;\n-\tfor (idx = 0; !act_end; idx++) {\n-\t\tswitch (actions[idx].type) {\n-\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\tcase RTE_FLOW_ACTION_TYPE_METER:\n-\t\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n-\t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\tfor (i = act_num - 2; (int)i >= 0; i--) {\n+\t\tenum rte_flow_action_type type = actions[i].type;\n+\n+\t\tif (type == RTE_FLOW_ACTION_TYPE_INDIRECT)\n+\t\t\ttype = masks[i].type;\n+\t\tswitch (type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\n \t\tcase RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:\n \t\tcase RTE_FLOW_ACTION_TYPE_RAW_ENCAP:\n-\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n+\t\tcase RTE_FLOW_ACTION_TYPE_JUMP:\n \t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n-\t\t\t*ins_pos = idx;\n-\t\t\tact_end = true;\n-\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n+\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:\n+\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n-\t\t\tact_end = true;\n \t\t\tbreak;\n \t\tdefault:\n+\t\t\ti++; /* new MF inserted AFTER actions[i] */\n+\t\t\tgoto insert;\n \t\t\tbreak;\n \t\t}\n \t}\n-insert_meta_copy:\n-\tMLX5_ASSERT(*ins_pos != UINT16_MAX);\n-\tMLX5_ASSERT(*ins_pos < total);\n-\t/* Before the position, no change for the actions. */\n-\tfor (idx = 0; idx < *ins_pos; idx++) {\n-\t\tnew_actions[idx] = actions[idx];\n-\t\tnew_masks[idx] = masks[idx];\n-\t}\n-\t/* Insert the new action and mask to the position. */\n-\tnew_actions[idx] = *ins_actions;\n-\tnew_masks[idx] = *ins_masks;\n-\t/* Remaining content is right shifted by one position. 
*/\n-\tfor (; idx < total; idx++) {\n-\t\tnew_actions[idx + 1] = actions[idx];\n-\t\tnew_masks[idx + 1] = masks[idx];\n-\t}\n-\treturn 0;\n+\ti = 0;\n+insert:\n+\ttail = act_num - i; /* num action to move */\n+\tmemcpy(new_actions, actions, sizeof(actions[0]) * i);\n+\tnew_actions[i] = *mf_action;\n+\tmemcpy(new_actions + i + 1, actions + i, sizeof(actions[0]) * tail);\n+\tmemcpy(new_masks, masks, sizeof(masks[0]) * i);\n+\tnew_masks[i] = *mf_mask;\n+\tmemcpy(new_masks + i + 1, masks + i, sizeof(masks[0]) * tail);\n+\treturn i;\n }\n \n static int\n@@ -3257,13 +3646,17 @@ flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,\n }\n \n static int\n-flow_hw_actions_validate(struct rte_eth_dev *dev,\n-\t\t\tconst struct rte_flow_actions_template_attr *attr,\n-\t\t\tconst struct rte_flow_action actions[],\n-\t\t\tconst struct rte_flow_action masks[],\n-\t\t\tstruct rte_flow_error *error)\n+mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,\n+\t\t\t      const struct rte_flow_actions_template_attr *attr,\n+\t\t\t      const struct rte_flow_action actions[],\n+\t\t\t      const struct rte_flow_action masks[],\n+\t\t\t      uint64_t *act_flags,\n+\t\t\t      struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_count *count_mask = NULL;\n+\tbool fixed_cnt = false;\n+\tuint64_t action_flags = 0;\n \tuint16_t i;\n \tbool actions_end = false;\n \tint ret;\n@@ -3289,46 +3682,70 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n-\t\t\t/* TODO: Validation logic */\n+\t\t\tret = flow_hw_validate_action_indirect(dev, action,\n+\t\t\t\t\t\t\t       mask,\n+\t\t\t\t\t\t\t       &action_flags,\n+\t\t\t\t\t\t\t       &fixed_cnt,\n+\t\t\t\t\t\t\t       error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_MARK:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_MARK;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DROP;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_JUMP:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_JUMP;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_QUEUE;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_RSS;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_ENCAP;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_ENCAP;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_RAW_ENCAP:\n-\t\t\t/* TODO: Validation logic */\n+\t\t\tret = flow_hw_validate_action_raw_encap(dev, action, error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_ENCAP;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_RAW_DECAP:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n 
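The splice performed at the insert label of flow_hw_template_expand_modify_field() above is a plain insert-at-position over parallel arrays. A stand-alone toy showing the same head/insert/tail memcpy() pattern, with integers standing in for struct rte_flow_action:

#include <stdio.h>
#include <string.h>

/* Insert 'value' at position i, shifting the tail right by one slot. */
static void
insert_at(int *dst, const int *src, unsigned int act_num, unsigned int i,
	  int value)
{
	unsigned int tail = act_num - i;	/* number of elements to move */

	memcpy(dst, src, sizeof(src[0]) * i);			/* head [0, i) */
	dst[i] = value;						/* new element */
	memcpy(dst + i + 1, src + i, sizeof(src[0]) * tail);	/* tail */
}

int main(void)
{
	int actions[4] = {10, 20, 30, 40};	/* 40 plays the END action */
	int expanded[5];

	insert_at(expanded, actions, 4, 2, 99);
	for (int k = 0; k < 5; k++)
		printf("%d ", expanded[k]);	/* prints: 10 20 99 30 40 */
	printf("\n");
	return 0;
}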
\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_METER:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_METER;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n-\t\t\t/* TODO: Validation logic */\n+\t\t\tret = flow_hw_validate_action_meter_mark(dev, action,\n+\t\t\t\t\t\t\t\t error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_METER;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:\n \t\t\tret = flow_hw_validate_action_modify_field(action,\n@@ -3336,21 +3753,43 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\t\t\terror);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n \t\t\tret = flow_hw_validate_action_represented_port\n \t\t\t\t\t(dev, action, mask, error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_PORT_ID;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_AGE:\n+\t\t\tif (count_mask && count_mask->id)\n+\t\t\t\tfixed_cnt = true;\n+\t\t\tret = flow_hw_validate_action_age(dev, action,\n+\t\t\t\t\t\t\t  action_flags,\n+\t\t\t\t\t\t\t  fixed_cnt, error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_AGE;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\t\t/* TODO: Validation logic */\n+\t\t\tret = flow_hw_validate_action_count(dev, action, mask,\n+\t\t\t\t\t\t\t    action_flags,\n+\t\t\t\t\t\t\t    error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\tcount_mask = mask->conf;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_COUNT;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n \t\t\t/* TODO: Validation logic */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_CT;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:\n \t\t\tret = flow_hw_validate_action_push_vlan\n@@ -3360,6 +3799,7 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,\n \t\t\ti += is_of_vlan_pcp_present(action) ?\n \t\t\t\tMLX5_HW_VLAN_PUSH_PCP_IDX :\n \t\t\t\tMLX5_HW_VLAN_PUSH_VID_IDX;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n \t\t\tactions_end = true;\n@@ -3371,9 +3811,23 @@ flow_hw_actions_validate(struct rte_eth_dev *dev,\n \t\t\t\t\t\t  \"action not supported in template API\");\n \t\t}\n \t}\n+\tif (act_flags != NULL)\n+\t\t*act_flags = action_flags;\n \treturn 0;\n }\n \n+static int\n+flow_hw_actions_validate(struct rte_eth_dev *dev,\n+\t\t\t const struct rte_flow_actions_template_attr *attr,\n+\t\t\t const struct rte_flow_action actions[],\n+\t\t\t const struct rte_flow_action masks[],\n+\t\t\t struct rte_flow_error *error)\n+{\n+\treturn mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL,\n+\t\t\t\t\t     error);\n+}\n+\n+\n static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {\n \t[RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,\n \t[RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,\n@@ -3386,7 +3840,6 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {\n \t[RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_TNL_L2_TO_L2,\n \t[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,\n \t[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = 
MLX5DR_ACTION_TYP_VPORT,\n-\t[RTE_FLOW_ACTION_TYPE_COUNT] = MLX5DR_ACTION_TYP_CTR,\n \t[RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,\n \t[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,\n \t[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,\n@@ -3396,7 +3849,7 @@ static int\n flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,\n \t\t\t\t\t  unsigned int action_src,\n \t\t\t\t\t  enum mlx5dr_action_type *action_types,\n-\t\t\t\t\t  uint16_t *curr_off,\n+\t\t\t\t\t  uint16_t *curr_off, uint16_t *cnt_off,\n \t\t\t\t\t  struct rte_flow_actions_template *at)\n {\n \tuint32_t type;\n@@ -3413,10 +3866,18 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,\n \t\taction_types[*curr_off] = MLX5DR_ACTION_TYP_TIR;\n \t\t*curr_off = *curr_off + 1;\n \t\tbreak;\n+\tcase RTE_FLOW_ACTION_TYPE_AGE:\n \tcase RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\tat->actions_off[action_src] = *curr_off;\n-\t\taction_types[*curr_off] = MLX5DR_ACTION_TYP_CTR;\n-\t\t*curr_off = *curr_off + 1;\n+\t\t/*\n+\t\t * Both AGE and COUNT action need counter, the first one fills\n+\t\t * the action_types array, and the second only saves the offset.\n+\t\t */\n+\t\tif (*cnt_off == UINT16_MAX) {\n+\t\t\t*cnt_off = *curr_off;\n+\t\t\taction_types[*cnt_off] = MLX5DR_ACTION_TYP_CTR;\n+\t\t\t*curr_off = *curr_off + 1;\n+\t\t}\n+\t\tat->actions_off[action_src] = *cnt_off;\n \t\tbreak;\n \tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n \t\tat->actions_off[action_src] = *curr_off;\n@@ -3455,6 +3916,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)\n \tenum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_TNL_L2_TO_L2;\n \tuint16_t reformat_off = UINT16_MAX;\n \tuint16_t mhdr_off = UINT16_MAX;\n+\tuint16_t cnt_off = UINT16_MAX;\n \tint ret;\n \tfor (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {\n \t\tconst struct rte_flow_action_raw_encap *raw_encap_data;\n@@ -3467,9 +3929,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)\n \t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n-\t\t\tret = flow_hw_dr_actions_template_handle_shared(&at->masks[i], i,\n-\t\t\t\t\t\t\t\t\taction_types,\n-\t\t\t\t\t\t\t\t\t&curr_off, at);\n+\t\t\tret = flow_hw_dr_actions_template_handle_shared\n+\t\t\t\t\t\t\t\t (&at->masks[i],\n+\t\t\t\t\t\t\t\t  i,\n+\t\t\t\t\t\t\t\t  action_types,\n+\t\t\t\t\t\t\t\t  &curr_off,\n+\t\t\t\t\t\t\t\t  &cnt_off, at);\n \t\t\tif (ret)\n \t\t\t\treturn NULL;\n \t\t\tbreak;\n@@ -3525,6 +3990,19 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)\n \t\t\tif (curr_off >= MLX5_HW_MAX_ACTS)\n \t\t\t\tgoto err_actions_num;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_AGE:\n+\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n+\t\t\t/*\n+\t\t\t * Both AGE and COUNT action need counter, the first\n+\t\t\t * one fills the action_types array, and the second only\n+\t\t\t * saves the offset.\n+\t\t\t */\n+\t\t\tif (cnt_off == UINT16_MAX) {\n+\t\t\t\tcnt_off = curr_off++;\n+\t\t\t\taction_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;\n+\t\t\t}\n+\t\t\tat->actions_off[i] = cnt_off;\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\ttype = mlx5_hw_dr_action_types[at->actions[i].type];\n \t\t\tat->actions_off[i] = curr_off;\n@@ -3665,6 +4143,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,\n \tunsigned int i;\n \tstruct rte_flow_actions_template *at = NULL;\n \tuint16_t pos = UINT16_MAX;\n+\tuint64_t action_flags = 0;\n 
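Both the shared and the plain template paths above fold AGE and COUNT into a single MLX5DR_ACTION_TYP_CTR slot: whichever action is met first allocates the slot and records cnt_off, the other one only stores the same offset. A toy sketch of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t curr_off = 0, cnt_off = UINT16_MAX;
	uint16_t actions_off[8];
	const unsigned int age_src = 0, count_src = 1;	/* toy positions */

	for (unsigned int src = age_src; src <= count_src; src++) {
		if (cnt_off == UINT16_MAX) {
			cnt_off = curr_off++;	/* first user allocates */
			/* action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR; */
		}
		actions_off[src] = cnt_off;	/* both share the slot */
	}
	printf("age@%u count@%u curr_off=%u\n",
	       (unsigned int)actions_off[age_src],
	       (unsigned int)actions_off[count_src],
	       (unsigned int)curr_off);	/* prints: age@0 count@0 curr_off=1 */
	return 0;
}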
\tstruct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];\n \tstruct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];\n \tstruct rte_flow_action *ra = (void *)(uintptr_t)actions;\n@@ -3707,22 +4186,9 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,\n \t\t.conf = &rx_mreg_mask,\n \t};\n \n-\tif (flow_hw_actions_validate(dev, attr, actions, masks, error))\n+\tif (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,\n+\t\t\t\t\t  &action_flags, error))\n \t\treturn NULL;\n-\tif (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&\n-\t    priv->sh->config.dv_esw_en) {\n-\t\t/* Application should make sure only one Q/RSS exist in one rule. */\n-\t\tif (flow_hw_action_meta_copy_insert(actions, masks, &rx_cpy, &rx_cpy_mask,\n-\t\t\t\t\t\t    tmp_action, tmp_mask, &pos)) {\n-\t\t\trte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n-\t\t\t\t\t   \"Failed to concatenate new action/mask\");\n-\t\t\treturn NULL;\n-\t\t} else if (pos != UINT16_MAX) {\n-\t\t\tra = tmp_action;\n-\t\t\trm = tmp_mask;\n-\t\t}\n-\t}\n \tfor (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {\n \t\tswitch (ra[i].type) {\n \t\t/* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */\n@@ -3748,6 +4214,29 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL, \"Too many actions\");\n \t\treturn NULL;\n \t}\n+\tif (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&\n+\t    priv->sh->config.dv_esw_en &&\n+\t    (action_flags &\n+\t     (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {\n+\t\t/* Insert META copy */\n+\t\tif (act_num + 1 > MLX5_HW_MAX_ACTS) {\n+\t\t\trte_flow_error_set(error, E2BIG,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   NULL, \"cannot expand: too many actions\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t\t/* Application should make sure only one Q/RSS exist in one rule. 
*/\n+\t\tpos = flow_hw_template_expand_modify_field(actions, masks,\n+\t\t\t\t\t\t\t   &rx_cpy,\n+\t\t\t\t\t\t\t   &rx_cpy_mask,\n+\t\t\t\t\t\t\t   tmp_action, tmp_mask,\n+\t\t\t\t\t\t\t   action_flags,\n+\t\t\t\t\t\t\t   act_num);\n+\t\tra = tmp_action;\n+\t\trm = tmp_mask;\n+\t\tact_num++;\n+\t\taction_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;\n+\t}\n \tif (set_vlan_vid_ix != -1) {\n \t\t/* If temporary action buffer was not used, copy template actions to it */\n \t\tif (ra == actions && rm == masks) {\n@@ -3818,6 +4307,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,\n \tat->tmpl = flow_hw_dr_actions_template_create(at);\n \tif (!at->tmpl)\n \t\tgoto error;\n+\tat->action_flags = action_flags;\n \t__atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);\n \tLIST_INSERT_HEAD(&priv->flow_hw_at, at, next);\n \treturn at;\n@@ -4161,6 +4651,7 @@ flow_hw_info_get(struct rte_eth_dev *dev,\n \t\t struct rte_flow_queue_info *queue_info,\n \t\t struct rte_flow_error *error __rte_unused)\n {\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tuint16_t port_id = dev->data->port_id;\n \tstruct rte_mtr_capabilities mtr_cap;\n \tint ret;\n@@ -4177,6 +4668,8 @@ flow_hw_info_get(struct rte_eth_dev *dev,\n \t\tport_info->max_nb_meter_profiles = UINT32_MAX;\n \t\tport_info->max_nb_meter_policies = UINT32_MAX;\n \t}\n+\tport_info->max_nb_counters = priv->sh->hws_max_nb_counters;\n+\tport_info->max_nb_aging_objects = port_info->max_nb_counters;\n \treturn 0;\n }\n \n@@ -5555,8 +6048,6 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\t\tgoto err;\n \t\t}\n \t}\n-\tif (_queue_attr)\n-\t\tmlx5_free(_queue_attr);\n \tif (port_attr->nb_conn_tracks) {\n \t\tmem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +\n \t\t\t   sizeof(*priv->ct_mng);\n@@ -5573,13 +6064,35 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t}\n \tif (port_attr->nb_counters) {\n \t\tpriv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,\n-\t\t\t\tnb_queue);\n+\t\t\t\t\t\t\t   nb_queue);\n \t\tif (priv->hws_cpool == NULL)\n \t\t\tgoto err;\n \t}\n+\tif (port_attr->nb_aging_objects) {\n+\t\tif (port_attr->nb_counters == 0) {\n+\t\t\t/*\n+\t\t\t * Aging management uses counter. 
The number of\n+\t\t\t * counters requested should account for one counter\n+\t\t\t * per flow rule containing AGE without COUNT.\n+\t\t\t */\n+\t\t\tDRV_LOG(ERR, \"Port %u AGE objects are requested (%u) \"\n+\t\t\t\t\"without counters being requested.\",\n+\t\t\t\tdev->data->port_id,\n+\t\t\t\tport_attr->nb_aging_objects);\n+\t\t\trte_errno = EINVAL;\n+\t\t\tgoto err;\n+\t\t}\n+\t\tret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue);\n+\t\tif (ret < 0)\n+\t\t\tgoto err;\n+\t}\n \tret = flow_hw_create_vlan(dev);\n \tif (ret)\n \t\tgoto err;\n+\tif (_queue_attr)\n+\t\tmlx5_free(_queue_attr);\n+\tif (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)\n+\t\tpriv->hws_strict_queue = 1;\n \treturn 0;\n err:\n \tif (priv->hws_ctpool) {\n@@ -5590,6 +6103,10 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\tflow_hw_ct_mng_destroy(dev, priv->ct_mng);\n \t\tpriv->ct_mng = NULL;\n \t}\n+\tif (priv->hws_age_req)\n+\t\tmlx5_hws_age_pool_destroy(priv);\n+\tif (priv->hws_cpool)\n+\t\tmlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);\n \tflow_hw_free_vport_actions(priv);\n \tfor (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {\n \t\tif (priv->hw_drop[i])\n@@ -5663,6 +6180,8 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \t\tmlx5_ipool_destroy(priv->acts_ipool);\n \t\tpriv->acts_ipool = NULL;\n \t}\n+\tif (priv->hws_age_req)\n+\t\tmlx5_hws_age_pool_destroy(priv);\n \tif (priv->hws_cpool)\n \t\tmlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);\n \tif (priv->hws_ctpool) {\n@@ -5999,13 +6518,53 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\tMLX5_ACTION_CTX_CT_GEN_IDX(PORT_ID(priv), ct_idx);\n }\n \n+/**\n+ * Validate shared action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] queue\n+ *   Which queue to be used.\n+ * @param[in] attr\n+ *   Operation attribute.\n+ * @param[in] conf\n+ *   Indirect action configuration.\n+ * @param[in] action\n+ *   rte_flow action detail.\n+ * @param[in] user_data\n+ *   Pointer to the user_data.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, otherwise negative errno value.\n+ */\n+static int\n+flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t       const struct rte_flow_op_attr *attr,\n+\t\t\t       const struct rte_flow_indir_action_conf *conf,\n+\t\t\t       const struct rte_flow_action *action,\n+\t\t\t       void *user_data,\n+\t\t\t       struct rte_flow_error *error)\n+{\n+\tRTE_SET_USED(attr);\n+\tRTE_SET_USED(queue);\n+\tRTE_SET_USED(user_data);\n+\tswitch (action->type) {\n+\tcase RTE_FLOW_ACTION_TYPE_METER_MARK:\n+\t\treturn flow_hw_validate_action_meter_mark(dev, action, error);\n+\tdefault:\n+\t\treturn flow_dv_action_validate(dev, conf, action, error);\n+\t}\n+}\n+\n /**\n  * Create shared action.\n  *\n  * @param[in] dev\n  *   Pointer to the rte_eth_dev structure.\n  * @param[in] queue\n- *   Which queue to be used..\n+ *   Which queue to be used.\n  * @param[in] attr\n  *   Operation attribute.\n  * @param[in] conf\n@@ -6030,16 +6589,32 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n {\n \tstruct rte_flow_action_handle *handle = NULL;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_age *age;\n \tstruct mlx5_aso_mtr *aso_mtr;\n \tcnt_id_t cnt_id;\n \tuint32_t mtr_id;\n+\tuint32_t age_idx;\n \n-\tRTE_SET_USED(queue);\n \tRTE_SET_USED(attr);\n \tRTE_SET_USED(user_data);\n \tswitch (action->type) {\n+\tcase 
RTE_FLOW_ACTION_TYPE_AGE:\n+\t\tage = action->conf;\n+\t\tage_idx = mlx5_hws_age_action_create(priv, queue, true, age,\n+\t\t\t\t\t\t     0, error);\n+\t\tif (age_idx == 0) {\n+\t\t\trte_flow_error_set(error, ENODEV,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t   \"AGE is not configured!\");\n+\t\t} else {\n+\t\t\tage_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<\n+\t\t\t\t   MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;\n+\t\t\thandle =\n+\t\t\t    (struct rte_flow_action_handle *)(uintptr_t)age_idx;\n+\t\t}\n+\t\tbreak;\n \tcase RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\tif (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id))\n+\t\tif (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))\n \t\t\trte_flow_error_set(error, ENODEV,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\tNULL,\n@@ -6059,8 +6634,13 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\tMLX5_INDIRECT_ACTION_TYPE_OFFSET) | (aso_mtr->fm.meter_id);\n \t\thandle = (struct rte_flow_action_handle *)(uintptr_t)mtr_id;\n \t\tbreak;\n-\tdefault:\n+\tcase RTE_FLOW_ACTION_TYPE_RSS:\n \t\thandle = flow_dv_action_create(dev, conf, action, error);\n+\t\tbreak;\n+\tdefault:\n+\t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   NULL, \"action type not supported\");\n+\t\treturn NULL;\n \t}\n \treturn handle;\n }\n@@ -6071,7 +6651,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n  * @param[in] dev\n  *   Pointer to the rte_eth_dev structure.\n  * @param[in] queue\n- *   Which queue to be used..\n+ *   Which queue to be used.\n  * @param[in] attr\n  *   Operation attribute.\n  * @param[in] handle\n@@ -6094,7 +6674,6 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t     void *user_data,\n \t\t\t     struct rte_flow_error *error)\n {\n-\tRTE_SET_USED(queue);\n \tRTE_SET_USED(attr);\n \tRTE_SET_USED(user_data);\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -6109,6 +6688,8 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \tuint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n \n \tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n+\t\treturn mlx5_hws_age_action_update(priv, idx, update, error);\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n \t\treturn flow_hw_conntrack_update(dev, queue, update, act_idx, error);\n \tcase MLX5_INDIRECT_ACTION_TYPE_METER_MARK:\n@@ -6142,11 +6723,15 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to wait for ASO meter CQE\");\n-\t\treturn 0;\n-\tdefault:\n \t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n+\t\treturn flow_dv_action_update(dev, handle, update, error);\n+\tdefault:\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"action type not supported\");\n \t}\n-\treturn flow_dv_action_update(dev, handle, update, error);\n+\treturn 0;\n }\n \n /**\n@@ -6155,7 +6740,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n  * @param[in] dev\n  *   Pointer to the rte_eth_dev structure.\n  * @param[in] queue\n- *   Which queue to be used..\n+ *   Which queue to be used.\n  * @param[in] attr\n  *   Operation attribute.\n  * @param[in] handle\n@@ -6177,6 +6762,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n {\n \tuint32_t act_idx = (uint32_t)(uintptr_t)handle;\n \tuint32_t type = 
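The indirect handle built above is a tagged 32-bit integer: the action type occupies the bits above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the pool index the bits below it. A round-trip sketch; the concrete offset and type values here are assumptions made for illustration only:

#include <stdint.h>
#include <stdio.h>

#define TYPE_OFFSET 29u	/* assumed MLX5_INDIRECT_ACTION_TYPE_OFFSET */
#define TYPE_AGE    2u	/* assumed MLX5_INDIRECT_ACTION_TYPE_AGE */

int main(void)
{
	uint32_t idx = 1234;					/* ipool index */
	uint32_t handle = (TYPE_AGE << TYPE_OFFSET) | idx;	/* encode */
	uint32_t type = handle >> TYPE_OFFSET;			/* decode */
	uint32_t back = handle & ((1u << TYPE_OFFSET) - 1);

	printf("type=%u idx=%u\n", type, back);	/* prints: type=2 idx=1234 */
	return 0;
}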
act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n+\tuint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;\n \tuint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_aso_mtr_pool *pool = priv->hws_mpool;\n@@ -6187,7 +6773,16 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \tRTE_SET_USED(attr);\n \tRTE_SET_USED(user_data);\n \tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n+\t\treturn mlx5_hws_age_action_destroy(priv, age_idx, error);\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n+\t\tage_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);\n+\t\tif (age_idx != 0)\n+\t\t\t/*\n+\t\t\t * If this counter belongs to indirect AGE, here is the\n+\t\t\t * time to update the AGE.\n+\t\t\t */\n+\t\t\tmlx5_hws_age_nb_cnt_decrease(priv, age_idx);\n \t\treturn mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n \t\treturn flow_hw_conntrack_destroy(dev, act_idx, error);\n@@ -6212,10 +6807,15 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\tNULL, \"Unable to wait for ASO meter CQE\");\n \t\tmlx5_ipool_free(pool->idx_pool, idx);\n-\t\treturn 0;\n-\tdefault:\n+\t\tbreak;\n+\tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n \t\treturn flow_dv_action_destroy(dev, handle, error);\n+\tdefault:\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"action type not supported\");\n \t}\n+\treturn 0;\n }\n \n static int\n@@ -6225,13 +6825,14 @@ flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_hws_cnt *cnt;\n \tstruct rte_flow_query_count *qc = data;\n-\tuint32_t iidx = mlx5_hws_cnt_iidx(priv->hws_cpool, counter);\n+\tuint32_t iidx;\n \tuint64_t pkts, bytes;\n \n \tif (!mlx5_hws_cnt_id_valid(counter))\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\"counter are not available\");\n+\tiidx = mlx5_hws_cnt_iidx(priv->hws_cpool, counter);\n \tcnt = &priv->hws_cpool->pool[iidx];\n \t__hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);\n \tqc->hits_set = 1;\n@@ -6245,12 +6846,64 @@ flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,\n \treturn 0;\n }\n \n+/**\n+ * Query a flow rule AGE action for aging information.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device.\n+ * @param[in] age_idx\n+ *   Index of AGE action parameter.\n+ * @param[out] data\n+ *   Data retrieved by the query.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,\n+\t\t  struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct mlx5_indexed_pool *ipool = age_info->ages_ipool;\n+\tstruct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);\n+\tstruct rte_flow_query_age *resp = data;\n+\n+\tif (!param || !param->timeout)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL, \"age data not available\");\n+\tswitch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {\n+\tcase HWS_AGE_AGED_OUT_REPORTED:\n+\tcase 
HWS_AGE_AGED_OUT_NOT_REPORTED:\n+\t\tresp->aged = 1;\n+\t\tbreak;\n+\tcase HWS_AGE_CANDIDATE:\n+\tcase HWS_AGE_CANDIDATE_INSIDE_RING:\n+\t\tresp->aged = 0;\n+\t\tbreak;\n+\tcase HWS_AGE_FREE:\n+\t\t/*\n+\t\t * When state is FREE the flow itself should be invalid.\n+\t\t * Fall-through.\n+\t\t */\n+\tdefault:\n+\t\tMLX5_ASSERT(0);\n+\t\tbreak;\n+\t}\n+\tresp->sec_since_last_hit_valid = !resp->aged;\n+\tif (resp->sec_since_last_hit_valid)\n+\t\tresp->sec_since_last_hit = __atomic_load_n\n+\t\t\t\t (&param->sec_since_last_hit, __ATOMIC_RELAXED);\n+\treturn 0;\n+}\n+\n static int\n-flow_hw_query(struct rte_eth_dev *dev,\n-\t      struct rte_flow *flow __rte_unused,\n-\t      const struct rte_flow_action *actions __rte_unused,\n-\t      void *data __rte_unused,\n-\t      struct rte_flow_error *error __rte_unused)\n+flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,\n+\t      const struct rte_flow_action *actions, void *data,\n+\t      struct rte_flow_error *error)\n {\n \tint ret = -EINVAL;\n \tstruct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;\n@@ -6261,7 +6914,11 @@ flow_hw_query(struct rte_eth_dev *dev,\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n \t\t\tret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,\n-\t\t\t\t\t\t  error);\n+\t\t\t\t\t\t    error);\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_AGE:\n+\t\t\tret = flow_hw_query_age(dev, hw_flow->age_idx, data,\n+\t\t\t\t\t\terror);\n \t\t\tbreak;\n \t\tdefault:\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n@@ -6273,6 +6930,32 @@ flow_hw_query(struct rte_eth_dev *dev,\n \treturn ret;\n }\n \n+/**\n+ * Validate indirect action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in] conf\n+ *   Shared action configuration.\n+ * @param[in] action\n+ *   Action specification used to create indirect action.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. 
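A possible caller-side use of this query path through the standard rte_flow_query() API (error handling trimmed; the flow handle is one returned at rule creation):

#include <rte_flow.h>

static int
query_flow_age(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_age resp = { 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, actions, &resp, &error))
		return -1;
	/* resp.aged is set for the AGED_OUT_* states; while the flow is
	 * still active, resp.sec_since_last_hit reports its idle time. */
	return resp.aged;
}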
Initialized in case of\n+ *   error only.\n+ *\n+ * @return\n+ *   0 on success, otherwise negative errno value.\n+ */\n+static int\n+flow_hw_action_validate(struct rte_eth_dev *dev,\n+\t\t\tconst struct rte_flow_indir_action_conf *conf,\n+\t\t\tconst struct rte_flow_action *action,\n+\t\t\tstruct rte_flow_error *err)\n+{\n+\treturn flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,\n+\t\t\t\t\t      conf, action, NULL, err);\n+}\n+\n /**\n  * Create indirect action.\n  *\n@@ -6296,6 +6979,12 @@ flow_hw_action_create(struct rte_eth_dev *dev,\n \t\t       const struct rte_flow_action *action,\n \t\t       struct rte_flow_error *err)\n {\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tif (priv->hws_strict_queue)\n+\t\tDRV_LOG(WARNING,\n+\t\t\t\"port %u create indirect action called in strict queue mode.\",\n+\t\t\tdev->data->port_id);\n \treturn flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,\n \t\t\t\t\t    NULL, conf, action, NULL, err);\n }\n@@ -6362,17 +7051,118 @@ flow_hw_action_query(struct rte_eth_dev *dev,\n {\n \tuint32_t act_idx = (uint32_t)(uintptr_t)handle;\n \tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n+\tuint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;\n \n \tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_TYPE_AGE:\n+\t\treturn flow_hw_query_age(dev, age_idx, data, error);\n \tcase MLX5_INDIRECT_ACTION_TYPE_COUNT:\n \t\treturn flow_hw_query_counter(dev, act_idx, data, error);\n \tcase MLX5_INDIRECT_ACTION_TYPE_CT:\n \t\treturn flow_hw_conntrack_query(dev, act_idx, data, error);\n-\tdefault:\n+\tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n \t\treturn flow_dv_action_query(dev, handle, data, error);\n+\tdefault:\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t  \"action type not supported\");\n \t}\n }\n \n+/**\n+ * Get aged-out flows of a given port on the given HWS flow queue.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in] queue_id\n+ *   Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE not set.\n+ * @param[in, out] contexts\n+ *   The address of an array of pointers to the aged-out flows contexts.\n+ * @param[in] nb_contexts\n+ *   The length of context array pointers.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. 
Initialized in case of\n+ *   error only.\n+ *\n+ * @return\n+ *   if nb_contexts is 0, return the amount of all aged contexts.\n+ *   if nb_contexts is not 0 , return the amount of aged flows reported\n+ *   in the context array, otherwise negative errno value.\n+ */\n+static int\n+flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,\n+\t\t\t void **contexts, uint32_t nb_contexts,\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct rte_ring *r;\n+\tint nb_flows = 0;\n+\n+\tif (nb_contexts && !contexts)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL, \"empty context\");\n+\tif (priv->hws_strict_queue) {\n+\t\tif (queue_id >= age_info->hw_q_age->nb_rings)\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t\tNULL, \"invalid queue id\");\n+\t\tr = age_info->hw_q_age->aged_lists[queue_id];\n+\t} else {\n+\t\tr = age_info->hw_age.aged_list;\n+\t\tMLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);\n+\t}\n+\tif (nb_contexts == 0)\n+\t\treturn rte_ring_count(r);\n+\twhile ((uint32_t)nb_flows < nb_contexts) {\n+\t\tuint32_t age_idx;\n+\n+\t\tif (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)\n+\t\t\tbreak;\n+\t\t/* get the AGE context if the aged-out index is still valid. */\n+\t\tcontexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);\n+\t\tif (!contexts[nb_flows])\n+\t\t\tcontinue;\n+\t\tnb_flows++;\n+\t}\n+\treturn nb_flows;\n+}\n+\n+/**\n+ * Get aged-out flows.\n+ *\n+ * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the Ethernet device structure.\n+ * @param[in] contexts\n+ *   The address of an array of pointers to the aged-out flows contexts.\n+ * @param[in] nb_contexts\n+ *   The length of context array pointers.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL. 
Initialized in case of\n+ *   error only.\n+ *\n+ * @return\n+ *   how many contexts get in success, otherwise negative errno value.\n+ *   if nb_contexts is 0, return the amount of all aged contexts.\n+ *   if nb_contexts is not 0 , return the amount of aged flows reported\n+ *   in the context array.\n+ */\n+static int\n+flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,\n+\t\t       uint32_t nb_contexts, struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tif (priv->hws_strict_queue)\n+\t\tDRV_LOG(WARNING,\n+\t\t\t\"port %u get aged flows called in strict queue mode.\",\n+\t\t\tdev->data->port_id);\n+\treturn flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);\n+}\n+\n const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.info_get = flow_hw_info_get,\n \t.configure = flow_hw_configure,\n@@ -6391,12 +7181,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.async_action_create = flow_hw_action_handle_create,\n \t.async_action_destroy = flow_hw_action_handle_destroy,\n \t.async_action_update = flow_hw_action_handle_update,\n-\t.action_validate = flow_dv_action_validate,\n+\t.action_validate = flow_hw_action_validate,\n \t.action_create = flow_hw_action_create,\n \t.action_destroy = flow_hw_action_destroy,\n \t.action_update = flow_hw_action_update,\n \t.action_query = flow_hw_action_query,\n \t.query = flow_hw_query,\n+\t.get_aged_flows = flow_hw_get_aged_flows,\n+\t.get_q_aged_flows = flow_hw_get_q_aged_flows,\n };\n \n /**\ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex 7ffaf4c227..81a33ddf09 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -122,7 +122,7 @@ flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,\n \t\t\t      struct mlx5_flow_counter_pool **ppool)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;\n+\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;\n \tstruct mlx5_flow_counter_pool *pool;\n \n \tidx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);\n@@ -215,7 +215,7 @@ static uint32_t\n flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;\n+\tstruct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;\n \tstruct mlx5_flow_counter_pool *pool = NULL;\n \tstruct mlx5_flow_counter *cnt = NULL;\n \tuint32_t n_valid = cmng->n_valid;\ndiff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c\nindex e2408ef36d..cd606dc20f 100644\n--- a/drivers/net/mlx5/mlx5_hws_cnt.c\n+++ b/drivers/net/mlx5/mlx5_hws_cnt.c\n@@ -8,6 +8,7 @@\n #include <rte_ring.h>\n #include <mlx5_devx_cmds.h>\n #include <rte_cycles.h>\n+#include <rte_eal_paging.h>\n \n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \n@@ -26,8 +27,8 @@ __hws_cnt_id_load(struct mlx5_hws_cnt_pool *cpool)\n \tuint32_t preload;\n \tuint32_t q_num = cpool->cache->q_num;\n \tuint32_t cnt_num = mlx5_hws_cnt_pool_get_size(cpool);\n-\tcnt_id_t cnt_id, iidx = 0;\n-\tuint32_t qidx;\n+\tcnt_id_t cnt_id;\n+\tuint32_t qidx, iidx = 0;\n \tstruct rte_ring *qcache = NULL;\n \n \t/*\n@@ -86,6 +87,174 @@ __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh,\n \t} while (reset_cnt_num > 0);\n }\n \n+/**\n+ * Release AGE parameter.\n+ *\n+ * @param priv\n+ *   Pointer to the port private data structure.\n+ * @param 
own_cnt_index\n+ *   Counter ID created only for this AGE, to be released.\n+ *   Zero means there is no such counter.\n+ * @param age_ipool\n+ *   Pointer to AGE parameter indexed pool.\n+ * @param idx\n+ *   Index of AGE parameter in the indexed pool.\n+ */\n+static void\n+mlx5_hws_age_param_free(struct mlx5_priv *priv, cnt_id_t own_cnt_index,\n+\t\t\tstruct mlx5_indexed_pool *age_ipool, uint32_t idx)\n+{\n+\tif (own_cnt_index) {\n+\t\tstruct mlx5_hws_cnt_pool *cpool = priv->hws_cpool;\n+\n+\t\tMLX5_ASSERT(mlx5_hws_cnt_is_shared(cpool, own_cnt_index));\n+\t\tmlx5_hws_cnt_shared_put(cpool, &own_cnt_index);\n+\t}\n+\tmlx5_ipool_free(age_ipool, idx);\n+}\n+\n+/**\n+ * Check for new aged-out flows in the HWS counter pool and raise the event.\n+ *\n+ * @param[in] priv\n+ *   Pointer to port private object.\n+ * @param[in] cpool\n+ *   Pointer to current counter pool.\n+ */\n+static void\n+mlx5_hws_aging_check(struct mlx5_priv *priv, struct mlx5_hws_cnt_pool *cpool)\n+{\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct flow_counter_stats *stats = cpool->raw_mng->raw;\n+\tstruct mlx5_hws_age_param *param;\n+\tstruct rte_ring *r;\n+\tconst uint64_t curr_time = MLX5_CURR_TIME_SEC;\n+\tconst uint32_t time_delta = curr_time - cpool->time_of_last_age_check;\n+\tuint32_t nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(cpool);\n+\tuint16_t expected1 = HWS_AGE_CANDIDATE;\n+\tuint16_t expected2 = HWS_AGE_CANDIDATE_INSIDE_RING;\n+\tuint32_t i;\n+\n+\tcpool->time_of_last_age_check = curr_time;\n+\tfor (i = 0; i < nb_alloc_cnts; ++i) {\n+\t\tuint32_t age_idx = cpool->pool[i].age_idx;\n+\t\tuint64_t hits;\n+\n+\t\tif (!cpool->pool[i].in_used || age_idx == 0)\n+\t\t\tcontinue;\n+\t\tparam = mlx5_ipool_get(age_info->ages_ipool, age_idx);\n+\t\tif (unlikely(param == NULL)) {\n+\t\t\t/*\n+\t\t\t * When an AGE action uses an indirect counter, it is\n+\t\t\t * the user's responsibility not to use that indirect\n+\t\t\t * counter without this AGE.\n+\t\t\t * If this counter is used after the AGE was freed, the\n+\t\t\t * AGE index is invalid and using it here will cause a\n+\t\t\t * segmentation fault.\n+\t\t\t */\n+\t\t\tDRV_LOG(WARNING,\n+\t\t\t\t\"Counter %u has lost its AGE, skipping it.\", i);\n+\t\t\tcontinue;\n+\t\t}\n+\t\tif (param->timeout == 0)\n+\t\t\tcontinue;\n+\t\tswitch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {\n+\t\tcase HWS_AGE_AGED_OUT_NOT_REPORTED:\n+\t\tcase HWS_AGE_AGED_OUT_REPORTED:\n+\t\t\t/* Already aged-out, no action is needed. */\n+\t\t\tcontinue;\n+\t\tcase HWS_AGE_CANDIDATE:\n+\t\tcase HWS_AGE_CANDIDATE_INSIDE_RING:\n+\t\t\t/* This AGE is a candidate to age-out, go on checking. 
*/\n+\t\t\tbreak;\n+\t\tcase HWS_AGE_FREE:\n+\t\t\t/*\n+\t\t\t * An AGE parameter in \"FREE\" state cannot be pointed\n+\t\t\t * to by any counter, since the counter is destroyed\n+\t\t\t * first. Fall-through.\n+\t\t\t */\n+\t\tdefault:\n+\t\t\tMLX5_ASSERT(0);\n+\t\t\tcontinue;\n+\t\t}\n+\t\thits = rte_be_to_cpu_64(stats[i].hits);\n+\t\tif (param->nb_cnts == 1) {\n+\t\t\tif (hits != param->accumulator_last_hits) {\n+\t\t\t\t__atomic_store_n(&param->sec_since_last_hit, 0,\n+\t\t\t\t\t\t __ATOMIC_RELAXED);\n+\t\t\t\tparam->accumulator_last_hits = hits;\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tparam->accumulator_hits += hits;\n+\t\t\tparam->accumulator_cnt++;\n+\t\t\tif (param->accumulator_cnt < param->nb_cnts)\n+\t\t\t\tcontinue;\n+\t\t\tparam->accumulator_cnt = 0;\n+\t\t\tif (param->accumulator_last_hits !=\n+\t\t\t\t\t\tparam->accumulator_hits) {\n+\t\t\t\t__atomic_store_n(&param->sec_since_last_hit,\n+\t\t\t\t\t\t 0, __ATOMIC_RELAXED);\n+\t\t\t\tparam->accumulator_last_hits =\n+\t\t\t\t\t\t\tparam->accumulator_hits;\n+\t\t\t\tparam->accumulator_hits = 0;\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tparam->accumulator_hits = 0;\n+\t\t}\n+\t\tif (__atomic_add_fetch(&param->sec_since_last_hit, time_delta,\n+\t\t\t\t       __ATOMIC_RELAXED) <=\n+\t\t   __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))\n+\t\t\tcontinue;\n+\t\t/* Prepare the relevant ring for this AGE parameter */\n+\t\tif (priv->hws_strict_queue)\n+\t\t\tr = age_info->hw_q_age->aged_lists[param->queue_id];\n+\t\telse\n+\t\t\tr = age_info->hw_age.aged_list;\n+\t\t/* Change the state atomically and insert it into the ring. */\n+\t\tif (__atomic_compare_exchange_n(&param->state, &expected1,\n+\t\t\t\t\t\tHWS_AGE_AGED_OUT_NOT_REPORTED,\n+\t\t\t\t\t\tfalse, __ATOMIC_RELAXED,\n+\t\t\t\t\t\t__ATOMIC_RELAXED)) {\n+\t\t\tint ret = rte_ring_enqueue_burst_elem(r, &age_idx,\n+\t\t\t\t\t\t\t      sizeof(uint32_t),\n+\t\t\t\t\t\t\t      1, NULL);\n+\n+\t\t\t/*\n+\t\t\t * If the ring does not have enough room for this\n+\t\t\t * entry, restore the state so it is retried in the\n+\t\t\t * next second.\n+\t\t\t *\n+\t\t\t * FIXME: if the flow gets traffic before the next\n+\t\t\t *        second, this \"aged-out\" event is lost; to be\n+\t\t\t *        fixed later when enqueueing is optimized to\n+\t\t\t *        fill the ring in bulk.\n+\t\t\t */\n+\t\t\texpected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;\n+\t\t\tif (ret < 0 &&\n+\t\t\t    !__atomic_compare_exchange_n(&param->state,\n+\t\t\t\t\t\t\t &expected2, expected1,\n+\t\t\t\t\t\t\t false,\n+\t\t\t\t\t\t\t __ATOMIC_RELAXED,\n+\t\t\t\t\t\t\t __ATOMIC_RELAXED) &&\n+\t\t\t    expected2 == HWS_AGE_FREE)\n+\t\t\t\tmlx5_hws_age_param_free(priv,\n+\t\t\t\t\t\t\tparam->own_cnt_index,\n+\t\t\t\t\t\t\tage_info->ages_ipool,\n+\t\t\t\t\t\t\tage_idx);\n+\t\t\t/* The event is irrelevant in strict queue mode. */\n+\t\t\tif (!priv->hws_strict_queue)\n+\t\t\t\tMLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);\n+\t\t} else {\n+\t\t\t__atomic_compare_exchange_n(&param->state, &expected2,\n+\t\t\t\t\t\t  HWS_AGE_AGED_OUT_NOT_REPORTED,\n+\t\t\t\t\t\t  false, __ATOMIC_RELAXED,\n+\t\t\t\t\t\t  __ATOMIC_RELAXED);\n+\t\t}\n+\t}\n+\t/* The event is irrelevant in strict queue mode. 
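The aging arithmetic above reduces to: add the service-loop time_delta to sec_since_last_hit whenever the hit counter did not move, reset it to zero otherwise, and age the flow out once the sum exceeds timeout. A toy model of one flow that stops receiving traffic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sec_since_last_hit = 0, timeout = 10, time_delta = 1;
	uint64_t last_hits = 100, hits = 100;	/* no new traffic */

	for (int sec = 0; sec < 20; sec++) {
		if (hits != last_hits) {	/* new traffic observed */
			sec_since_last_hit = 0;
			last_hits = hits;
			continue;
		}
		sec_since_last_hit += time_delta;
		if (sec_since_last_hit > timeout) {
			printf("aged out at second %d\n", sec);	/* 10 */
			break;
		}
	}
	return 0;
}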
*/\n+\tif (!priv->hws_strict_queue)\n+\t\tmlx5_age_event_prepare(priv->sh);\n+}\n+\n static void\n mlx5_hws_cnt_raw_data_free(struct mlx5_dev_ctx_shared *sh,\n \t\t\t   struct mlx5_hws_cnt_raw_data_mng *mng)\n@@ -104,12 +273,14 @@ mlx5_hws_cnt_raw_data_alloc(struct mlx5_dev_ctx_shared *sh, uint32_t n)\n \tstruct mlx5_hws_cnt_raw_data_mng *mng = NULL;\n \tint ret;\n \tsize_t sz = n * sizeof(struct flow_counter_stats);\n+\tsize_t pgsz = rte_mem_page_size();\n \n+\tMLX5_ASSERT(pgsz > 0);\n \tmng = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*mng), 0,\n \t\t\tSOCKET_ID_ANY);\n \tif (mng == NULL)\n \t\tgoto error;\n-\tmng->raw = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sz, 0,\n+\tmng->raw = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sz, pgsz,\n \t\t\tSOCKET_ID_ANY);\n \tif (mng->raw == NULL)\n \t\tgoto error;\n@@ -146,6 +317,9 @@ mlx5_hws_cnt_svc(void *opaque)\n \t\t\t    opriv->sh == sh &&\n \t\t\t    opriv->hws_cpool != NULL) {\n \t\t\t\t__mlx5_hws_cnt_svc(sh, opriv->hws_cpool);\n+\t\t\t\tif (opriv->hws_age_req)\n+\t\t\t\t\tmlx5_hws_aging_check(opriv,\n+\t\t\t\t\t\t\t     opriv->hws_cpool);\n \t\t\t}\n \t\t}\n \t\tquery_cycle = rte_rdtsc() - start_cycle;\n@@ -158,8 +332,9 @@ mlx5_hws_cnt_svc(void *opaque)\n }\n \n struct mlx5_hws_cnt_pool *\n-mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,\n-\t\tconst struct mlx5_hws_cache_param *ccfg)\n+mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,\n+\t\t       const struct mlx5_hws_cnt_pool_cfg *pcfg,\n+\t\t       const struct mlx5_hws_cache_param *ccfg)\n {\n \tchar mz_name[RTE_MEMZONE_NAMESIZE];\n \tstruct mlx5_hws_cnt_pool *cntp;\n@@ -185,16 +360,26 @@ mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,\n \tcntp->cache->preload_sz = ccfg->preload_sz;\n \tcntp->cache->threshold = ccfg->threshold;\n \tcntp->cache->q_num = ccfg->q_num;\n+\tif (pcfg->request_num > sh->hws_max_nb_counters) {\n+\t\tDRV_LOG(ERR, \"Counter number %u \"\n+\t\t\t\"is greater than the maximum supported (%u).\",\n+\t\t\tpcfg->request_num, sh->hws_max_nb_counters);\n+\t\tgoto error;\n+\t}\n \tcnt_num = pcfg->request_num * (100 + pcfg->alloc_factor) / 100;\n \tif (cnt_num > UINT32_MAX) {\n \t\tDRV_LOG(ERR, \"counter number %\"PRIu64\" is out of 32bit range\",\n \t\t\tcnt_num);\n \t\tgoto error;\n \t}\n+\t/*\n+\t * When counter request number is supported, but the factor takes it\n+\t * out of size, the factor is reduced.\n+\t */\n+\tcnt_num = RTE_MIN((uint32_t)cnt_num, sh->hws_max_nb_counters);\n \tcntp->pool = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,\n-\t\t\tsizeof(struct mlx5_hws_cnt) *\n-\t\t\tpcfg->request_num * (100 + pcfg->alloc_factor) / 100,\n-\t\t\t0, SOCKET_ID_ANY);\n+\t\t\t\t sizeof(struct mlx5_hws_cnt) * cnt_num,\n+\t\t\t\t 0, SOCKET_ID_ANY);\n \tif (cntp->pool == NULL)\n \t\tgoto error;\n \tsnprintf(mz_name, sizeof(mz_name), \"%s_F_RING\", pcfg->name);\n@@ -231,6 +416,8 @@ mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,\n \t\tif (cntp->cache->qcache[qidx] == NULL)\n \t\t\tgoto error;\n \t}\n+\t/* Initialize the time for aging-out calculation. 
*/\n+\tcntp->time_of_last_age_check = MLX5_CURR_TIME_SEC;\n \treturn cntp;\n error:\n \tmlx5_hws_cnt_pool_deinit(cntp);\n@@ -297,19 +484,17 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,\n \t\t\t    struct mlx5_hws_cnt_pool *cpool)\n {\n \tstruct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;\n-\tuint32_t max_log_bulk_sz = 0;\n+\tuint32_t max_log_bulk_sz = sh->hws_max_log_bulk_sz;\n \tuint32_t log_bulk_sz;\n-\tuint32_t idx, alloced = 0;\n+\tuint32_t idx, alloc_candidate, alloced = 0;\n \tunsigned int cnt_num = mlx5_hws_cnt_pool_get_size(cpool);\n \tstruct mlx5_devx_counter_attr attr = {0};\n \tstruct mlx5_devx_obj *dcs;\n \n \tif (hca_attr->flow_counter_bulk_log_max_alloc == 0) {\n-\t\tDRV_LOG(ERR,\n-\t\t\t\"Fw doesn't support bulk log max alloc\");\n+\t\tDRV_LOG(ERR, \"Fw doesn't support bulk log max alloc\");\n \t\treturn -1;\n \t}\n-\tmax_log_bulk_sz = 23; /* hard code to 8M (1 << 23). */\n \tcnt_num = RTE_ALIGN_CEIL(cnt_num, 4); /* minimal 4 counter in bulk. */\n \tlog_bulk_sz = RTE_MIN(max_log_bulk_sz, rte_log2_u32(cnt_num));\n \tattr.pd = sh->cdev->pdn;\n@@ -327,18 +512,23 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,\n \tcpool->dcs_mng.dcs[0].iidx = 0;\n \talloced = cpool->dcs_mng.dcs[0].batch_sz;\n \tif (cnt_num > cpool->dcs_mng.dcs[0].batch_sz) {\n-\t\tfor (; idx < MLX5_HWS_CNT_DCS_NUM; idx++) {\n+\t\twhile (idx < MLX5_HWS_CNT_DCS_NUM) {\n \t\t\tattr.flow_counter_bulk_log_size = --max_log_bulk_sz;\n+\t\t\talloc_candidate = RTE_BIT32(max_log_bulk_sz);\n+\t\t\tif (alloced + alloc_candidate > sh->hws_max_nb_counters)\n+\t\t\t\tcontinue;\n \t\t\tdcs = mlx5_devx_cmd_flow_counter_alloc_general\n \t\t\t\t(sh->cdev->ctx, &attr);\n \t\t\tif (dcs == NULL)\n \t\t\t\tgoto error;\n \t\t\tcpool->dcs_mng.dcs[idx].obj = dcs;\n-\t\t\tcpool->dcs_mng.dcs[idx].batch_sz =\n-\t\t\t\t(1 << max_log_bulk_sz);\n+\t\t\tcpool->dcs_mng.dcs[idx].batch_sz = alloc_candidate;\n \t\t\tcpool->dcs_mng.dcs[idx].iidx = alloced;\n \t\t\talloced += cpool->dcs_mng.dcs[idx].batch_sz;\n \t\t\tcpool->dcs_mng.batch_total++;\n+\t\t\tif (alloced >= cnt_num)\n+\t\t\t\tbreak;\n+\t\t\tidx++;\n \t\t}\n \t}\n \treturn 0;\n@@ -445,7 +635,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,\n \t\t\tdev->data->port_id);\n \tpcfg.name = mp_name;\n \tpcfg.request_num = pattr->nb_counters;\n-\tcpool = mlx5_hws_cnt_pool_init(&pcfg, &cparam);\n+\tcpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam);\n \tif (cpool == NULL)\n \t\tgoto error;\n \tret = mlx5_hws_cnt_pool_dcs_alloc(priv->sh, cpool);\n@@ -525,4 +715,484 @@ mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh)\n \tsh->cnt_svc = NULL;\n }\n \n+/**\n+ * Destroy AGE action.\n+ *\n+ * @param priv\n+ *   Pointer to the port private data structure.\n+ * @param idx\n+ *   Index of AGE parameter.\n+ * @param error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,\n+\t\t\t    struct rte_flow_error *error)\n+{\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct mlx5_indexed_pool *ipool = age_info->ages_ipool;\n+\tstruct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);\n+\n+\tif (param == NULL)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"invalid AGE parameter index\");\n+\tswitch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,\n+\t\t\t\t    __ATOMIC_RELAXED)) {\n+\tcase 
HWS_AGE_CANDIDATE:\n+\tcase HWS_AGE_AGED_OUT_REPORTED:\n+\t\tmlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);\n+\t\tbreak;\n+\tcase HWS_AGE_AGED_OUT_NOT_REPORTED:\n+\tcase HWS_AGE_CANDIDATE_INSIDE_RING:\n+\t\t/*\n+\t\t * In both cases the AGE is inside the ring. The state is\n+\t\t * changed here and it is destroyed later, when it is taken\n+\t\t * out of the ring.\n+\t\t */\n+\t\tbreak;\n+\tcase HWS_AGE_FREE:\n+\t\t/*\n+\t\t * If the index is valid and the state is FREE, it means this\n+\t\t * AGE has already been freed for the user but not yet for the\n+\t\t * PMD, since it is still inside the ring.\n+\t\t */\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"this AGE has already been released\");\n+\tdefault:\n+\t\tMLX5_ASSERT(0);\n+\t\tbreak;\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Create AGE action parameter.\n+ *\n+ * @param[in] priv\n+ *   Pointer to the port private data structure.\n+ * @param[in] queue_id\n+ *   Which HWS queue to be used.\n+ * @param[in] shared\n+ *   Whether it is an indirect AGE action.\n+ * @param[in] flow_idx\n+ *   Flow index from the indexed pool.\n+ *   Ignored for an indirect AGE action.\n+ * @param[in] age\n+ *   Pointer to the aging action configuration.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   Index to AGE action parameter on success, 0 otherwise.\n+ */\n+uint32_t\n+mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,\n+\t\t\t   bool shared, const struct rte_flow_action_age *age,\n+\t\t\t   uint32_t flow_idx, struct rte_flow_error *error)\n+{\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct mlx5_indexed_pool *ipool = age_info->ages_ipool;\n+\tstruct mlx5_hws_age_param *param;\n+\tuint32_t age_idx;\n+\n+\tparam = mlx5_ipool_malloc(ipool, &age_idx);\n+\tif (param == NULL) {\n+\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"cannot allocate AGE parameter\");\n+\t\treturn 0;\n+\t}\n+\tMLX5_ASSERT(__atomic_load_n(&param->state,\n+\t\t\t\t    __ATOMIC_RELAXED) == HWS_AGE_FREE);\n+\tif (shared) {\n+\t\tparam->nb_cnts = 0;\n+\t\tparam->accumulator_hits = 0;\n+\t\tparam->accumulator_cnt = 0;\n+\t\tflow_idx = age_idx;\n+\t} else {\n+\t\tparam->nb_cnts = 1;\n+\t}\n+\tparam->context = age->context ? 
age->context :\n+\t\t\t\t\t(void *)(uintptr_t)flow_idx;\n+\tparam->timeout = age->timeout;\n+\tparam->queue_id = queue_id;\n+\tparam->accumulator_last_hits = 0;\n+\tparam->own_cnt_index = 0;\n+\tparam->sec_since_last_hit = 0;\n+\tparam->state = HWS_AGE_CANDIDATE;\n+\treturn age_idx;\n+}\n+\n+/**\n+ * Update indirect AGE action parameter.\n+ *\n+ * @param[in] priv\n+ *   Pointer to the port private data structure.\n+ * @param[in] idx\n+ *   Index of AGE parameter.\n+ * @param[in] update\n+ *   Update value.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,\n+\t\t\t   const void *update, struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_update_age *update_ade = update;\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct mlx5_indexed_pool *ipool = age_info->ages_ipool;\n+\tstruct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);\n+\tbool sec_since_last_hit_reset = false;\n+\tbool state_update = false;\n+\n+\tif (param == NULL)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"invalid AGE parameter index\");\n+\tif (update_ade->timeout_valid) {\n+\t\tuint32_t old_timeout = __atomic_exchange_n(&param->timeout,\n+\t\t\t\t\t\t\t   update_ade->timeout,\n+\t\t\t\t\t\t\t   __ATOMIC_RELAXED);\n+\n+\t\tif (old_timeout == 0)\n+\t\t\tsec_since_last_hit_reset = true;\n+\t\telse if (old_timeout < update_ade->timeout ||\n+\t\t\t update_ade->timeout == 0)\n+\t\t\t/*\n+\t\t\t * When timeout is increased, aged-out flows might be\n+\t\t\t * active again and state should be updated accordingly.\n+\t\t\t * When the new timeout is 0, the state is updated to\n+\t\t\t * stop reporting the flow as aged-out.\n+\t\t\t */\n+\t\t\tstate_update = true;\n+\t}\n+\tif (update_ade->touch) {\n+\t\tsec_since_last_hit_reset = true;\n+\t\tstate_update = true;\n+\t}\n+\tif (sec_since_last_hit_reset)\n+\t\t__atomic_store_n(&param->sec_since_last_hit, 0,\n+\t\t\t\t __ATOMIC_RELAXED);\n+\tif (state_update) {\n+\t\tuint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;\n+\n+\t\t/*\n+\t\t * Change states of aged-out flows to active:\n+\t\t *  - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING\n+\t\t *  - AGED_OUT_REPORTED -> CANDIDATE\n+\t\t */\n+\t\tif (!__atomic_compare_exchange_n(&param->state, &expected,\n+\t\t\t\t\t\t HWS_AGE_CANDIDATE_INSIDE_RING,\n+\t\t\t\t\t\t false, __ATOMIC_RELAXED,\n+\t\t\t\t\t\t __ATOMIC_RELAXED) &&\n+\t\t    expected == HWS_AGE_AGED_OUT_REPORTED)\n+\t\t\t__atomic_store_n(&param->state, HWS_AGE_CANDIDATE,\n+\t\t\t\t\t __ATOMIC_RELAXED);\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Get the AGE context if the aged-out index is still valid.\n+ *\n+ * @param priv\n+ *   Pointer to the port private data structure.\n+ * @param idx\n+ *   Index of AGE parameter.\n+ *\n+ * @return\n+ *   AGE context if the index is still aged-out, NULL otherwise.\n+ */\n+void *\n+mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx)\n+{\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct mlx5_indexed_pool *ipool = age_info->ages_ipool;\n+\tstruct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);\n+\tuint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;\n+\n+\tMLX5_ASSERT(param != NULL);\n+\tif (__atomic_compare_exchange_n(&param->state, &expected,\n+\t\t\t\t\tHWS_AGE_AGED_OUT_REPORTED, false,\n+\t\t\t\t\t__ATOMIC_RELAXED, 
__ATOMIC_RELAXED))\n+\t\treturn param->context;\n+\tswitch (expected) {\n+\tcase HWS_AGE_FREE:\n+\t\t/*\n+\t\t * This AGE couldn't have been destroyed since it was inside\n+\t\t * the ring. Its state has been updated, and now it is\n+\t\t * actually destroyed.\n+\t\t */\n+\t\tmlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);\n+\t\tbreak;\n+\tcase HWS_AGE_CANDIDATE_INSIDE_RING:\n+\t\t__atomic_store_n(&param->state, HWS_AGE_CANDIDATE,\n+\t\t\t\t __ATOMIC_RELAXED);\n+\t\tbreak;\n+\tcase HWS_AGE_CANDIDATE:\n+\t\t/*\n+\t\t * Only the background thread pushes to the ring, and it never\n+\t\t * pushes this state. An AGE inside the ring that becomes a\n+\t\t * candidate gets the special state HWS_AGE_CANDIDATE_INSIDE_RING.\n+\t\t * Fall-through.\n+\t\t */\n+\tcase HWS_AGE_AGED_OUT_REPORTED:\n+\t\t/*\n+\t\t * Only this thread (doing query) may write this state, and it\n+\t\t * happens only after the query thread takes it out of the ring.\n+\t\t * Fall-through.\n+\t\t */\n+\tcase HWS_AGE_AGED_OUT_NOT_REPORTED:\n+\t\t/*\n+\t\t * In this case the compare-exchange returns true and the\n+\t\t * function returns the context immediately.\n+\t\t * Fall-through.\n+\t\t */\n+\tdefault:\n+\t\tMLX5_ASSERT(0);\n+\t\tbreak;\n+\t}\n+\treturn NULL;\n+}\n+\n+#ifdef RTE_ARCH_64\n+#define MLX5_HWS_AGED_OUT_RING_SIZE_MAX UINT32_MAX\n+#else\n+#define MLX5_HWS_AGED_OUT_RING_SIZE_MAX RTE_BIT32(8)\n+#endif\n+\n+/**\n+ * Get the size of the aged-out ring list for each queue.\n+ *\n+ * The size is one percent of nb_counters divided by nb_queues.\n+ * The ring size must be a power of 2, so it is aligned up to the next\n+ * power of 2. On 32-bit systems, the size is limited to 256.\n+ *\n+ * This function is called when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is on.\n+ *\n+ * @param nb_counters\n+ *   Final number of allocated counters in the pool.\n+ * @param nb_queues\n+ *   Number of HWS queues in this port.\n+ *\n+ * @return\n+ *   Size of the aged-out ring per queue.\n+ */\n+static __rte_always_inline uint32_t\n+mlx5_hws_aged_out_q_ring_size_get(uint32_t nb_counters, uint32_t nb_queues)\n+{\n+\tuint32_t size = rte_align32pow2((nb_counters / 100) / nb_queues);\n+\tuint32_t max_size = MLX5_HWS_AGED_OUT_RING_SIZE_MAX;\n+\n+\treturn RTE_MIN(size, max_size);\n+}\n+\n+/**\n+ * Get the size of the aged-out ring list.\n+ *\n+ * The size is one percent of nb_counters.\n+ * The ring size must be a power of 2, so it is aligned up to the next\n+ * power of 2. On 32-bit systems, the size is limited to 256.\n+ *\n+ * This function is called when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is off.\n+ *\n+ * @param nb_counters\n+ *   Final number of allocated counters in the pool.\n+ *\n+ * @return\n+ *   Size of the aged-out ring list.\n+ */\n+static __rte_always_inline uint32_t\n+mlx5_hws_aged_out_ring_size_get(uint32_t nb_counters)\n+{\n+\tuint32_t size = rte_align32pow2(nb_counters / 100);\n+\tuint32_t max_size = MLX5_HWS_AGED_OUT_RING_SIZE_MAX;\n+\n+\treturn RTE_MIN(size, max_size);\n+}\n+\n+/**\n+ * Initialize the shared aging list information per port.\n+ *\n+ * @param dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param nb_queues\n+ *   Number of HWS queues.\n+ * @param strict_queue\n+ *   Indicator whether strict_queue mode is enabled.\n+ * @param ring_size\n+ *   Size of the aged-out ring to create.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_hws_age_info_init(struct rte_eth_dev *dev, uint16_t nb_queues,\n+\t\t       bool strict_queue, uint32_t ring_size)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct 
mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tuint32_t flags = RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ;\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\tstruct rte_ring *r = NULL;\n+\tuint32_t qidx;\n+\n+\tage_info->flags = 0;\n+\tif (strict_queue) {\n+\t\tsize_t size = sizeof(*age_info->hw_q_age) +\n+\t\t\t      sizeof(struct rte_ring *) * nb_queues;\n+\n+\t\tage_info->hw_q_age = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,\n+\t\t\t\t\t\t size, 0, SOCKET_ID_ANY);\n+\t\tif (age_info->hw_q_age == NULL)\n+\t\t\treturn -ENOMEM;\n+\t\tfor (qidx = 0; qidx < nb_queues; ++qidx) {\n+\t\t\tsnprintf(mz_name, sizeof(mz_name),\n+\t\t\t\t \"port_%u_queue_%u_aged_out_ring\",\n+\t\t\t\t dev->data->port_id, qidx);\n+\t\t\tr = rte_ring_create(mz_name, ring_size, SOCKET_ID_ANY,\n+\t\t\t\t\t    flags);\n+\t\t\tif (r == NULL) {\n+\t\t\t\tDRV_LOG(ERR, \"\\\"%s\\\" creation failed: %s\",\n+\t\t\t\t\tmz_name, rte_strerror(rte_errno));\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t\tage_info->hw_q_age->aged_lists[qidx] = r;\n+\t\t\tDRV_LOG(DEBUG,\n+\t\t\t\t\"\\\"%s\\\" is successfully created (size=%u).\",\n+\t\t\t\tmz_name, ring_size);\n+\t\t}\n+\t\tage_info->hw_q_age->nb_rings = nb_queues;\n+\t} else {\n+\t\tsnprintf(mz_name, sizeof(mz_name), \"port_%u_aged_out_ring\",\n+\t\t\t dev->data->port_id);\n+\t\tr = rte_ring_create(mz_name, ring_size, SOCKET_ID_ANY, flags);\n+\t\tif (r == NULL) {\n+\t\t\tDRV_LOG(ERR, \"\\\"%s\\\" creation failed: %s\", mz_name,\n+\t\t\t\trte_strerror(rte_errno));\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tage_info->hw_age.aged_list = r;\n+\t\tDRV_LOG(DEBUG, \"\\\"%s\\\" is successfully created (size=%u).\",\n+\t\t\tmz_name, ring_size);\n+\t\t/* In non \"strict_queue\" mode, initialize the event. */\n+\t\tMLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);\n+\t}\n+\treturn 0;\n+error:\n+\tMLX5_ASSERT(strict_queue);\n+\twhile (qidx--)\n+\t\trte_ring_free(age_info->hw_q_age->aged_lists[qidx]);\n+\trte_free(age_info->hw_q_age);\n+\treturn -1;\n+}\n+\n+/**\n+ * Destroy the shared aging list information per port.\n+ *\n+ * @param priv\n+ *   Pointer to port private object.\n+ */\n+static void\n+mlx5_hws_age_info_destroy(struct mlx5_priv *priv)\n+{\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\n+\tif (priv->hws_strict_queue) {\n+\t\tuint16_t nb_queues = age_info->hw_q_age->nb_rings;\n+\t\tuint32_t qidx;\n+\n+\t\tfor (qidx = 0; qidx < nb_queues; ++qidx)\n+\t\t\trte_ring_free(age_info->hw_q_age->aged_lists[qidx]);\n+\t\trte_free(age_info->hw_q_age);\n+\t} else {\n+\t\trte_ring_free(age_info->hw_age.aged_list);\n+\t}\n+}\n+\n+/**\n+ * Initialize the aging mechanism per port.\n+ *\n+ * @param dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param attr\n+ *   Port configuration attributes.\n+ * @param nb_queues\n+ *   Number of HWS queues.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_hws_age_pool_init(struct rte_eth_dev *dev,\n+\t\t       const struct rte_flow_port_attr *attr,\n+\t\t       uint16_t nb_queues)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct mlx5_indexed_pool_config cfg = {\n+\t\t.size =\n+\t\t      RTE_CACHE_LINE_ROUNDUP(sizeof(struct mlx5_hws_age_param)),\n+\t\t.need_lock = 1,\n+\t\t.release_mem_en = !!priv->sh->config.reclaim_mode,\n+\t\t.malloc = mlx5_malloc,\n+\t\t.free = mlx5_free,\n+\t\t.type = \"mlx5_hws_age_pool\",\n+\t};\n+\tbool strict_queue = !!(attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);\n+\tuint32_t 
nb_alloc_cnts;\n+\tuint32_t rsize;\n+\tuint32_t nb_ages_updated;\n+\tint ret;\n+\n+\tMLX5_ASSERT(priv->hws_cpool);\n+\tnb_alloc_cnts = mlx5_hws_cnt_pool_get_size(priv->hws_cpool);\n+\tif (strict_queue) {\n+\t\trsize = mlx5_hws_aged_out_q_ring_size_get(nb_alloc_cnts,\n+\t\t\t\t\t\t\t  nb_queues);\n+\t\tnb_ages_updated = rsize * nb_queues + attr->nb_aging_objects;\n+\t} else {\n+\t\trsize = mlx5_hws_aged_out_ring_size_get(nb_alloc_cnts);\n+\t\tnb_ages_updated = rsize + attr->nb_aging_objects;\n+\t}\n+\tret = mlx5_hws_age_info_init(dev, nb_queues, strict_queue, rsize);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\tcfg.trunk_size = rte_align32pow2(nb_ages_updated);\n+\tage_info->ages_ipool = mlx5_ipool_create(&cfg);\n+\tif (age_info->ages_ipool == NULL) {\n+\t\tmlx5_hws_age_info_destroy(priv);\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n+\tpriv->hws_age_req = 1;\n+\treturn 0;\n+}\n+\n+/**\n+ * Cleanup all aging resources per port.\n+ *\n+ * @param priv\n+ *   Pointer to port private object.\n+ */\n+void\n+mlx5_hws_age_pool_destroy(struct mlx5_priv *priv)\n+{\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\n+\tMLX5_ASSERT(priv->hws_age_req);\n+\tmlx5_ipool_destroy(age_info->ages_ipool);\n+\tage_info->ages_ipool = NULL;\n+\tmlx5_hws_age_info_destroy(priv);\n+\tpriv->hws_age_req = 0;\n+}\n+\n #endif\ndiff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h\nindex 5fab4ba597..e311923f71 100644\n--- a/drivers/net/mlx5/mlx5_hws_cnt.h\n+++ b/drivers/net/mlx5/mlx5_hws_cnt.h\n@@ -10,26 +10,26 @@\n #include \"mlx5_flow.h\"\n \n /*\n- * COUNTER ID's layout\n+ * HWS COUNTER ID's layout\n  *       3                   2                   1                   0\n  *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0\n  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n- *    | T |       | D |                                               |\n- *    ~ Y |       | C |                    IDX                        ~\n- *    | P |       | S |                                               |\n+ *    |  T  |     | D |                                               |\n+ *    ~  Y  |     | C |                    IDX                        ~\n+ *    |  P  |     | S |                                               |\n  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n  *\n- *    Bit 31:30 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10\n+ *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10\n  *    Bit 25:24 = DCS index\n  *    Bit 23:00 = IDX in this counter belonged DCS bulk.\n  */\n-typedef uint32_t cnt_id_t;\n \n-#define MLX5_HWS_CNT_DCS_NUM 4\n #define MLX5_HWS_CNT_DCS_IDX_OFFSET 24\n #define MLX5_HWS_CNT_DCS_IDX_MASK 0x3\n #define MLX5_HWS_CNT_IDX_MASK ((1UL << MLX5_HWS_CNT_DCS_IDX_OFFSET) - 1)\n \n+#define MLX5_HWS_AGE_IDX_MASK (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1)\n+\n struct mlx5_hws_cnt_dcs {\n \tvoid *dr_action;\n \tuint32_t batch_sz;\n@@ -44,12 +44,22 @@ struct mlx5_hws_cnt_dcs_mng {\n \n struct mlx5_hws_cnt {\n \tstruct flow_counter_stats reset;\n+\tbool in_used; /* Indicates whether this counter is in use or in the pool. */\n \tunion {\n-\t\tuint32_t share: 1;\n-\t\t/*\n-\t\t * share will be set to 1 when this counter is used as indirect\n-\t\t * action. 
Only meaningful when user own this counter.\n-\t\t */\n+\t\tstruct {\n+\t\t\tuint32_t share:1;\n+\t\t\t/*\n+\t\t\t * share will be set to 1 when this counter is used as\n+\t\t\t * indirect action.\n+\t\t\t */\n+\t\t\tuint32_t age_idx:24;\n+\t\t\t/*\n+\t\t\t * When this counter is used for aging, it saves the\n+\t\t\t * index of the AGE parameter. For a pure counter\n+\t\t\t * (without aging) this index is zero.\n+\t\t\t */\n+\t\t};\n+\t\t/* This struct is only meaningful when the user owns the counter. */\n \t\tuint32_t query_gen_when_free;\n \t\t/*\n \t\t * When PMD own this counter (user put back counter to PMD\n@@ -96,8 +106,48 @@ struct mlx5_hws_cnt_pool {\n \tstruct rte_ring *free_list;\n \tstruct rte_ring *wait_reset_list;\n \tstruct mlx5_hws_cnt_pool_caches *cache;\n+\tuint64_t time_of_last_age_check;\n } __rte_cache_aligned;\n \n+/* HWS AGE status. */\n+enum {\n+\tHWS_AGE_FREE, /* Initialized state. */\n+\tHWS_AGE_CANDIDATE, /* AGE assigned to flows. */\n+\tHWS_AGE_CANDIDATE_INSIDE_RING,\n+\t/*\n+\t * AGE assigned to flows but still inside the ring. It was aged-out,\n+\t * but the timeout was changed, so it is still a candidate.\n+\t */\n+\tHWS_AGE_AGED_OUT_REPORTED,\n+\t/*\n+\t * Aged-out, reported by rte_flow_get_q_aged_flows, waiting for destroy.\n+\t */\n+\tHWS_AGE_AGED_OUT_NOT_REPORTED,\n+\t/*\n+\t * Aged-out, inside the aged-out ring.\n+\t * Waiting for rte_flow_get_q_aged_flows and destroy.\n+\t */\n+};\n+\n+/* HWS counter age parameter. */\n+struct mlx5_hws_age_param {\n+\tuint32_t timeout; /* Aging timeout in seconds (atomically accessed). */\n+\tuint32_t sec_since_last_hit;\n+\t/* Time in seconds since last hit (atomically accessed). */\n+\tuint16_t state; /* AGE state (atomically accessed). */\n+\tuint64_t accumulator_last_hits;\n+\t/* Last total hits value, kept for comparison. */\n+\tuint64_t accumulator_hits;\n+\t/* Accumulator for hits coming from several counters. */\n+\tuint32_t accumulator_cnt;\n+\t/* Number of counters that already updated the accumulator this second. */\n+\tuint32_t nb_cnts; /* Number of counters used by this AGE. */\n+\tuint32_t queue_id; /* Queue id of the counter. */\n+\tcnt_id_t own_cnt_index;\n+\t/* Counter action created specifically for this AGE action. */\n+\tvoid *context; /* Flow AGE context. */\n+} __rte_packed __rte_cache_aligned;\n+\n /**\n  * Translate counter id into internal index (start from 0), which can be used\n  * as index of raw/cnt pool.\n@@ -107,7 +157,7 @@ struct mlx5_hws_cnt_pool {\n  * @return\n  *   Internal index\n  */\n-static __rte_always_inline cnt_id_t\n+static __rte_always_inline uint32_t\n mlx5_hws_cnt_iidx(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)\n {\n \tuint8_t dcs_idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;\n@@ -139,7 +189,7 @@ mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)\n  *   Counter id\n  */\n static __rte_always_inline cnt_id_t\n-mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, cnt_id_t iidx)\n+mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, uint32_t iidx)\n {\n \tstruct mlx5_hws_cnt_dcs_mng *dcs_mng = &cpool->dcs_mng;\n \tuint32_t idx;\n@@ -344,9 +394,10 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool,\n \tstruct rte_ring_zc_data zcdr = {0};\n \tstruct rte_ring *qcache = NULL;\n \tunsigned int wb_num = 0; /* cache write-back number. 
*/\n-\tcnt_id_t iidx;\n+\tuint32_t iidx;\n \n \tiidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);\n+\tcpool->pool[iidx].in_used = false;\n \tcpool->pool[iidx].query_gen_when_free =\n \t\t__atomic_load_n(&cpool->query_gen, __ATOMIC_RELAXED);\n \tif (likely(queue != NULL))\n@@ -388,20 +439,23 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool,\n  *   A pointer to HWS queue. If null, it means fetch from common pool.\n  * @param cnt_id\n  *   A pointer to a cnt_id_t * pointer (counter id) that will be filled.\n+ * @param age_idx\n+ *   Index of the AGE parameter using this counter; zero means there is no such AGE.\n+ *\n  * @return\n  *   - 0: Success; objects taken.\n  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.\n  *   - -EAGAIN: counter is not ready; try again.\n  */\n static __rte_always_inline int\n-mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool,\n-\t\tuint32_t *queue, cnt_id_t *cnt_id)\n+mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,\n+\t\t      cnt_id_t *cnt_id, uint32_t age_idx)\n {\n \tunsigned int ret;\n \tstruct rte_ring_zc_data zcdc = {0};\n \tstruct rte_ring *qcache = NULL;\n-\tuint32_t query_gen = 0;\n-\tcnt_id_t iidx, tmp_cid = 0;\n+\tuint32_t iidx, query_gen = 0;\n+\tcnt_id_t tmp_cid = 0;\n \n \tif (likely(queue != NULL))\n \t\tqcache = cpool->cache->qcache[*queue];\n@@ -422,6 +476,8 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool,\n \t\t__hws_cnt_query_raw(cpool, *cnt_id,\n \t\t\t\t    &cpool->pool[iidx].reset.hits,\n \t\t\t\t    &cpool->pool[iidx].reset.bytes);\n+\t\tcpool->pool[iidx].in_used = true;\n+\t\tcpool->pool[iidx].age_idx = age_idx;\n \t\treturn 0;\n \t}\n \tret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t), 1,\n@@ -455,6 +511,8 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool,\n \t\t\t    &cpool->pool[iidx].reset.bytes);\n \trte_ring_dequeue_zc_elem_finish(qcache, 1);\n \tcpool->pool[iidx].share = 0;\n+\tcpool->pool[iidx].in_used = true;\n+\tcpool->pool[iidx].age_idx = age_idx;\n \treturn 0;\n }\n \n@@ -478,16 +536,16 @@ mlx5_hws_cnt_pool_get_action_offset(struct mlx5_hws_cnt_pool *cpool,\n }\n \n static __rte_always_inline int\n-mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)\n+mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,\n+\t\t\tuint32_t age_idx)\n {\n \tint ret;\n \tuint32_t iidx;\n \n-\tret = mlx5_hws_cnt_pool_get(cpool, NULL, cnt_id);\n+\tret = mlx5_hws_cnt_pool_get(cpool, NULL, cnt_id, age_idx);\n \tif (ret != 0)\n \t\treturn ret;\n \tiidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);\n-\tMLX5_ASSERT(cpool->pool[iidx].share == 0);\n \tcpool->pool[iidx].share = 1;\n \treturn 0;\n }\n@@ -513,10 +571,73 @@ mlx5_hws_cnt_is_shared(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)\n \treturn cpool->pool[iidx].share ? 
true : false;\n }\n \n+static __rte_always_inline void\n+mlx5_hws_cnt_age_set(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,\n+\t\t     uint32_t age_idx)\n+{\n+\tuint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);\n+\n+\tMLX5_ASSERT(cpool->pool[iidx].share);\n+\tcpool->pool[iidx].age_idx = age_idx;\n+}\n+\n+static __rte_always_inline uint32_t\n+mlx5_hws_cnt_age_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)\n+{\n+\tuint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);\n+\n+\tMLX5_ASSERT(cpool->pool[iidx].share);\n+\treturn cpool->pool[iidx].age_idx;\n+}\n+\n+static __rte_always_inline cnt_id_t\n+mlx5_hws_age_cnt_get(struct mlx5_priv *priv, struct mlx5_hws_age_param *param,\n+\t\t     uint32_t age_idx)\n+{\n+\tif (!param->own_cnt_index) {\n+\t\t/* Create an indirect counter for internal usage. */\n+\t\tif (mlx5_hws_cnt_shared_get(priv->hws_cpool,\n+\t\t\t\t\t    &param->own_cnt_index, age_idx) < 0)\n+\t\t\treturn 0;\n+\t\tparam->nb_cnts++;\n+\t}\n+\treturn param->own_cnt_index;\n+}\n+\n+static __rte_always_inline void\n+mlx5_hws_age_nb_cnt_increase(struct mlx5_priv *priv, uint32_t age_idx)\n+{\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct mlx5_indexed_pool *ipool = age_info->ages_ipool;\n+\tstruct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);\n+\n+\tMLX5_ASSERT(param != NULL);\n+\tparam->nb_cnts++;\n+}\n+\n+static __rte_always_inline void\n+mlx5_hws_age_nb_cnt_decrease(struct mlx5_priv *priv, uint32_t age_idx)\n+{\n+\tstruct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);\n+\tstruct mlx5_indexed_pool *ipool = age_info->ages_ipool;\n+\tstruct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);\n+\n+\tif (param != NULL)\n+\t\tparam->nb_cnts--;\n+}\n+\n+static __rte_always_inline bool\n+mlx5_hws_age_is_indirect(uint32_t age_idx)\n+{\n+\treturn (age_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==\n+\t\tMLX5_INDIRECT_ACTION_TYPE_AGE ? true : false;\n+}\n+\n /* init HWS counter pool. */\n struct mlx5_hws_cnt_pool *\n-mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,\n-\t\tconst struct mlx5_hws_cache_param *ccfg);\n+mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,\n+\t\t       const struct mlx5_hws_cnt_pool_cfg *pcfg,\n+\t\t       const struct mlx5_hws_cache_param *ccfg);\n \n void\n mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool *cntp);\n@@ -555,4 +676,28 @@ mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh);\n void\n mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh);\n \n+int\n+mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,\n+\t\t\t    struct rte_flow_error *error);\n+\n+uint32_t\n+mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,\n+\t\t\t   bool shared, const struct rte_flow_action_age *age,\n+\t\t\t   uint32_t flow_idx, struct rte_flow_error *error);\n+\n+int\n+mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,\n+\t\t\t   const void *update, struct rte_flow_error *error);\n+\n+void *\n+mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx);\n+\n+int\n+mlx5_hws_age_pool_init(struct rte_eth_dev *dev,\n+\t\t       const struct rte_flow_port_attr *attr,\n+\t\t       uint16_t nb_queues);\n+\n+void\n+mlx5_hws_age_pool_destroy(struct mlx5_priv *priv);\n+\n #endif /* _MLX5_HWS_CNT_H_ */\n",
    "prefixes": [
        "v2",
        "13/17"
    ]
}