get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update).
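
As a quick illustration of driving this endpoint, the sketch below uses Python
with the requests library (an assumed dependency; any HTTP client works).
Reads are anonymous; PUT and PATCH require an API token for an account with
maintainer rights on the project, sent in the standard
"Authorization: Token ..." header. The token value and target state below are
hypothetical, and the writable-field list is an assumption based on typical
Patchwork deployments.

import requests

BASE = "https://patches.dpdk.org/api"

# Fetch a single patch as JSON (no authentication required).
patch = requests.get(f"{BASE}/patches/125149/").json()
print(patch["name"], patch["state"], patch["check"])

# Download the raw patch in mbox form via the URL the API reports.
mbox = requests.get(patch["mbox"]).text

# Partially update the patch with PATCH; "state", "delegate" and
# "archived" are typically among the writable fields.
resp = requests.patch(
    f"{BASE}/patches/125149/",
    headers={"Authorization": "Token 0123456789abcdef"},  # hypothetical token
    json={"state": "under-review"},
)
resp.raise_for_status()

PATCH is usually preferable to PUT here, since a partial update avoids having
to re-send every writable field.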

GET /api/patches/125149/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 125149,
    "url": "https://patches.dpdk.org/api/patches/125149/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20230315170342.214127-3-mattias.ronnblom@ericsson.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230315170342.214127-3-mattias.ronnblom@ericsson.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230315170342.214127-3-mattias.ronnblom@ericsson.com",
    "date": "2023-03-15T17:03:42",
    "name": "[RFC,v2,2/2] eal: add high-performance timer facility",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "9fb92ef0349c77aaf2b2caafbc0f1acbfbbd96ad",
    "submitter": {
        "id": 1077,
        "url": "https://patches.dpdk.org/api/people/1077/?format=api",
        "name": "Mattias Rönnblom",
        "email": "mattias.ronnblom@ericsson.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20230315170342.214127-3-mattias.ronnblom@ericsson.com/mbox/",
    "series": [
        {
            "id": 27404,
            "url": "https://patches.dpdk.org/api/series/27404/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=27404",
            "date": "2023-03-15T17:03:40",
            "name": "Add high-performance timer facility",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/27404/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/125149/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/125149/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5EC8741EA2;\n\tWed, 15 Mar 2023 18:09:56 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B844142C76;\n\tWed, 15 Mar 2023 18:09:54 +0100 (CET)",
            "from EUR05-VI1-obe.outbound.protection.outlook.com\n (mail-vi1eur05on2082.outbound.protection.outlook.com [40.107.21.82])\n by mails.dpdk.org (Postfix) with ESMTP id 7A02842BAC\n for <dev@dpdk.org>; Wed, 15 Mar 2023 18:09:53 +0100 (CET)",
            "from AM6P191CA0034.EURP191.PROD.OUTLOOK.COM (2603:10a6:209:8b::47)\n by PAXPR07MB8387.eurprd07.prod.outlook.com (2603:10a6:102:229::16) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6178.26; Wed, 15 Mar\n 2023 17:09:48 +0000",
            "from AM0EUR02FT016.eop-EUR02.prod.protection.outlook.com\n (2603:10a6:209:8b:cafe::34) by AM6P191CA0034.outlook.office365.com\n (2603:10a6:209:8b::47) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6178.29 via Frontend\n Transport; Wed, 15 Mar 2023 17:09:48 +0000",
            "from oa.msg.ericsson.com (192.176.1.74) by\n AM0EUR02FT016.mail.protection.outlook.com (10.13.54.178) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256) id\n 15.20.6178.20 via Frontend Transport; Wed, 15 Mar 2023 17:09:48 +0000",
            "from ESESBMB502.ericsson.se (153.88.183.169) by\n ESESBMB505.ericsson.se (153.88.183.172) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id\n 15.1.2507.17; Wed, 15 Mar 2023 18:09:42 +0100",
            "from seliicinfr00050.seli.gic.ericsson.se (153.88.183.153) by\n smtp.internal.ericsson.com (153.88.183.185) with Microsoft SMTP Server id\n 15.1.2507.17 via Frontend Transport; Wed, 15 Mar 2023 18:09:42 +0100",
            "from breslau.. (seliicwb00002.seli.gic.ericsson.se [10.156.25.100])\n by seliicinfr00050.seli.gic.ericsson.se (Postfix) with ESMTP id\n 120361C006A; Wed, 15 Mar 2023 18:09:42 +0100 (CET)"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Rs5oiGhBemYnIlNNM0Fp+AmFG6PWruBlAIHGs5+KfEfUECKg4ZJqCT/+8cnamkRhzjtWa/Nxlql2/S/xgZWtoqABEdVRVydoDX7vSYcDb2TDNfgj1VVSW27lKHm/w8d5GNKFjsNDyrwRvfzOxSgJzsvb+s0X8JrDOMOlwDbkrL5x64WDSRAd8j0XEe8EkjR6QQjJ/52M0/lRZXo2l07aIY/BfvOeWbOXcl+OdgT15Xpk+FsQT/7T//CXMFOTVqmzdn8reCYYniHOHVB9HUnojN/KAvx58mEYHKUy1nu4+7mkyICN6t67z1ASOlo478+owngV3GXqtezou3yqaLu6KA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=8W0TxLMkJYwaIXTqYhiFuXMpf+liC1mfkEa9Fnx8tPM=;\n b=MWEy1oPFbEQY23LZaKdML43HPs2yHazFzRVnf8viPIK5fG4IHEtqcCbfXNC6UR7XmslsgLFbE7c868BJXjGBko67yc3Kq74SWUaMFH9LkqP87LQM09dAO/ToWNiUCLBu9podf0nfOSMMAXs3CkO8vcuhzTJMcQpZCccXwHWCGapEs+8O/Qb1M8qNM1RuS0Bpmb3HIDiuyNLqt98ghYUnwIreRfAy1ldsXGxYD+ObUsQQmr6Wbjh2LaiVwnHHa+/5ic0rnrzRSDL5p8/s3j1fNegAP/2f5yrUb4VrTNg0wQcHe1XukJiqUakf3ZQlZwyfku8U9RCDg1mpN2ksjUV13A==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 192.176.1.74) smtp.rcpttodomain=dpdk.org smtp.mailfrom=ericsson.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=ericsson.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=ericsson.com;\n s=selector1;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=8W0TxLMkJYwaIXTqYhiFuXMpf+liC1mfkEa9Fnx8tPM=;\n b=WJ34yNaYMFO1vBswAUxNH8LuHR7zqFLDoYUp2YXlG9/49uXrZ9DT1HfejjaIptG/2VFMdrf95OJHy5wNCu+L/xsJG2PugsuMKwEMfrVyhODkcBYHoeF052CKMSimGD+N0sjZyT8idw+xuk5t2mmR1M45dLIoghtP/CY0d6++r/w=",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 192.176.1.74)\n smtp.mailfrom=ericsson.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=ericsson.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of ericsson.com designates\n 192.176.1.74 as permitted sender)\n receiver=protection.outlook.com;\n client-ip=192.176.1.74; helo=oa.msg.ericsson.com; pr=C",
        "From": "=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,\n David Marchand <david.marchand@redhat.com>, <maria.lingemark@ericsson.com>,\n Stefan Sundkvist <stefan.sundkvist@ericsson.com>,\n Stephen Hemminger <stephen@networkplumber.org>, =?utf-8?q?Morten_Br=C3=B8ru?=\n\t=?utf-8?q?p?= <mb@smartsharesystems.com>,\n Tyler Retzlaff <roretzla@linux.microsoft.com>, =?utf-8?q?Mattias_R=C3=B6nnb?=\n\t=?utf-8?q?lom?= <mattias.ronnblom@ericsson.com>",
        "Subject": "[RFC v2 2/2] eal: add high-performance timer facility",
        "Date": "Wed, 15 Mar 2023 18:03:42 +0100",
        "Message-ID": "<20230315170342.214127-3-mattias.ronnblom@ericsson.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20230315170342.214127-1-mattias.ronnblom@ericsson.com>",
        "References": "<20230228093916.87206-1-mattias.ronnblom@ericsson.com>\n <20230315170342.214127-1-mattias.ronnblom@ericsson.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"UTF-8\"",
        "Content-Transfer-Encoding": "8bit",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "AM0EUR02FT016:EE_|PAXPR07MB8387:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "0bc6a83b-934d-4528-d77d-08db25781509",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 5/xdqe0d54y06rifrg/2yKhLAK2R6gvxlmGmYMOkDTVfL3xv/AJwlO5N2I499XOQPr1ibFCBZN/htyVSIMczMmoJgI8/CX3+/Z27+UIfJRgaWTR5OXtOipV05odWZWqUw3R0MkJlgNV25dl9DG5tjuJmmKZKiuYMgE8fB/9YtDKZwEvXgpijtPgUTnum98PKyKGrQRyMU4VHKz7SBgxSxtQ9QQKhpAIZPV2sXPKuCxqQzbhmg5d3RSTZlp3Y+XKzXm02oPgfHjPDh96VTF/Ggszjn1mTnPQWKyAiHCy73ZnJy9/tfNlN1EIRZMv/a5ZAGGPnA6ZA0rRctDaotdrw3NN45qnXwivHOJw4NkZF5xKY+KziOlj/1rjuRnTvlq5fKdriQv7nf69Dm6KLVewLspFZT3kVyeH+9s4wu2DJ6otHzX12JH84rX7wBBybcJqIeRA3gQR+612z48YkKfbaI6uedL4r916tGNqhk1G2gIyWTvlQ1yOJYSSsPEzz/w1XEQgzhavkZJv7s9Vg0Vna4XNLqgnn2EskY9/BjD2AzqfTWmX4zq5H+WtgJt+sLlyPD6DURn2hUzLRBO7XX00MRsriwFaABvmR7rlkzr6XDaD7yKlmLiklSXTYg0LO7DQhD9Ns4xHj+q+MK+2Her1GJAYIbYLa6sXCS+vObPZy6Rors49OIvqqm3CZomSqDQURsXDB7ACSBV9EuPNNcF+HP4yDRmhTiBzlja9IMCz1dlaRCsGxJLsdEDywgC0hdNzR",
        "X-Forefront-Antispam-Report": "CIP:192.176.1.74; CTRY:SE; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:oa.msg.ericsson.com; PTR:office365.se.ericsson.net;\n CAT:NONE;\n SFS:(13230025)(4636009)(136003)(376002)(396003)(346002)(39860400002)(451199018)(46966006)(40470700004)(36840700001)(66899018)(2906002)(30864003)(83380400001)(40480700001)(5660300002)(107886003)(1076003)(26005)(36756003)(336012)(66574015)(2616005)(316002)(186003)(6266002)(47076005)(41300700001)(86362001)(8936002)(54906003)(82310400005)(356005)(40460700003)(82960400001)(6916009)(82740400003)(478600001)(8676002)(7636003)(4326008)(36860700001)(70206006)(70586007)(579004)(559001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "ericsson.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "15 Mar 2023 17:09:48.1545 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 0bc6a83b-934d-4528-d77d-08db25781509",
        "X-MS-Exchange-CrossTenant-Id": "92e84ceb-fbfd-47ab-be52-080c6b87953f",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=92e84ceb-fbfd-47ab-be52-080c6b87953f; Ip=[192.176.1.74];\n Helo=[oa.msg.ericsson.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n AM0EUR02FT016.eop-EUR02.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PAXPR07MB8387",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The htimer library attempts at providing a timer facility with roughly\nthe same functionality, but less overhead and better scalability than\nDPDK timer library.\n\nThe htimer library employs per-lcore hierarchical timer wheels and a\nmessage-based synchronization/MT-safety scheme.\n\nRFC v2:\n * Fix spelling.\n * Fix signed/unsigned comparisons and discontinue the use of name-less\n   function parameters, both of which may result in compiler warnings.\n * Undo the accidental removal of the bitset tests from the 'fast_tests'.\n * Add a number of missing include files, causing build failures\n   (e.g., on AArch64 builds).\n * Add perf test attempting to compare rte_timer, rte_htimer and rte_htw.\n * Use nanoseconds (instead of TSC) as the default time unit.\n * add() and manage() has flags which allows the caller to specify the\n   time unit (nanoseconds, TSC, or ticks) for the times provided.\n\nSigned-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>\n---\n app/test/meson.build                  |   8 +\n app/test/test_htimer_mgr.c            | 674 +++++++++++++++++++++++++\n app/test/test_htimer_mgr_perf.c       | 322 ++++++++++++\n app/test/test_htw.c                   | 478 ++++++++++++++++++\n app/test/test_htw_perf.c              | 181 +++++++\n app/test/test_timer_htimer_htw_perf.c | 693 ++++++++++++++++++++++++++\n doc/api/doxy-api-index.md             |   5 +-\n doc/api/doxy-api.conf.in              |   1 +\n lib/htimer/meson.build                |   7 +\n lib/htimer/rte_htimer.h               |  68 +++\n lib/htimer/rte_htimer_mgr.c           | 547 ++++++++++++++++++++\n lib/htimer/rte_htimer_mgr.h           | 516 +++++++++++++++++++\n lib/htimer/rte_htimer_msg.h           |  44 ++\n lib/htimer/rte_htimer_msg_ring.c      |  18 +\n lib/htimer/rte_htimer_msg_ring.h      |  55 ++\n lib/htimer/rte_htw.c                  | 445 +++++++++++++++++\n lib/htimer/rte_htw.h                  |  49 ++\n lib/htimer/version.map                |  17 +\n lib/meson.build                       |   1 +\n 19 files changed, 4128 insertions(+), 1 deletion(-)\n create mode 100644 app/test/test_htimer_mgr.c\n create mode 100644 app/test/test_htimer_mgr_perf.c\n create mode 100644 app/test/test_htw.c\n create mode 100644 app/test/test_htw_perf.c\n create mode 100644 app/test/test_timer_htimer_htw_perf.c\n create mode 100644 lib/htimer/meson.build\n create mode 100644 lib/htimer/rte_htimer.h\n create mode 100644 lib/htimer/rte_htimer_mgr.c\n create mode 100644 lib/htimer/rte_htimer_mgr.h\n create mode 100644 lib/htimer/rte_htimer_msg.h\n create mode 100644 lib/htimer/rte_htimer_msg_ring.c\n create mode 100644 lib/htimer/rte_htimer_msg_ring.h\n create mode 100644 lib/htimer/rte_htw.c\n create mode 100644 lib/htimer/rte_htw.h\n create mode 100644 lib/htimer/version.map",
    "diff": "diff --git a/app/test/meson.build b/app/test/meson.build\nindex 03811ff692..d0308ac09d 100644\n--- a/app/test/meson.build\n+++ b/app/test/meson.build\n@@ -140,9 +140,14 @@ test_sources = files(\n         'test_thash_perf.c',\n         'test_threads.c',\n         'test_timer.c',\n+        'test_timer_htimer_htw_perf.c',\n         'test_timer_perf.c',\n         'test_timer_racecond.c',\n         'test_timer_secondary.c',\n+        'test_htimer_mgr.c',\n+        'test_htimer_mgr_perf.c',\n+        'test_htw.c',\n+        'test_htw_perf.c',\n         'test_ticketlock.c',\n         'test_trace.c',\n         'test_trace_register.c',\n@@ -193,6 +198,7 @@ fast_tests = [\n         ['fib6_autotest', true, true],\n         ['func_reentrancy_autotest', false, true],\n         ['hash_autotest', true, true],\n+        ['htimer_mgr_autotest', true, true],\n         ['interrupt_autotest', true, true],\n         ['ipfrag_autotest', false, true],\n         ['lcores_autotest', true, true],\n@@ -265,6 +271,8 @@ perf_test_names = [\n         'memcpy_perf_autotest',\n         'hash_perf_autotest',\n         'timer_perf_autotest',\n+        'htimer_mgr_perf_autotest',\n+        'htw_perf_autotest',\n         'reciprocal_division',\n         'reciprocal_division_perf',\n         'lpm_perf_autotest',\ndiff --git a/app/test/test_htimer_mgr.c b/app/test/test_htimer_mgr.c\nnew file mode 100644\nindex 0000000000..9e46dec53e\n--- /dev/null\n+++ b/app/test/test_htimer_mgr.c\n@@ -0,0 +1,674 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#include \"test.h\"\n+\n+#include <sys/queue.h>\n+#include <stdlib.h>\n+#include <inttypes.h>\n+\n+#include <rte_common.h>\n+#include <rte_cycles.h>\n+#include <rte_htimer_mgr.h>\n+#include <rte_launch.h>\n+#include <rte_lcore.h>\n+#include <rte_random.h>\n+\n+static int\n+timer_lcore(void *arg)\n+{\n+\tbool *stop = arg;\n+\n+\twhile (!__atomic_load_n(stop, __ATOMIC_RELAXED))\n+\t\trte_htimer_mgr_manage();\n+\n+\treturn 0;\n+}\n+\n+static void\n+count_timer_cb(struct rte_htimer *timer __rte_unused, void *arg)\n+{\n+\tunsigned int *count = arg;\n+\n+\t__atomic_fetch_add(count, 1, __ATOMIC_RELAXED);\n+}\n+\n+static void\n+count_async_cb(struct rte_htimer *timer __rte_unused, int result,\n+\t       void *cb_arg)\n+{\n+\tunsigned int *count = cb_arg;\n+\n+\tif (result == RTE_HTIMER_MGR_ASYNC_RESULT_ADDED)\n+\t\t__atomic_fetch_add(count, 1, __ATOMIC_RELAXED);\n+}\n+\n+static uint64_t\n+s_to_tsc(double s)\n+{\n+\treturn s * rte_get_tsc_hz();\n+}\n+\n+#define ASYNC_ADD_TEST_EXPIRATION_TIME (250*1000) /* ns */\n+#define ASYNC_TEST_TICK (10*1000) /* ns */\n+\n+static int\n+test_htimer_mgr_async_add(unsigned int num_timers_per_lcore)\n+{\n+\tstruct rte_htimer *timers;\n+\tunsigned int timer_idx;\n+\tunsigned int lcore_id;\n+\tbool stop = false;\n+\tunsigned int timeout_count = 0;\n+\tunsigned int async_count = 0;\n+\tunsigned int num_workers = 0;\n+\tuint64_t expiration_time;\n+\tunsigned int num_total_timers;\n+\n+\trte_htimer_mgr_init(ASYNC_TEST_TICK);\n+\n+\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n+\t\tif (rte_eal_remote_launch(timer_lcore, &stop, lcore_id) != 0)\n+\t\t\trte_panic(\"Unable to launch timer lcore\\n\");\n+\t\tnum_workers++;\n+\t}\n+\n+\tnum_total_timers = num_workers * num_timers_per_lcore;\n+\n+\ttimers = malloc(num_total_timers * sizeof(struct rte_htimer));\n+\ttimer_idx = 0;\n+\n+\tif (timers == NULL)\n+\t\trte_panic(\"Unable to allocate heap memory\\n\");\n+\n+\texpiration_time = 
ASYNC_ADD_TEST_EXPIRATION_TIME;\n+\n+\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n+\t\tunsigned int i;\n+\n+\t\tfor (i = 0; i < num_timers_per_lcore; i++) {\n+\t\t\tstruct rte_htimer *timer = &timers[timer_idx++];\n+\n+\t\t\tfor (;;) {\n+\t\t\t\tint rc;\n+\n+\t\t\t\trc = rte_htimer_mgr_async_add(timer, lcore_id,\n+\t\t\t\t\t\t      expiration_time,\n+\t\t\t\t\t\t      RTE_HTIMER_FLAG_TIME_TSC,\n+\t\t\t\t\t\t      count_timer_cb,\n+\t\t\t\t\t\t      &timeout_count, 0,\n+\t\t\t\t\t\t      count_async_cb,\n+\t\t\t\t\t\t      &async_count);\n+\t\t\t\tif (unlikely(rc == -EBUSY))\n+\t\t\t\t\trte_htimer_mgr_process();\n+\t\t\t\telse\n+\t\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\twhile (__atomic_load_n(&async_count, __ATOMIC_RELAXED) !=\n+\t       num_total_timers ||\n+\t       __atomic_load_n(&timeout_count, __ATOMIC_RELAXED) !=\n+\t       num_total_timers)\n+\t\trte_htimer_mgr_manage();\n+\n+\t__atomic_store_n(&stop, true, __ATOMIC_RELAXED);\n+\n+\trte_eal_mp_wait_lcore();\n+\n+\trte_htimer_mgr_deinit();\n+\n+\tfree(timers);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+struct async_recorder_state {\n+\tbool timer_cb_run;\n+\tbool async_add_cb_run;\n+\tbool async_cancel_cb_run;\n+\tbool failed;\n+};\n+\n+static void\n+record_async_add_cb(struct rte_htimer *timer __rte_unused,\n+\t\t    int result, void *cb_arg)\n+{\n+\tstruct async_recorder_state *state = cb_arg;\n+\n+\tif (state->failed)\n+\t\treturn;\n+\n+\tif (state->async_add_cb_run ||\n+\t    result != RTE_HTIMER_MGR_ASYNC_RESULT_ADDED) {\n+\t\tputs(\"async add run already\");\n+\t\tstate->failed = true;\n+\t}\n+\n+\tstate->async_add_cb_run = true;\n+}\n+\n+static void\n+record_async_cancel_cb(struct rte_htimer *timer __rte_unused,\n+\t\t       int result, void *cb_arg)\n+{\n+\tstruct async_recorder_state *state = cb_arg;\n+\n+\tif (state->failed)\n+\t\treturn;\n+\n+\tif (state->async_cancel_cb_run) {\n+\t\tstate->failed = true;\n+\t\treturn;\n+\t}\n+\n+\tswitch (result) {\n+\tcase RTE_HTIMER_MGR_ASYNC_RESULT_EXPIRED:\n+\t\tif (!state->timer_cb_run)\n+\t\t\tstate->failed = true;\n+\t\tbreak;\n+\tcase RTE_HTIMER_MGR_ASYNC_RESULT_CANCELED:\n+\t\tif (state->timer_cb_run)\n+\t\t\tstate->failed = true;\n+\t\tbreak;\n+\tcase RTE_HTIMER_MGR_ASYNC_RESULT_ALREADY_CANCELED:\n+\t\tstate->failed = true;\n+\t}\n+\n+\tstate->async_cancel_cb_run = true;\n+}\n+\n+static int\n+record_check_consistency(struct async_recorder_state *state)\n+{\n+\tif (state->failed)\n+\t\treturn -1;\n+\n+\treturn state->async_cancel_cb_run ? 
1 : 0;\n+}\n+\n+static int\n+records_check_consistency(struct async_recorder_state *states,\n+\t\t\t  unsigned int num_states)\n+{\n+\tunsigned int i;\n+\tint canceled = 0;\n+\n+\tfor (i = 0; i < num_states; i++) {\n+\t\tint rc;\n+\n+\t\trc = record_check_consistency(&states[i]);\n+\n+\t\tif (rc < 0)\n+\t\t\treturn -1;\n+\t\tcanceled += rc;\n+\t}\n+\n+\treturn canceled;\n+}\n+\n+static void\n+log_timer_expiry_cb(struct rte_htimer *timer __rte_unused,\n+\t\t    void *arg)\n+{\n+\tbool *timer_run = arg;\n+\n+\t*timer_run = true;\n+}\n+\n+\n+#define ASYNC_ADD_CANCEL_TEST_EXPIRATION_TIME_MAX 10e-3 /* s */\n+\n+static int\n+test_htimer_mgr_async_add_cancel(unsigned int num_timers_per_lcore)\n+{\n+\tstruct rte_htimer *timers;\n+\tstruct async_recorder_state *recorder_states;\n+\tunsigned int timer_idx = 0;\n+\tunsigned int lcore_id;\n+\tuint64_t now;\n+\tunsigned int num_workers = 0;\n+\tbool stop = false;\n+\tuint64_t max_expiration_time =\n+\t\ts_to_tsc(ASYNC_ADD_CANCEL_TEST_EXPIRATION_TIME_MAX);\n+\tunsigned int num_total_timers;\n+\tint canceled = 0;\n+\n+\trte_htimer_mgr_init(ASYNC_TEST_TICK);\n+\n+\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n+\t\tif (rte_eal_remote_launch(timer_lcore, &stop, lcore_id) != 0)\n+\t\t\trte_panic(\"Unable to launch timer lcore\\n\");\n+\t\tnum_workers++;\n+\t}\n+\n+\tnum_total_timers = num_workers * num_timers_per_lcore;\n+\n+\ttimers = malloc(num_total_timers * sizeof(struct rte_htimer));\n+\trecorder_states =\n+\t\tmalloc(num_total_timers * sizeof(struct async_recorder_state));\n+\n+\tif (timers == NULL || recorder_states == NULL)\n+\t\trte_panic(\"Unable to allocate heap memory\\n\");\n+\n+\tnow = rte_get_tsc_cycles();\n+\n+\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n+\t\tunsigned int i;\n+\n+\t\tfor (i = 0; i < num_timers_per_lcore; i++) {\n+\t\t\tstruct rte_htimer *timer = &timers[timer_idx];\n+\t\t\tstruct async_recorder_state *state =\n+\t\t\t\t&recorder_states[timer_idx];\n+\n+\t\t\ttimer_idx++;\n+\n+\t\t\t*state = (struct async_recorder_state) {};\n+\n+\t\t\tuint64_t expiration_time =\n+\t\t\t\tnow + rte_rand_max(max_expiration_time);\n+\n+\t\t\tfor (;;) {\n+\t\t\t\tint rc;\n+\n+\t\t\t\trc = rte_htimer_mgr_async_add(timer, lcore_id,\n+\t\t\t\t\t\t\t expiration_time,\n+\t\t\t\t\t\t\t 0,\n+\t\t\t\t\t\t\t log_timer_expiry_cb,\n+\t\t\t\t\t\t\t &state->timer_cb_run,\n+\t\t\t\t\t\t\t 0,\n+\t\t\t\t\t\t\t record_async_add_cb,\n+\t\t\t\t\t\t\t state);\n+\n+\t\t\t\tif (unlikely(rc == -EBUSY))\n+\t\t\t\t\trte_htimer_mgr_process();\n+\t\t\t\telse\n+\t\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\ttimer_idx = 0;\n+\n+\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n+\t\tunsigned int i;\n+\n+\t\tfor (i = 0; i < num_timers_per_lcore; i++) {\n+\t\t\tstruct rte_htimer *timer = &timers[timer_idx];\n+\t\t\tstruct async_recorder_state *state =\n+\t\t\t\t&recorder_states[timer_idx];\n+\n+\t\t\ttimer_idx++;\n+\n+\t\t\t/* cancel roughly half of the timers */\n+\t\t\tif (rte_rand_max(2) == 0)\n+\t\t\t\tcontinue;\n+\n+\t\t\tfor (;;) {\n+\t\t\t\tint rc;\n+\n+\t\t\t\trc = rte_htimer_mgr_async_cancel(timer,\n+\t\t\t\t\t\t\trecord_async_cancel_cb,\n+\t\t\t\t\t\t\tstate);\n+\n+\t\t\t\tif (unlikely(rc == -EBUSY)) {\n+\t\t\t\t\tputs(\"busy\");\n+\t\t\t\t\trte_htimer_mgr_process();\n+\t\t\t\t} else\n+\t\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tcanceled++;\n+\t\t}\n+\t}\n+\n+\tfor (;;) {\n+\t\tint cancel_completed;\n+\n+\t\tcancel_completed = records_check_consistency(recorder_states,\n+\t\t\t\t\t\t\t     num_total_timers);\n+\n+\t\tif (cancel_completed < 0) {\n+\t\t\tputs(\"Inconstinency 
found\");\n+\t\t\treturn TEST_FAILED;\n+\t\t}\n+\n+\t\tif (cancel_completed == canceled)\n+\t\t\tbreak;\n+\n+\t\trte_htimer_mgr_process();\n+\t}\n+\n+\t__atomic_store_n(&stop, true, __ATOMIC_RELAXED);\n+\n+\trte_eal_mp_wait_lcore();\n+\n+\trte_htimer_mgr_deinit();\n+\n+\tfree(timers);\n+\tfree(recorder_states);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+/*\n+ * This is a test case where one thread asynchronously adds two timers,\n+ * with the same expiration time; one on the local lcore and one on a\n+ * remote lcore. This creates a tricky situation for the timer\n+ * manager, and for the application as well, if the htimer struct is\n+ * dynamically allocated.\n+ */\n+\n+struct test_timer {\n+\tuint32_t ref_cnt;\n+\tuint64_t expiration_time; /* in TSC, not tick */\n+\tuint32_t *timeout_count;\n+\tbool *failure_occurred;\n+\tstruct rte_htimer htimer;\n+};\n+\n+\n+static struct test_timer *\n+test_timer_create(uint64_t expiration_time, uint32_t *timeout_count,\n+\t\t  bool *failure_occurred)\n+{\n+\tstruct test_timer *timer;\n+\n+\ttimer = malloc(sizeof(struct test_timer));\n+\n+\tif (timer == NULL)\n+\t\trte_panic(\"Unable to allocate timer memory\\n\");\n+\n+\ttimer->ref_cnt = 1;\n+\ttimer->expiration_time = expiration_time;\n+\ttimer->timeout_count = timeout_count;\n+\ttimer->failure_occurred = failure_occurred;\n+\n+\treturn timer;\n+}\n+\n+static void\n+test_timer_inc_ref_cnt(struct test_timer *timer)\n+{\n+\t__atomic_add_fetch(&timer->ref_cnt, 1, __ATOMIC_RELEASE);\n+}\n+\n+static void\n+test_timer_dec_ref_cnt(struct test_timer *timer)\n+{\n+\tif (timer != NULL) {\n+\t\tuint32_t cnt = __atomic_sub_fetch(&timer->ref_cnt, 1,\n+\t\t\t\t\t\t  __ATOMIC_RELEASE);\n+\t\tif (cnt == 0)\n+\t\t\tfree(timer);\n+\t}\n+}\n+\n+static void\n+test_timer_cb(struct rte_htimer *timer, void *arg __rte_unused)\n+{\n+\tstruct test_timer *test_timer =\n+\t\tcontainer_of(timer, struct test_timer, htimer);\n+\tuint64_t now = rte_get_tsc_cycles();\n+\n+\tif (now < test_timer->expiration_time)\n+\t\t*(test_timer->failure_occurred) = true;\n+\n+\t__atomic_fetch_add(test_timer->timeout_count, 1, __ATOMIC_RELAXED);\n+\n+\ttest_timer_dec_ref_cnt(test_timer);\n+}\n+\n+static int\n+worker_lcore(void *arg)\n+{\n+\tbool *stop = arg;\n+\n+\twhile (!__atomic_load_n(stop, __ATOMIC_RELAXED))\n+\t\trte_htimer_mgr_manage();\n+\n+\treturn 0;\n+}\n+\n+struct cancel_timer {\n+\tbool cancel;\n+\tstruct rte_htimer *target_timer;\n+\tuint32_t *cancel_count;\n+\tuint32_t *expired_count;\n+\tbool *failure_occurred;\n+\tstruct rte_htimer htimer;\n+};\n+\n+static struct cancel_timer *\n+cancel_timer_create(bool cancel, struct rte_htimer *target_timer,\n+\t\t    uint32_t *cancel_count, uint32_t *expired_count,\n+\t\t    bool *failure_occurred)\n+{\n+\tstruct cancel_timer *timer;\n+\n+\ttimer = malloc(sizeof(struct cancel_timer));\n+\n+\tif (timer == NULL)\n+\t\trte_panic(\"Unable to allocate timer memory\\n\");\n+\n+\ttimer->cancel = cancel;\n+\ttimer->target_timer = target_timer;\n+\ttimer->cancel_count = cancel_count;\n+\ttimer->expired_count = expired_count;\n+\ttimer->failure_occurred = failure_occurred;\n+\n+\treturn timer;\n+}\n+\n+static void\n+async_cancel_cb(struct rte_htimer *timer, int result, void *cb_arg)\n+{\n+\tstruct test_timer *test_timer =\n+\t\tcontainer_of(timer, struct test_timer, htimer);\n+\tstruct cancel_timer *cancel_timer = cb_arg;\n+\tbool *failure_occurred = cancel_timer->failure_occurred;\n+\n+\tif (!cancel_timer->cancel || cancel_timer->target_timer != timer)\n+\t\t*failure_occurred = true;\n+\n+\tif (result == 
RTE_HTIMER_MGR_ASYNC_RESULT_CANCELED) {\n+\t\tuint32_t *cancel_count = cancel_timer->cancel_count;\n+\n+\t\t/* decrease target lcore's ref count */\n+\t\ttest_timer_dec_ref_cnt(test_timer);\n+\t\t(*cancel_count)++;\n+\t} else if (result == RTE_HTIMER_MGR_ASYNC_RESULT_EXPIRED) {\n+\t\tuint32_t *expired_count = cancel_timer->expired_count;\n+\n+\t\t(*expired_count)++;\n+\t} else\n+\t\t*failure_occurred = true;\n+\n+\t/* source lcore's ref count */\n+\ttest_timer_dec_ref_cnt(test_timer);\n+\n+\tfree(cancel_timer);\n+}\n+\n+static void\n+cancel_timer_cb(struct rte_htimer *timer, void *arg __rte_unused)\n+{\n+\tstruct cancel_timer *cancel_timer =\n+\t\tcontainer_of(timer, struct cancel_timer, htimer);\n+\n+\tif (cancel_timer->cancel) {\n+\t\tint rc;\n+\n+\t\trc = rte_htimer_mgr_async_cancel(cancel_timer->target_timer,\n+\t\t\t\t\t\t async_cancel_cb, cancel_timer);\n+\n+\t\tif (rc == -EBUSY)\n+\t\t\trte_htimer_mgr_add(timer, 0, 0, cancel_timer_cb,\n+\t\t\t\t\t   NULL, 0);\n+\t} else\n+\t\tfree(cancel_timer);\n+}\n+\n+#define REF_CNT_TEST_TICK 10 /* ns */\n+#define REF_CNT_AVG_EXPIRATION_TIME (50 * 1000) /* ns */\n+#define REF_CNT_MAX_EXPIRATION_TIME (2 * REF_CNT_AVG_EXPIRATION_TIME)\n+#define REF_CNT_CANCEL_FUZZ(expiration_time) \\\n+\t((uint64_t)((expiration_time) * (rte_drand()/10 + 0.95)))\n+\n+static int\n+test_htimer_mgr_ref_cnt_timers(unsigned int num_timers_per_lcore)\n+{\n+\tunsigned int lcore_id;\n+\tbool stop = false;\n+\tunsigned int num_workers = 0;\n+\tstruct test_timer **timers;\n+\tstruct cancel_timer **cancel_timers;\n+\tunsigned int num_timers;\n+\tuint32_t timeout_count = 0;\n+\tuint32_t cancel_count = 0;\n+\tuint32_t expired_count = 0;\n+\tbool failure_occurred = false;\n+\tunsigned int timer_idx;\n+\tunsigned int expected_cancel_attempts;\n+\tuint64_t deadline;\n+\tuint64_t now;\n+\n+\trte_htimer_mgr_init(REF_CNT_TEST_TICK);\n+\n+\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n+\t\tif (rte_eal_remote_launch(worker_lcore, &stop, lcore_id) != 0)\n+\t\t\trte_panic(\"Unable to launch timer lcore\\n\");\n+\t\tnum_workers++;\n+\t}\n+\n+\t/* give the workers a chance to get going */\n+\trte_delay_us_block(10*1000);\n+\n+\tnum_timers = num_timers_per_lcore * num_workers;\n+\n+\ttimers = malloc(sizeof(struct test_timer *) * num_timers);\n+\tcancel_timers = malloc(sizeof(struct cancel_timer *) * num_timers);\n+\n+\tif (timers == NULL || cancel_timers == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\ttimer_idx = 0;\n+\texpected_cancel_attempts = 0;\n+\n+\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n+\t\tunsigned int i;\n+\n+\t\tfor (i = 0; i < num_timers_per_lcore; i++) {\n+\t\t\tuint64_t expiration_time;\n+\t\t\tstruct test_timer *timer;\n+\t\t\tstruct rte_htimer *htimer;\n+\t\t\tbool cancel;\n+\t\t\tstruct cancel_timer *cancel_timer;\n+\t\t\tuint64_t cancel_expiration_time;\n+\n+\t\t\texpiration_time =\n+\t\t\t\tREF_CNT_MAX_EXPIRATION_TIME * rte_drand();\n+\n+\t\t\ttimer = test_timer_create(expiration_time,\n+\t\t\t\t\t\t  &timeout_count,\n+\t\t\t\t\t\t  &failure_occurred);\n+\t\t\thtimer = &timer->htimer;\n+\n+\t\t\ttimers[timer_idx++] = timer;\n+\n+\t\t\t/* for the target lcore's usage of this time */\n+\t\t\ttest_timer_inc_ref_cnt(timer);\n+\n+\t\t\tfor (;;) {\n+\t\t\t\tint rc;\n+\n+\t\t\t\trc = rte_htimer_mgr_async_add(htimer, lcore_id,\n+\t\t\t\t\t\t\t      expiration_time,\n+\t\t\t\t\t\t\t      0, test_timer_cb,\n+\t\t\t\t\t\t\t      NULL, 0, NULL,\n+\t\t\t\t\t\t\t      NULL);\n+\t\t\t\tif (unlikely(rc == 
-EBUSY))\n+\t\t\t\t\trte_htimer_mgr_process();\n+\t\t\t\telse\n+\t\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tcancel = rte_rand_max(2);\n+\n+\t\t\tcancel_timer =\n+\t\t\t\tcancel_timer_create(cancel, &timer->htimer,\n+\t\t\t\t\t\t    &cancel_count,\n+\t\t\t\t\t\t    &expired_count,\n+\t\t\t\t\t\t    &failure_occurred);\n+\n+\t\t\tcancel_expiration_time =\n+\t\t\t\tREF_CNT_CANCEL_FUZZ(expiration_time);\n+\n+\t\t\trte_htimer_mgr_add(&cancel_timer->htimer,\n+\t\t\t\t\t   cancel_expiration_time, 0,\n+\t\t\t\t\t   cancel_timer_cb, NULL, 0);\n+\n+\t\t\tif (cancel)\n+\t\t\t\texpected_cancel_attempts++;\n+\t\t}\n+\t}\n+\n+\tdeadline = rte_get_tsc_cycles() + REF_CNT_MAX_EXPIRATION_TIME +\n+\t\ts_to_tsc(0.25);\n+\n+\tdo {\n+\t\tnow = rte_get_tsc_cycles();\n+\n+\t\trte_htimer_mgr_manage_time(now, RTE_HTIMER_FLAG_TIME_TSC);\n+\n+\t} while (now < deadline);\n+\n+\t__atomic_store_n(&stop, true, __ATOMIC_RELAXED);\n+\n+\trte_eal_mp_wait_lcore();\n+\n+\tif (failure_occurred)\n+\t\treturn TEST_FAILED;\n+\n+\tif ((cancel_count + expired_count) != expected_cancel_attempts)\n+\t\treturn TEST_FAILED;\n+\n+\tif (timeout_count != (num_timers - cancel_count))\n+\t\treturn TEST_FAILED;\n+\n+\trte_htimer_mgr_deinit();\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_htimer_mgr(void)\n+{\n+\tint rc;\n+\n+\trc = test_htimer_mgr_async_add(1);\n+\tif (rc != TEST_SUCCESS)\n+\t\treturn rc;\n+\n+\trc = test_htimer_mgr_async_add(100000);\n+\tif (rc != TEST_SUCCESS)\n+\t\treturn rc;\n+\n+\trc = test_htimer_mgr_async_add_cancel(100);\n+\tif (rc != TEST_SUCCESS)\n+\t\treturn rc;\n+\n+\trc = test_htimer_mgr_ref_cnt_timers(10);\n+\tif (rc != TEST_SUCCESS)\n+\t\treturn rc;\n+\n+\trc = test_htimer_mgr_ref_cnt_timers(10000);\n+\tif (rc != TEST_SUCCESS)\n+\t\treturn rc;\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+REGISTER_TEST_COMMAND(htimer_mgr_autotest, test_htimer_mgr);\ndiff --git a/app/test/test_htimer_mgr_perf.c b/app/test/test_htimer_mgr_perf.c\nnew file mode 100644\nindex 0000000000..cdc513228f\n--- /dev/null\n+++ b/app/test/test_htimer_mgr_perf.c\n@@ -0,0 +1,322 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#include \"test.h\"\n+\n+#include <sys/queue.h>\n+#include <stdlib.h>\n+#include <inttypes.h>\n+\n+#include <rte_cycles.h>\n+#include <rte_htimer_mgr.h>\n+#include <rte_launch.h>\n+#include <rte_lcore.h>\n+#include <rte_malloc.h>\n+#include <rte_random.h>\n+\n+static void\n+nop_cb(struct rte_htimer *timer __rte_unused, void *cb_arg __rte_unused)\n+{\n+}\n+\n+static uint64_t\n+add_rand_timers(struct rte_htimer *timers, uint64_t num,\n+\t\tuint64_t timeout_start, uint64_t max_timeout)\n+{\n+\tuint64_t i;\n+\tuint64_t expiration_times[num];\n+\tuint64_t start_ts;\n+\tuint64_t end_ts;\n+\n+\tfor (i = 0; i < num; i++)\n+\t\texpiration_times[i] =\n+\t\t\t1 + timeout_start + rte_rand_max(max_timeout - 1);\n+\n+\tstart_ts = rte_get_tsc_cycles();\n+\n+\tfor (i = 0; i < num; i++)\n+\t\trte_htimer_mgr_add(&timers[i], expiration_times[i], 0, nop_cb,\n+\t\t\t\t   NULL, RTE_HTIMER_FLAG_ABSOLUTE_TIME);\n+\n+\t/* make sure the timers are actually scheduled in the wheel */\n+\trte_htimer_mgr_process();\n+\n+\tend_ts = rte_get_tsc_cycles();\n+\n+\treturn end_ts - start_ts;\n+}\n+\n+#define TIME_STEP 16\n+\n+static void\n+test_add_manage_perf(const char *scenario_name, uint64_t num_timers,\n+\t\t     uint64_t timespan)\n+{\n+\tuint64_t manage_calls;\n+\tstruct rte_htimer *timers;\n+\tuint64_t start;\n+\tuint64_t now;\n+\tuint64_t start_ts;\n+\tuint64_t end_ts;\n+\tuint64_t add_latency;\n+\tuint64_t 
manage_latency;\n+\n+\trte_htimer_mgr_init(1);\n+\n+\tmanage_calls = timespan / TIME_STEP;\n+\n+\tprintf(\"Scenario: %s\\n\", scenario_name);\n+\tprintf(\"    Configuration:\\n\");\n+\tprintf(\"        Timers: %\"PRIu64\"\\n\", num_timers);\n+\tprintf(\"        Max timeout: %\"PRIu64\" ticks\\n\", timespan);\n+\tprintf(\"        Average timeouts/manage call: %.3f\\n\",\n+\t       num_timers / (double)manage_calls);\n+\tprintf(\"        Time advance per manage call: %d\\n\", TIME_STEP);\n+\n+\tprintf(\"    Results:\\n\");\n+\n+\ttimers = rte_malloc(NULL, sizeof(struct rte_htimer) * num_timers, 0);\n+\n+\tif (timers == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\tstart = 1 + rte_rand_max(UINT64_MAX / 2);\n+\n+\trte_htimer_mgr_manage_time(start - 1, 0);\n+\n+\tadd_latency = add_rand_timers(timers, num_timers, start, timespan);\n+\n+\tstart_ts = rte_get_tsc_cycles();\n+\n+\tfor (now = start; now < (start + timespan); now += TIME_STEP)\n+\t\trte_htimer_mgr_manage_time(now, 0);\n+\n+\tend_ts = rte_get_tsc_cycles();\n+\n+\tmanage_latency = end_ts - start_ts;\n+\n+\tprintf(\"        %.0f TSC cycles / add op\\n\",\n+\t       (double)add_latency / num_timers);\n+\tprintf(\"        %.0f TSC cycles / manage call\\n\",\n+\t       (double)manage_latency / manage_calls);\n+\tprintf(\"        %.1f TSC cycles / tick\\n\",\n+\t       (double)manage_latency / timespan);\n+\n+\trte_htimer_mgr_deinit();\n+\n+\trte_free(timers);\n+}\n+\n+static uint64_t\n+s_to_tsc(double s)\n+{\n+\treturn s * rte_get_tsc_hz();\n+}\n+\n+static double\n+tsc_to_s(uint64_t tsc)\n+{\n+\treturn (double)tsc / (double)rte_get_tsc_hz();\n+}\n+\n+#define ITERATIONS 500\n+\n+static int\n+test_del_perf(uint64_t num_timers, uint64_t timespan)\n+{\n+\tstruct rte_htimer *timers;\n+\tuint64_t start;\n+\tuint64_t i, j;\n+\tuint64_t start_ts;\n+\tuint64_t end_ts;\n+\tuint64_t latency = 0;\n+\n+\trte_htimer_mgr_init(1);\n+\n+\ttimers =\n+\t    rte_malloc(NULL, sizeof(struct rte_htimer) * num_timers, 0);\n+\n+\tif (timers == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\tstart = 1 + rte_rand_max(UINT64_MAX / 2);\n+\n+\tfor (i = 0; i < ITERATIONS; i++) {\n+\t\trte_htimer_mgr_manage_time(start - 1, 0);\n+\n+\t\tadd_rand_timers(timers, num_timers, start, timespan);\n+\n+\t\t/* A manage (or process) call is required to get all\n+\t\t * timers scheduled, which may in turn make them a\n+\t\t * little more expensive to remove.\n+\t\t */\n+\t\trte_htimer_mgr_manage_time(start, 0);\n+\n+\t\tstart_ts = rte_get_tsc_cycles();\n+\n+\t\tfor (j = 0; j < num_timers; j++)\n+\t\t\tif (rte_htimer_mgr_cancel(&timers[j]) < 0)\n+\t\t\t\treturn TEST_FAILED;\n+\n+\t\tend_ts = rte_get_tsc_cycles();\n+\n+\t\tlatency += (end_ts - start_ts);\n+\n+\t\tstart += (timespan + 1);\n+\t}\n+\n+\tprintf(\"Timer delete: %.0f TSC cycles / call\\n\",\n+\t       (double)latency / (double)ITERATIONS / (double)num_timers);\n+\n+\trte_htimer_mgr_deinit();\n+\n+\trte_free(timers);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+target_lcore(void *arg)\n+{\n+\tbool *stop = arg;\n+\n+\twhile (!__atomic_load_n(stop, __ATOMIC_RELAXED))\n+\t\trte_htimer_mgr_manage();\n+\n+\treturn 0;\n+}\n+\n+static void\n+count_async_cb(struct rte_htimer *timer __rte_unused, int result,\n+\t       void *cb_arg)\n+{\n+\tunsigned int *count = cb_arg;\n+\n+\tif (result == RTE_HTIMER_MGR_ASYNC_RESULT_ADDED)\n+\t\t(*count)++;\n+}\n+\n+#define ASYNC_ADD_TEST_TICK s_to_tsc(500e-9)\n+/*\n+ * The number of test timers must be kept less than size of the\n+ * htimer-internal message 
ring for this test case to work.\n+ */\n+#define ASYNC_ADD_TEST_NUM_TIMERS 1000\n+#define ASYNC_ADD_TEST_MIN_TIMEOUT (ASYNC_ADD_TEST_NUM_TIMERS * s_to_tsc(1e-6))\n+#define ASYNC_ADD_TEST_MAX_TIMEOUT (2 * ASYNC_ADD_TEST_MIN_TIMEOUT)\n+\n+static void\n+test_async_add_perf(void)\n+{\n+\tuint64_t max_timeout = ASYNC_ADD_TEST_MAX_TIMEOUT;\n+\tuint64_t min_timeout = ASYNC_ADD_TEST_MIN_TIMEOUT;\n+\tunsigned int num_timers = ASYNC_ADD_TEST_NUM_TIMERS;\n+\tstruct rte_htimer *timers;\n+\tbool *stop;\n+\tunsigned int lcore_id = rte_lcore_id();\n+\tunsigned int target_lcore_id =\n+\t\trte_get_next_lcore(lcore_id, true, true);\n+\tuint64_t now;\n+\tuint64_t request_latency = 0;\n+\tuint64_t response_latency = 0;\n+\tunsigned int i;\n+\n+\trte_htimer_mgr_init(ASYNC_ADD_TEST_TICK);\n+\n+\ttimers = rte_malloc(NULL, sizeof(struct rte_htimer) * num_timers,\n+\t\t\t    RTE_CACHE_LINE_SIZE);\n+\tstop = rte_malloc(NULL, sizeof(bool), RTE_CACHE_LINE_SIZE);\n+\n+\tif (timers == NULL || stop == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\t*stop = false;\n+\n+\tif (rte_eal_remote_launch(target_lcore, stop, target_lcore_id) != 0)\n+\t\trte_panic(\"Unable to launch worker lcore\\n\");\n+\n+\t/* wait for launch to complete */\n+\trte_delay_us_block(100);\n+\n+\tfor (i = 0; i < ITERATIONS; i++) {\n+\t\tuint64_t expiration_times[num_timers];\n+\t\tunsigned int j;\n+\t\tuint64_t start_ts;\n+\t\tuint64_t end_ts;\n+\t\tunsigned int count = 0;\n+\n+\t\tnow = rte_get_tsc_cycles();\n+\n+\t\tfor (j = 0; j < num_timers; j++)\n+\t\t\texpiration_times[j] = now + min_timeout +\n+\t\t\t\trte_rand_max(max_timeout - min_timeout);\n+\n+\t\tstart_ts = rte_get_tsc_cycles();\n+\n+\t\tfor (j = 0; j < num_timers; j++)\n+\t\t\trte_htimer_mgr_async_add(&timers[j], target_lcore_id,\n+\t\t\t\t\t     expiration_times[j], 0,\n+\t\t\t\t\t     nop_cb, NULL,\n+\t\t\t\t\t     RTE_HTIMER_FLAG_ABSOLUTE_TIME,\n+\t\t\t\t\t     count_async_cb, &count);\n+\n+\t\tend_ts = rte_get_tsc_cycles();\n+\n+\t\trequest_latency += (end_ts - start_ts);\n+\n+\t\t/* wait long-enough for the target lcore to answered */\n+\t\trte_delay_us_block(1 * num_timers);\n+\n+\t\tstart_ts = rte_get_tsc_cycles();\n+\n+\t\twhile (count != num_timers)\n+\t\t\trte_htimer_mgr_process();\n+\n+\t\tend_ts = rte_get_tsc_cycles();\n+\n+\t\tresponse_latency += (end_ts - start_ts);\n+\n+\t\t/* wait until all timeouts have fired */\n+\t\trte_delay_us_block(tsc_to_s(max_timeout) * 1e6);\n+\t}\n+\n+\t__atomic_store_n(stop, true, __ATOMIC_RELAXED);\n+\n+\trte_eal_mp_wait_lcore();\n+\n+\trte_free(timers);\n+\n+\trte_htimer_mgr_deinit();\n+\n+\tprintf(\"Timer async add:\\n\");\n+\tprintf(\"    Configuration:\\n\");\n+\tprintf(\"        Timers: %d\\n\", ASYNC_ADD_TEST_NUM_TIMERS);\n+\tprintf(\"    Results:\\n\");\n+\tprintf(\"        Source lcore cost: %.0f TSC cycles / add request\\n\",\n+\t       (double)request_latency / (double)ITERATIONS / num_timers);\n+\tprintf(\"                           %.0f TSC cycles / add response\\n\",\n+\t       (double)response_latency / (double)ITERATIONS / num_timers);\n+}\n+\n+static int\n+test_htimer_mgr_perf(void)\n+{\n+\t/* warm up */\n+\trte_delay_us_block(10000);\n+\n+\ttest_add_manage_perf(\"Sparse\", 100000, 10000000);\n+\n+\ttest_add_manage_perf(\"Dense\", 100000, 200000);\n+\n+\ttest_add_manage_perf(\"Idle\", 10, 100000);\n+\n+\tif (test_del_perf(100000, 100000) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\ttest_async_add_perf();\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+REGISTER_TEST_COMMAND(htimer_mgr_perf_autotest, 
test_htimer_mgr_perf);\ndiff --git a/app/test/test_htw.c b/app/test/test_htw.c\nnew file mode 100644\nindex 0000000000..3cddfaed7f\n--- /dev/null\n+++ b/app/test/test_htw.c\n@@ -0,0 +1,478 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#include \"test.h\"\n+\n+#include <sys/queue.h>\n+#include <stdlib.h>\n+#include <inttypes.h>\n+\n+#include <rte_cycles.h>\n+#include <rte_htw.h>\n+#include <rte_random.h>\n+\n+struct recorder {\n+\tstruct rte_htimer_list timeout_list;\n+\tuint64_t num_timeouts;\n+};\n+\n+static void\n+recorder_init(struct recorder *recorder)\n+{\n+\trecorder->num_timeouts = 0;\n+\tLIST_INIT(&recorder->timeout_list);\n+}\n+\n+static void\n+recorder_cb(struct rte_htimer *timer, void *arg)\n+{\n+\tstruct recorder *recorder = arg;\n+\n+\trecorder->num_timeouts++;\n+\n+\tLIST_INSERT_HEAD(&recorder->timeout_list, timer, entry);\n+}\n+\n+static int\n+recorder_verify(struct recorder *recorder, uint64_t min_expiry,\n+\t\tuint64_t max_expiry)\n+{\n+\tstruct rte_htimer *timer;\n+\n+\tLIST_FOREACH(timer, &recorder->timeout_list, entry) {\n+\t\tif (timer->expiration_time > max_expiry)\n+\t\t\treturn TEST_FAILED;\n+\n+\t\tif (timer->expiration_time < min_expiry)\n+\t\t\treturn TEST_FAILED;\n+\t}\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static void\n+add_rand_timers(struct rte_htw *htw, struct rte_htimer *timers,\n+\t\tuint64_t num, uint64_t timeout_start, uint64_t max_timeout,\n+\t\trte_htimer_cb_t cb, void *cb_arg)\n+{\n+\tuint64_t i;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tstruct rte_htimer *timer = &timers[i];\n+\t\tbool use_absolute = rte_rand() & 1;\n+\t\tunsigned int flags = 0;\n+\t\tuint64_t expiration_time;\n+\n+\t\texpiration_time = timeout_start + rte_rand_max(max_timeout);\n+\n+\t\tif (use_absolute)\n+\t\t\tflags |= RTE_HTIMER_FLAG_ABSOLUTE_TIME;\n+\t\telse {\n+\t\t\tuint64_t htw_current_time;\n+\n+\t\t\thtw_current_time = rte_htw_current_time(htw);\n+\n+\t\t\tif (expiration_time < htw_current_time)\n+\t\t\t\texpiration_time = 0;\n+\t\t\telse\n+\t\t\t\texpiration_time -= htw_current_time;\n+\t\t}\n+\n+\t\trte_htw_add(htw, timer, expiration_time, 0, cb, cb_arg, flags);\n+\t}\n+}\n+\n+#define ADVANCE_TIME_MAX_STEP 16\n+\n+static int\n+test_rand_timers(uint64_t in_flight_timers, uint64_t max_timeout,\n+\t\t uint64_t runtime)\n+{\n+\tstruct recorder recorder;\n+\tstruct rte_htimer *timers;\n+\tuint64_t fired = 0;\n+\tuint64_t start;\n+\tuint64_t now;\n+\tstruct rte_htw *htw;\n+\tuint64_t added;\n+\n+\trecorder_init(&recorder);\n+\n+\ttimers = malloc(sizeof(struct rte_htimer) * in_flight_timers);\n+\n+\tif (timers == NULL)\n+\t\trte_panic(\"Unable to allocate heap memory\\n\");\n+\n+\tstart = rte_rand_max(UINT64_MAX - max_timeout);\n+\n+\thtw = rte_htw_create();\n+\n+\tif (htw == NULL)\n+\t\treturn TEST_FAILED;\n+\n+\tadded = in_flight_timers;\n+\tadd_rand_timers(htw, timers, added, start + 1, max_timeout,\n+\t\t\trecorder_cb, &recorder);\n+\n+\tfor (now = start; now < (start + runtime); ) {\n+\t\tuint64_t advance;\n+\n+\t\tadvance = rte_rand_max(ADVANCE_TIME_MAX_STEP);\n+\n+\t\tnow += advance;\n+\n+\t\trte_htw_manage(htw, now);\n+\n+\t\tif (recorder.num_timeouts > 0) {\n+\t\t\tstruct rte_htimer *timer;\n+\n+\t\t\tif (advance == 0)\n+\t\t\t\treturn TEST_FAILED;\n+\n+\t\t\tif (recorder_verify(&recorder, now - advance + 1, now)\n+\t\t\t    != TEST_SUCCESS)\n+\t\t\t\treturn TEST_FAILED;\n+\n+\t\t\twhile ((timer = LIST_FIRST(&recorder.timeout_list))\n+\t\t\t       != NULL) {\n+\t\t\t\tLIST_REMOVE(timer, entry);\n+\n+\t\t\t\tadd_rand_timers(htw, 
timer, 1,\n+\t\t\t\t\t\tnow + 1, max_timeout,\n+\t\t\t\t\t\trecorder_cb, &recorder);\n+\t\t\t\tadded++;\n+\t\t\t\tfired++;\n+\t\t\t}\n+\n+\t\t\trecorder.num_timeouts = 0;\n+\t\t}\n+\t}\n+\n+\t/* finish the remaining timeouts */\n+\n+\trte_htw_manage(htw, now + max_timeout);\n+\n+\tif (recorder_verify(&recorder, now, now + max_timeout) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\tfired += recorder.num_timeouts;\n+\n+\tif (fired != added)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_destroy(htw);\n+\n+\tfree(timers);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+struct counter_state {\n+\tint calls;\n+\tstruct rte_htw *htw;\n+\tbool cancel;\n+};\n+\n+static void\n+count_timeouts_cb(struct rte_htimer *timer __rte_unused, void *arg)\n+{\n+\tstruct counter_state *state = arg;\n+\n+\tstate->calls++;\n+\n+\tif (state->cancel)\n+\t\trte_htw_cancel(state->htw, timer);\n+}\n+\n+static int\n+test_single_timeout_type(uint64_t now, uint64_t distance, bool use_absolute)\n+{\n+\tstruct rte_htw *htw;\n+\tstruct counter_state cstate = {};\n+\tstruct rte_htimer timer;\n+\tuint64_t expiration_time;\n+\tunsigned int flags = 0;\n+\n+\thtw = rte_htw_create();\n+\n+\trte_htw_manage(htw, now);\n+\n+\tif (use_absolute) {\n+\t\texpiration_time = now + distance;\n+\t\tflags |= RTE_HTIMER_FLAG_ABSOLUTE_TIME;\n+\t} else\n+\t\texpiration_time = distance;\n+\n+\trte_htw_add(htw, &timer, expiration_time, 0, count_timeouts_cb,\n+\t\t    &cstate, flags);\n+\n+\trte_htw_manage(htw, now);\n+\n+\tif (cstate.calls != 0)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now + distance - 1);\n+\n+\tif (cstate.calls != 0)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now + distance);\n+\n+\n+\tif (cstate.calls != 1)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now + distance);\n+\n+\tif (cstate.calls != 1)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now + distance + 1);\n+\n+\tif (cstate.calls != 1)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_destroy(htw);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_single_timeout(uint64_t now, uint64_t distance)\n+{\n+\n+\tint rc;\n+\n+\trc = test_single_timeout_type(now, distance, true);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n+\trc = test_single_timeout_type(now, distance, false);\n+\tif (rc < 0)\n+\t\treturn rc;\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_periodical_timer(uint64_t now, uint64_t start, uint64_t period)\n+{\n+\tstruct rte_htw *htw;\n+\tstruct counter_state cstate;\n+\tstruct rte_htimer timer;\n+\n+\thtw = rte_htw_create();\n+\n+\tcstate = (struct counter_state) {\n+\t\t.htw = htw\n+\t};\n+\n+\trte_htw_manage(htw, now);\n+\n+\trte_htw_add(htw, &timer, start, period, count_timeouts_cb,\n+\t\t    &cstate, RTE_HTIMER_FLAG_PERIODICAL);\n+\n+\trte_htw_manage(htw, now);\n+\n+\tif (cstate.calls != 0)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now + start - 1);\n+\n+\tif (cstate.calls != 0)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now + start);\n+\n+\tif (cstate.calls != 1)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now + start + 1);\n+\n+\tif (cstate.calls != 1)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now + start + period);\n+\n+\tif (cstate.calls != 2)\n+\t\treturn TEST_FAILED;\n+\n+\tcstate.cancel = true;\n+\n+\trte_htw_manage(htw, now + start + 2 * period);\n+\n+\tif (cstate.calls != 3)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now + start + 3 * period);\n+\n+\tif (cstate.calls != 3)\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_destroy(htw);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+#define 
CANCEL_ITERATIONS 1000\n+#define CANCEL_NUM_TIMERS 1000\n+#define CANCEL_MAX_DISTANCE 10000\n+\n+static int\n+test_cancel_timer(void)\n+{\n+\tuint64_t now;\n+\tstruct rte_htw *htw;\n+\tint i;\n+\tstruct rte_htimer timers[CANCEL_NUM_TIMERS];\n+\tstruct counter_state timeouts[CANCEL_NUM_TIMERS];\n+\n+\tnow = rte_rand_max(UINT64_MAX / 2);\n+\n+\thtw = rte_htw_create();\n+\n+\tfor (i = 0; i < CANCEL_ITERATIONS; i++) {\n+\t\tint j;\n+\t\tint target;\n+\n+\t\tfor (j = 0; j < CANCEL_NUM_TIMERS; j++) {\n+\t\t\tstruct rte_htimer *timer = &timers[j];\n+\t\t\tuint64_t expiration_time;\n+\n+\t\t\ttimeouts[j] = (struct counter_state) {};\n+\n+\t\t\texpiration_time = now + 1 +\n+\t\t\t\trte_rand_max(CANCEL_MAX_DISTANCE);\n+\n+\t\t\trte_htw_add(htw, timer, expiration_time, 0,\n+\t\t\t\t    count_timeouts_cb, &timeouts[j],\n+\t\t\t\t    RTE_HTIMER_FLAG_ABSOLUTE_TIME);\n+\t\t}\n+\n+\t\ttarget = rte_rand_max(CANCEL_NUM_TIMERS);\n+\n+\t\trte_htw_cancel(htw, &timers[target]);\n+\n+\t\tnow += CANCEL_MAX_DISTANCE;\n+\n+\t\trte_htw_manage(htw, now);\n+\n+\t\tfor (j = 0; j < CANCEL_NUM_TIMERS; j++) {\n+\t\t\tif (j != target) {\n+\t\t\t\tif (timeouts[j].calls != 1)\n+\t\t\t\t\treturn TEST_FAILED;\n+\t\t\t} else {\n+\t\t\t\tif (timeouts[j].calls > 0)\n+\t\t\t\t\treturn TEST_FAILED;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\trte_htw_destroy(htw);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static void\n+nop_cb(struct rte_htimer *timer __rte_unused, void *arg __rte_unused)\n+{\n+}\n+\n+#define NEXT_NUM_TIMERS 1000\n+#define NEXT_MAX_DISTANCE 10000\n+\n+static int\n+test_next_timeout(void)\n+{\n+\tuint64_t now;\n+\tstruct rte_htw *htw;\n+\tint i;\n+\tstruct rte_htimer timers[NEXT_NUM_TIMERS];\n+\tuint64_t last_expiration;\n+\n+\tnow = rte_rand_max(NEXT_MAX_DISTANCE);\n+\n+\thtw = rte_htw_create();\n+\n+\tif (rte_htw_next_timeout(htw, UINT64_MAX) != UINT64_MAX)\n+\t\treturn TEST_FAILED;\n+\tif (rte_htw_next_timeout(htw, now + 1) != (now + 1))\n+\t\treturn TEST_FAILED;\n+\n+\trte_htw_manage(htw, now);\n+\n+\tlast_expiration = now + NEXT_MAX_DISTANCE * NEXT_NUM_TIMERS;\n+\n+\tfor (i = 0; i < NEXT_NUM_TIMERS; i++) {\n+\t\tstruct rte_htimer *timer = &timers[i];\n+\t\tuint64_t expiration;\n+\t\tuint64_t upper_bound;\n+\n+\t\t/* add timers, each new one closer than the last */\n+\n+\t\texpiration = last_expiration - rte_rand_max(NEXT_MAX_DISTANCE);\n+\n+\t\trte_htw_add(htw, timer, expiration, 0, nop_cb, NULL,\n+\t\t\t    RTE_HTIMER_FLAG_ABSOLUTE_TIME);\n+\n+\t\tif (rte_htw_next_timeout(htw, UINT64_MAX) != expiration)\n+\t\t\treturn TEST_FAILED;\n+\n+\t\tupper_bound = expiration + rte_rand_max(100000);\n+\n+\t\tif (rte_htw_next_timeout(htw, upper_bound) != expiration)\n+\t\t\treturn TEST_FAILED;\n+\n+\t\tupper_bound = expiration - rte_rand_max(expiration);\n+\n+\t\tif (rte_htw_next_timeout(htw, upper_bound) != upper_bound)\n+\t\t\treturn TEST_FAILED;\n+\n+\t\tlast_expiration = expiration;\n+\t}\n+\n+\trte_htw_destroy(htw);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_htw(void)\n+{\n+\tif (test_single_timeout(0, 10) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_single_timeout(0, 254) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_single_timeout(0, 255) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_single_timeout(255, 1) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_single_timeout(254, 2) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_periodical_timer(10000, 500, 2) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_periodical_timer(1234567, 12345, 100000) != 
TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_cancel_timer() != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_rand_timers(1000, 100000, 100000000) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_rand_timers(100000, 100000, 1000000) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_next_timeout() != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+REGISTER_TEST_COMMAND(htw_autotest, test_htw);\ndiff --git a/app/test/test_htw_perf.c b/app/test/test_htw_perf.c\nnew file mode 100644\nindex 0000000000..65901f0874\n--- /dev/null\n+++ b/app/test/test_htw_perf.c\n@@ -0,0 +1,181 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#include \"test.h\"\n+\n+#include <sys/queue.h>\n+#include <stdlib.h>\n+#include <inttypes.h>\n+\n+#include <rte_cycles.h>\n+#include <rte_htw.h>\n+#include <rte_malloc.h>\n+#include <rte_random.h>\n+\n+static void\n+nop_cb(struct rte_htimer *timer __rte_unused, void *arg __rte_unused)\n+{\n+}\n+\n+static void\n+add_rand_timers(struct rte_htw *htw, struct rte_htimer *timers,\n+\t\tuint64_t num, uint64_t timeout_start, uint64_t max_timeout)\n+{\n+\tuint64_t i;\n+\tuint64_t expiration_times[num];\n+\tuint64_t start_ts;\n+\tuint64_t end_ts;\n+\n+\tfor (i = 0; i < num; i++)\n+\t\texpiration_times[i] = timeout_start + rte_rand_max(max_timeout);\n+\n+\tstart_ts = rte_get_tsc_cycles();\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tstruct rte_htimer *timer = &timers[i];\n+\n+\t\trte_htw_add(htw, timer, expiration_times[i], 0, nop_cb, NULL,\n+\t\t\t    RTE_HTIMER_FLAG_ABSOLUTE_TIME);\n+\t}\n+\n+\t/* actually install the timers */\n+\trte_htw_process(htw);\n+\n+\tend_ts = rte_get_tsc_cycles();\n+\n+\tprintf(\"        %.0f TSC cycles / add op\\n\",\n+\t       (double)(end_ts - start_ts) / num);\n+}\n+\n+#define TIME_STEP 16\n+\n+static int\n+test_add_manage_perf(const char *scenario_name, uint64_t num_timers,\n+\t\t     uint64_t timespan)\n+{\n+\tuint64_t manage_calls;\n+\tstruct rte_htimer *timers;\n+\tuint64_t start;\n+\tuint64_t now;\n+\tstruct rte_htw *htw;\n+\tuint64_t start_ts;\n+\tuint64_t end_ts;\n+\tdouble latency;\n+\n+\tmanage_calls = timespan / TIME_STEP;\n+\n+\tprintf(\"Scenario: %s\\n\", scenario_name);\n+\tprintf(\"    Configuration:\\n\");\n+\tprintf(\"        Timers: %\"PRIu64\"\\n\", num_timers);\n+\tprintf(\"        Max timeout: %\"PRIu64\" ticks\\n\", timespan);\n+\tprintf(\"        Average timeouts/manage call: %.3f\\n\",\n+\t       num_timers / (double)manage_calls);\n+\tprintf(\"        Time advance per manage call: %d\\n\", TIME_STEP);\n+\n+\tprintf(\"    Results:\\n\");\n+\n+\ttimers = rte_malloc(NULL, sizeof(struct rte_htimer) *\n+\t\t\t    num_timers, 0);\n+\n+\tif (timers == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\thtw = rte_htw_create();\n+\n+\tif (htw == NULL)\n+\t\treturn TEST_FAILED;\n+\n+\tstart = 1 + rte_rand_max(UINT64_MAX / 2);\n+\n+\trte_htw_manage(htw, start - 1);\n+\n+\tadd_rand_timers(htw, timers, num_timers, start, timespan);\n+\n+\tstart_ts = rte_get_tsc_cycles();\n+\n+\tfor (now = start; now < (start + timespan); now += TIME_STEP)\n+\t\trte_htw_manage(htw, now);\n+\n+\tend_ts = rte_get_tsc_cycles();\n+\n+\tlatency = end_ts - start_ts;\n+\n+\tprintf(\"        %.0f TSC cycles / manage call\\n\",\n+\t       latency / manage_calls);\n+\tprintf(\"        %.1f TSC cycles / tick\\n\", latency / timespan);\n+\n+\trte_htw_destroy(htw);\n+\n+\trte_free(timers);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static 
int\n+test_cancel_perf(uint64_t num_timers, uint64_t timespan)\n+{\n+\tstruct rte_htimer *timers;\n+\tuint64_t start;\n+\tstruct rte_htw *htw;\n+\tuint64_t i;\n+\tuint64_t start_ts;\n+\tuint64_t end_ts;\n+\tdouble latency;\n+\n+\ttimers = rte_malloc(NULL, sizeof(struct rte_htimer) * num_timers, 0);\n+\n+\tif (timers == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\thtw = rte_htw_create();\n+\n+\tif (htw == NULL)\n+\t\treturn TEST_FAILED;\n+\n+\tstart = 1 + rte_rand_max(UINT64_MAX / 2);\n+\n+\trte_htw_manage(htw, start - 1);\n+\n+\tadd_rand_timers(htw, timers, num_timers, start, timespan);\n+\n+\tstart_ts = rte_get_tsc_cycles();\n+\n+\tfor (i = 0; i < num_timers; i++)\n+\t\trte_htw_cancel(htw, &timers[i]);\n+\n+\tend_ts = rte_get_tsc_cycles();\n+\n+\tlatency = end_ts - start_ts;\n+\n+\tprintf(\"Timer delete: %.0f TSC cycles / call\\n\",\n+\t       latency / num_timers);\n+\n+\trte_htw_destroy(htw);\n+\n+\trte_free(timers);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+static int\n+test_htw_perf(void)\n+{\n+\trte_delay_us_block(100);\n+\n+\tif (test_add_manage_perf(\"Sparse\", 100000, 10000000) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_add_manage_perf(\"Dense\", 100000, 200000) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_add_manage_perf(\"Idle\", 10, 100000) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\tif (test_cancel_perf(100000, 100000) != TEST_SUCCESS)\n+\t\treturn TEST_FAILED;\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+REGISTER_TEST_COMMAND(htw_perf_autotest, test_htw_perf);\ndiff --git a/app/test/test_timer_htimer_htw_perf.c b/app/test/test_timer_htimer_htw_perf.c\nnew file mode 100644\nindex 0000000000..e51fc7282f\n--- /dev/null\n+++ b/app/test/test_timer_htimer_htw_perf.c\n@@ -0,0 +1,693 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#include \"test.h\"\n+\n+#include <inttypes.h>\n+#include <stdlib.h>\n+#include <string.h>\n+\n+#include <rte_cycles.h>\n+#include <rte_htimer_mgr.h>\n+#include <rte_htw.h>\n+#include <rte_lcore.h>\n+#include <rte_malloc.h>\n+#include <rte_random.h>\n+#include <rte_timer.h>\n+\n+static uint64_t\n+s_to_tsc(double s)\n+{\n+\treturn s * rte_get_tsc_hz();\n+}\n+\n+static double\n+tsc_to_s(uint64_t tsc)\n+{\n+\treturn (double)tsc / (double)rte_get_tsc_hz();\n+}\n+\n+struct timer_conf {\n+\tuint64_t start;\n+\tuint64_t interval;\n+};\n+\n+static void\n+get_timer_confs(double aggregate_expiration_rate,\n+\t\tstruct timer_conf *timer_confs,\n+\t\tsize_t num_timers)\n+{\n+\tdouble avg_expiration_rate;\n+\tsize_t i;\n+\n+\tavg_expiration_rate = aggregate_expiration_rate / num_timers;\n+\n+\tfor (i = 0; i < num_timers; i++) {\n+\t\tstruct timer_conf *conf = &timer_confs[i];\n+\t\tdouble expiration_rate;\n+\n+\t\texpiration_rate = avg_expiration_rate * (rte_drand() + 0.5);\n+\n+\t\tconf->interval = rte_get_tsc_hz() / expiration_rate;\n+\t\tconf->start = rte_rand_max(conf->interval);\n+\t}\n+}\n+\n+struct timer_lib_ops {\n+\tconst char *name;\n+\n+\tvoid *(*create)(const struct timer_conf *timer_confs,\n+\t\t\tsize_t num_timers, bool cancel, uint64_t *fired);\n+\tvoid (*manage_time)(void *data, uint64_t current_time);\n+\tvoid (*manage)(void *data);\n+\tvoid (*destroy)(void *data);\n+};\n+\n+static void *\n+nop_create(const struct timer_conf *timer_confs __rte_unused,\n+\t   size_t num_timers __rte_unused, bool cancel __rte_unused,\n+\t   uint64_t *fired __rte_unused)\n+{\n+\treturn NULL;\n+}\n+\n+static __rte_noinline void\n+nop_manage(void *data __rte_unused)\n+{\n+}\n+\n+static 
__rte_noinline void\n+nop_manage_time(void *data __rte_unused, uint64_t current_time __rte_unused)\n+{\n+}\n+\n+static void\n+nop_destroy(void *data __rte_unused)\n+{\n+}\n+\n+static struct timer_lib_ops nop_ops = {\n+\t.name = \"nop\",\n+\t.create = nop_create,\n+\t.manage = nop_manage,\n+\t.manage_time = nop_manage_time,\n+\t.destroy = nop_destroy\n+};\n+\n+struct ctimer {\n+\tuint64_t interval;\n+\tstruct rte_timer timer;\n+\tuint64_t cancel_offset;\n+\tstruct rte_timer canceled_timer;\n+};\n+\n+static void\n+crash_cb(struct rte_timer *timer __rte_unused, void *cb_arg __rte_unused)\n+{\n+\tabort();\n+}\n+\n+#define CANCELED_OFFSET (0.5) /* s */\n+\n+static void\n+test_cb(struct rte_timer *timer, void *cb_arg)\n+{\n+\tstruct ctimer *ctimer =\n+\t\tcontainer_of(timer, struct ctimer, timer);\n+\tuint64_t *fired = cb_arg;\n+\n+\trte_timer_reset(timer, ctimer->interval, SINGLE,\n+\t\t\trte_lcore_id(), test_cb, cb_arg);\n+\n+\tif (ctimer->cancel_offset > 0)\n+\t\trte_timer_reset(&ctimer->canceled_timer,\n+\t\t\t\tctimer->interval + ctimer->cancel_offset,\n+\t\t\t\tSINGLE, rte_lcore_id(), crash_cb, NULL);\n+\n+\t(*fired)++;\n+}\n+\n+static void *\n+timer_create1(const struct timer_conf *timer_confs, size_t num_timers,\n+\t      bool cancel, uint64_t *fired)\n+{\n+\tstruct ctimer *ctimers;\n+\tunsigned int i;\n+\n+\tctimers = rte_malloc(NULL, sizeof(struct ctimer) * num_timers, 0);\n+\n+\tif (num_timers > 0 && ctimers == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\trte_timer_subsystem_init();\n+\n+\tfor (i = 0; i < num_timers; i++) {\n+\t\tconst struct timer_conf *timer_conf = &timer_confs[i];\n+\t\tstruct ctimer *ctimer = &ctimers[i];\n+\t\tstruct rte_timer *timer = &ctimer->timer;\n+\n+\t\trte_timer_init(timer);\n+\n+\t\tctimer->interval = timer_conf->interval;\n+\n+\t\trte_timer_reset(timer, timer_conf->start, SINGLE,\n+\t\t\t\trte_lcore_id(), test_cb, fired);\n+\n+\t\tif (cancel) {\n+\t\t\tctimer->cancel_offset = s_to_tsc(CANCELED_OFFSET);\n+\n+\t\t\t/* the canceled timer must be initialized\n+\t\t\t * before its first rte_timer_reset() call\n+\t\t\t */\n+\t\t\trte_timer_init(&ctimer->canceled_timer);\n+\n+\t\t\trte_timer_reset(&ctimer->canceled_timer,\n+\t\t\t\t    timer_conf->start + ctimer->cancel_offset,\n+\t\t\t\t    SINGLE, rte_lcore_id(),\n+\t\t\t\t    crash_cb, NULL);\n+\t\t} else\n+\t\t\tctimer->cancel_offset = 0;\n+\t}\n+\n+\treturn ctimers;\n+}\n+\n+static void\n+timer_manage(void *data __rte_unused)\n+{\n+\trte_timer_manage();\n+}\n+\n+static void\n+timer_manage_time(void *data __rte_unused, uint64_t current_time __rte_unused)\n+{\n+\trte_timer_manage();\n+}\n+\n+static void\n+timer_destroy(void *data)\n+{\n+\trte_free(data);\n+\n+\trte_timer_subsystem_finalize();\n+}\n+\n+static struct timer_lib_ops timer_ops = {\n+\t.name = \"timer\",\n+\t.create = timer_create1,\n+\t.manage = timer_manage,\n+\t.manage_time = timer_manage_time,\n+\t.destroy = timer_destroy\n+};\n+\n+struct chtimer {\n+\tuint64_t interval;\n+\tstruct rte_htimer htimer;\n+\tuint64_t cancel_offset;\n+\tstruct rte_htimer canceled_htimer;\n+};\n+\n+static void\n+hcrash_cb(struct rte_htimer *timer __rte_unused, void *cb_arg __rte_unused)\n+{\n+\tabort();\n+}\n+\n+static void\n+htest_cb(struct rte_htimer *timer, void *cb_arg)\n+{\n+\tstruct chtimer *chtimer =\n+\t\tcontainer_of(timer, struct chtimer, htimer);\n+\tuint64_t *fired = cb_arg;\n+\n+\trte_htimer_mgr_add(timer, chtimer->interval, 0, htest_cb, cb_arg,\n+\t\t\t   RTE_HTIMER_FLAG_TIME_TSC);\n+\n+\tif (chtimer->cancel_offset > 0) {\n+\t\tstruct rte_htimer *canceled_htimer =\n+\t\t\t&chtimer->canceled_htimer;\n+\t\tuint64_t cancel_expiration_time = chtimer->interval 
+\n+\t\t\tchtimer->cancel_offset;\n+\n+\t\trte_htimer_mgr_cancel(canceled_htimer);\n+\n+\t\trte_htimer_mgr_add(canceled_htimer, cancel_expiration_time, 0,\n+\t\t\t\t   hcrash_cb, NULL, RTE_HTIMER_FLAG_TIME_TSC);\n+\t}\n+\n+\t(*fired)++;\n+}\n+\n+#define TICK_LENGTH (1e-6)\n+\n+static void *\n+htimer_create(const struct timer_conf *timer_confs, size_t num_timers,\n+\t      bool cancel, uint64_t *fired)\n+{\n+\tstruct chtimer *chtimers;\n+\tunsigned int i;\n+\n+\tchtimers = rte_malloc(NULL, sizeof(struct chtimer) * num_timers, 0);\n+\n+\tif (num_timers > 0 && chtimers == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\trte_htimer_mgr_init(TICK_LENGTH * NS_PER_S);\n+\n+\trte_htimer_mgr_manage();\n+\n+\tfor (i = 0; i < num_timers; i++) {\n+\t\tconst struct timer_conf *timer_conf = &timer_confs[i];\n+\t\tstruct chtimer *chtimer = &chtimers[i];\n+\n+\t\tchtimer->interval = timer_conf->interval;\n+\n+\t\trte_htimer_mgr_add(&chtimer->htimer, timer_conf->start, 0,\n+\t\t\t\t   htest_cb, fired, RTE_HTIMER_FLAG_TIME_TSC);\n+\n+\t\tif (cancel) {\n+\t\t\tuint64_t cancel_start;\n+\n+\t\t\tchtimer->cancel_offset = s_to_tsc(CANCELED_OFFSET);\n+\n+\t\t\tcancel_start =\n+\t\t\t\ttimer_conf->start + chtimer->cancel_offset;\n+\n+\t\t\trte_htimer_mgr_add(&chtimer->canceled_htimer,\n+\t\t\t\t\t   cancel_start, 0,\n+\t\t\t\t\t   hcrash_cb, NULL,\n+\t\t\t\t\t   RTE_HTIMER_FLAG_TIME_TSC);\n+\t\t} else\n+\t\t\tchtimer->cancel_offset = 0;\n+\t}\n+\n+\trte_htimer_mgr_process();\n+\n+\treturn chtimers;\n+}\n+\n+static void\n+htimer_manage(void *data __rte_unused)\n+{\n+\trte_htimer_mgr_manage();\n+}\n+\n+static void\n+htimer_manage_time(void *data __rte_unused, uint64_t current_time)\n+{\n+\trte_htimer_mgr_manage_time(current_time, RTE_HTIMER_FLAG_TIME_TSC);\n+}\n+\n+static void\n+htimer_destroy(void *data)\n+{\n+\trte_free(data);\n+\n+\trte_htimer_mgr_deinit();\n+}\n+\n+static struct timer_lib_ops htimer_ops = {\n+\t.name = \"htimer\",\n+\t.create = htimer_create,\n+\t.manage = htimer_manage,\n+\t.manage_time = htimer_manage_time,\n+\t.destroy = htimer_destroy,\n+};\n+\n+struct htw {\n+\tstruct rte_htw *htw;\n+\tstruct chtimer *chtimers;\n+\tuint64_t tsc_per_tick;\n+\tuint64_t *fired;\n+};\n+\n+static void\n+htw_manage_time(void *timer_data, uint64_t current_time)\n+{\n+\tstruct htw *htw = timer_data;\n+\tuint64_t tick;\n+\n+\ttick = current_time / htw->tsc_per_tick;\n+\n+\trte_htw_manage(htw->htw, tick);\n+}\n+\n+static void\n+htw_manage(void *timer_data)\n+{\n+\tuint64_t now;\n+\n+\tnow = rte_get_tsc_cycles();\n+\n+\thtw_manage_time(timer_data, now);\n+}\n+\n+static void\n+htwcrash_cb(struct rte_htimer *timer __rte_unused, void *cb_arg __rte_unused)\n+{\n+\tabort();\n+}\n+\n+static void\n+htwtest_cb(struct rte_htimer *timer, void *cb_arg)\n+{\n+\tstruct chtimer *chtimer =\n+\t\tcontainer_of(timer, struct chtimer, htimer);\n+\tstruct htw *htw = cb_arg;\n+\n+\trte_htw_add(htw->htw, timer, chtimer->interval, 0, htwtest_cb,\n+\t\t    cb_arg, 0);\n+\n+\tif (chtimer->cancel_offset > 0) {\n+\t\tstruct rte_htimer *canceled_htimer =\n+\t\t\t&chtimer->canceled_htimer;\n+\t\tuint64_t cancel_expiration_time = chtimer->interval +\n+\t\t\tchtimer->cancel_offset;\n+\n+\t\trte_htw_cancel(htw->htw, canceled_htimer);\n+\n+\t\trte_htw_add(htw->htw, canceled_htimer,\n+\t\t\t    cancel_expiration_time, 0,\n+\t\t\t    htwcrash_cb, cb_arg, 0);\n+\t}\n+\n+\t(*htw->fired)++;\n+}\n+\n+static void *\n+htw_create(const struct timer_conf *timer_confs, size_t num_timers,\n+\t   bool cancel, uint64_t *fired)\n+{\n+\tunsigned int 
i;\n+\tstruct htw *htw;\n+\n+\thtw = rte_malloc(NULL, sizeof(struct htw), 0);\n+\tif (htw == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\thtw->htw = rte_htw_create();\n+\tif (htw->htw == NULL)\n+\t\trte_panic(\"Unable to create HTW\\n\");\n+\n+\thtw->chtimers =\n+\t\trte_malloc(NULL, sizeof(struct chtimer) * num_timers, 0);\n+\tif (num_timers > 0 && htw->chtimers == NULL)\n+\t\trte_panic(\"Unable to allocate memory\\n\");\n+\n+\thtw->tsc_per_tick = s_to_tsc(TICK_LENGTH);\n+\n+\thtw->fired = fired;\n+\n+\thtw_manage(htw);\n+\n+\tfor (i = 0; i < num_timers; i++) {\n+\t\tconst struct timer_conf *timer_conf = &timer_confs[i];\n+\t\tstruct chtimer *chtimer = &htw->chtimers[i];\n+\t\tuint64_t start;\n+\n+\t\tchtimer->interval = timer_conf->interval / htw->tsc_per_tick;\n+\n+\t\tstart = timer_conf->start / htw->tsc_per_tick;\n+\n+\t\trte_htw_add(htw->htw, &chtimer->htimer,\n+\t\t\t    start, 0, htwtest_cb, htw, 0);\n+\n+\t\tif (cancel) {\n+\t\t\tuint64_t cancel_start;\n+\n+\t\t\tchtimer->cancel_offset =\n+\t\t\t\ts_to_tsc(CANCELED_OFFSET) / htw->tsc_per_tick;\n+\n+\t\t\tcancel_start = start + chtimer->cancel_offset;\n+\n+\t\t\trte_htw_add(htw->htw, &chtimer->canceled_htimer,\n+\t\t\t\t    cancel_start, 0, htwcrash_cb, NULL, 0);\n+\t\t} else\n+\t\t\tchtimer->cancel_offset = 0;\n+\t}\n+\n+\trte_htw_process(htw->htw);\n+\n+\treturn htw;\n+}\n+\n+static void\n+htw_destroy(void *data)\n+{\n+\tstruct htw *htw = data;\n+\n+\trte_htw_destroy(htw->htw);\n+\n+\trte_free(htw->chtimers);\n+\n+\trte_free(htw);\n+}\n+\n+static struct timer_lib_ops htw_ops = {\n+\t.name = \"htw\",\n+\t.create = htw_create,\n+\t.manage = htw_manage,\n+\t.manage_time = htw_manage_time,\n+\t.destroy = htw_destroy,\n+};\n+\n+static const struct timer_lib_ops *lib_ops[] = {\n+\t&timer_ops, &htimer_ops, &htw_ops\n+};\n+\n+#define DUMMY_TASK_SIZE (2500)\n+\n+static __rte_noinline uint64_t\n+do_dummy_task(void)\n+{\n+\tuint64_t result = 0;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < DUMMY_TASK_SIZE; i++)\n+\t\tresult += rte_rand();\n+\n+\treturn result;\n+}\n+\n+struct work_log {\n+\tuint64_t tasks_completed;\n+\tuint64_t runtime;\n+};\n+\n+#define TARGET_RUNTIME (4.0) /* s */\n+\n+struct run_result {\n+\tuint64_t tasks_completed;\n+\tuint64_t timer_fired;\n+\tuint64_t latency;\n+};\n+\n+static void\n+run_with_lib(const struct timer_lib_ops *timer_ops,\n+\t     const struct timer_conf *timer_confs, size_t num_timers,\n+\t     bool cancel, struct run_result *result)\n+{\n+\tvoid *timer_data;\n+\tuint64_t deadline;\n+\tuint64_t start;\n+\tuint64_t now;\n+\tvolatile uint64_t sum = 0;\n+\n+\tresult->tasks_completed = 0;\n+\tresult->timer_fired = 0;\n+\n+\ttimer_data = timer_ops->create(timer_confs, num_timers, cancel,\n+\t\t\t\t       &result->timer_fired);\n+\n+\tstart = rte_get_tsc_cycles();\n+\n+\tdeadline = start + s_to_tsc(TARGET_RUNTIME);\n+\n+\tdo {\n+\t\tsum += do_dummy_task();\n+\n+\t\tresult->tasks_completed++;\n+\n+\t\tnow = rte_get_tsc_cycles();\n+\n+\t\ttimer_ops->manage_time(timer_data, now);\n+\t} while (now < deadline);\n+\n+\tRTE_VERIFY(sum != 0);\n+\n+\tresult->latency = rte_get_tsc_cycles() - start;\n+\n+\ttimer_ops->destroy(timer_data);\n+}\n+\n+static void\n+benchmark_timer_libs(double aggregate_expiration_rate, uint64_t num_timers,\n+\t\t     bool cancel)\n+{\n+\tstruct timer_conf timer_confs[num_timers];\n+\tstruct run_result nop_result;\n+\tdouble nop_per_task_latency;\n+\tstruct run_result lib_results[RTE_DIM(lib_ops)];\n+\tuint64_t lib_overhead[RTE_DIM(lib_ops)];\n+\n+\tunsigned int 
i;\n+\n+\tprintf(\"Configuration:\\n\");\n+\tprintf(\"    Aggregate timer expiration rate: %.3e Hz\\n\",\n+\t       aggregate_expiration_rate);\n+\tif (cancel)\n+\t\tprintf(\"    Aggregate timer cancellation rate: %.3e Hz\\n\",\n+\t\t       aggregate_expiration_rate);\n+\tprintf(\"    Concurrent timers: %\"PRIu64\"\\n\", num_timers);\n+\tprintf(\"    Tick length: %.1e s\\n\", TICK_LENGTH);\n+\n+\trte_srand(4711);\n+\n+\tget_timer_confs(aggregate_expiration_rate, timer_confs, num_timers);\n+\n+\trun_with_lib(&nop_ops, NULL, 0, false, &nop_result);\n+\tnop_per_task_latency =\n+\t\t(double)nop_result.latency / nop_result.tasks_completed;\n+\n+\tfor (i = 0; i < RTE_DIM(lib_ops); i++) {\n+\t\tstruct run_result *lib_result = &lib_results[i];\n+\t\tdouble per_task_latency;\n+\n+\t\trun_with_lib(lib_ops[i], timer_confs, num_timers, cancel,\n+\t\t\t     lib_result);\n+\n+\t\tper_task_latency = (double)lib_result->latency /\n+\t\t\tlib_result->tasks_completed;\n+\n+\t\tif (per_task_latency > nop_per_task_latency)\n+\t\t\tlib_overhead[i] =\n+\t\t\t\t(per_task_latency - nop_per_task_latency) *\n+\t\t\t\tlib_result->tasks_completed;\n+\t\telse\n+\t\t\tlib_overhead[i] = 0;\n+\t}\n+\n+\tprintf(\"Results:\\n\");\n+\n+\tprintf(\"    Work between manage calls: %.0f TSC cycles\\n\",\n+\t       (double)nop_result.latency / nop_result.tasks_completed);\n+\n+\tprintf(\"\\n\");\n+\tprintf(\"%-24s\", \"\");\n+\tfor (i = 0; i < RTE_DIM(lib_ops); i++)\n+\t\tprintf(\"%12s\", lib_ops[i]->name);\n+\tprintf(\"\\n\");\n+\n+\tprintf(\"%-24s\", \"    Runtime [s]\");\n+\tfor (i = 0; i < RTE_DIM(lib_ops); i++)\n+\t\tprintf(\"%12.3e\", tsc_to_s(lib_results[i].latency));\n+\tprintf(\"\\n\");\n+\n+\tprintf(\"%-24s\", \"    Expiration rate [Hz]\");\n+\tfor (i = 0; i < RTE_DIM(lib_ops); i++)\n+\t\tprintf(\"%12.3e\", lib_results[i].timer_fired /\n+\t\t       tsc_to_s(lib_results[i].latency));\n+\tprintf(\"\\n\");\n+\n+\tprintf(\"%-24s\", \"    Overhead [%]\");\n+\tfor (i = 0; i < RTE_DIM(lib_ops); i++)\n+\t\tprintf(\"%12.3f\", 100 * (double)lib_overhead[i] /\n+\t\t       (double)lib_results[i].latency);\n+\tprintf(\"\\n\");\n+\n+\tprintf(\"%-24s\", \"    Per expiration [TSC]\");\n+\tfor (i = 0; i < RTE_DIM(lib_ops); i++)\n+\t\tprintf(\"%12\"PRIu64, lib_overhead[i] /\n+\t\t       lib_results[i].timer_fired);\n+\tprintf(\"\\n\");\n+\n+\tprintf(\"%-24s\", \"    Per manage() [TSC]\");\n+\tfor (i = 0; i < RTE_DIM(lib_ops); i++)\n+\t\tprintf(\"%12\"PRIu64, lib_overhead[i] /\n+\t\t       lib_results[i].tasks_completed);\n+\tprintf(\"\\n\");\n+}\n+\n+static void\n+benchmark_timer_libs_mode(double aggregate_expiration_rate, bool cancel)\n+{\n+\tbenchmark_timer_libs(aggregate_expiration_rate, 100, cancel);\n+\tbenchmark_timer_libs(aggregate_expiration_rate, 100000, cancel);\n+}\n+\n+static void\n+benchmark_timer_libs_rate(double aggregate_expiration_rate)\n+{\n+\tbenchmark_timer_libs_mode(aggregate_expiration_rate, false);\n+\tbenchmark_timer_libs_mode(aggregate_expiration_rate, true);\n+}\n+\n+#define MANAGE_ITERATIONS (10000000)\n+\n+static uint64_t\n+run_manage(const struct timer_lib_ops *timer_ops, bool user_provided_time)\n+{\n+\tuint64_t start;\n+\tuint64_t latency;\n+\tvoid *timer_data;\n+\tunsigned int i;\n+\n+\ttimer_data = timer_ops->create(NULL, 0, false, NULL);\n+\n+\tstart = rte_get_tsc_cycles();\n+\n+\tfor (i = 0; i < MANAGE_ITERATIONS; i++)\n+\t\tif (user_provided_time && timer_ops->manage_time != NULL) {\n+\t\t\tuint64_t now;\n+\n+\t\t\tnow = rte_get_tsc_cycles();\n+\n+\t\t\ttimer_ops->manage_time(timer_data, now);\n+\t\t} 
else\n+\t\t\ttimer_ops->manage(timer_data);\n+\n+\tlatency = rte_get_tsc_cycles() - start;\n+\n+\ttimer_ops->destroy(timer_data);\n+\n+\treturn latency / MANAGE_ITERATIONS;\n+}\n+\n+static void\n+benchmark_timer_libs_timeless_manage(bool user_provided_time)\n+{\n+\tunsigned int i;\n+\tuint64_t nop_latency;\n+\n+\tnop_latency = run_manage(&nop_ops, user_provided_time);\n+\n+\tprintf(\"Zero-timers manage() overhead%s:\\n\", user_provided_time ?\n+\t       \" (w/ user-provided time)\" : \"\");\n+\n+\tfor (i = 0; i < RTE_DIM(lib_ops); i++) {\n+\t\tconst struct timer_lib_ops *ops = lib_ops[i];\n+\t\tuint64_t latency;\n+\n+\t\tlatency = run_manage(ops, user_provided_time);\n+\n+\t\tif (latency > nop_latency)\n+\t\t\tlatency -= nop_latency;\n+\t\telse\n+\t\t\tlatency = 0;\n+\n+\t\tprintf(\"    %s: %\"PRIu64\" TSC cycles\\n\", ops->name, latency);\n+\t}\n+}\n+\n+static int\n+test_timer_htimer_htw_perf(void)\n+{\n+\t/* warm up */\n+\trte_delay_us_block(10000);\n+\n+\tbenchmark_timer_libs_rate(1e6);\n+\n+\tbenchmark_timer_libs_timeless_manage(false);\n+\tbenchmark_timer_libs_timeless_manage(true);\n+\n+\treturn TEST_SUCCESS;\n+}\n+\n+REGISTER_TEST_COMMAND(timer_htimer_htw_perf_autotest,\n+\t\t      test_timer_htimer_htw_perf);\ndiff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md\nindex 2deec7ea19..5ea1dfa262 100644\n--- a/doc/api/doxy-api-index.md\n+++ b/doc/api/doxy-api-index.md\n@@ -67,6 +67,8 @@ The public API headers are grouped by topics:\n - **timers**:\n   [cycles](@ref rte_cycles.h),\n   [timer](@ref rte_timer.h),\n+  [htimer_mgr](@ref rte_htimer_mgr.h),\n+  [htimer](@ref rte_htimer.h),\n   [alarm](@ref rte_alarm.h)\n \n - **locks**:\n@@ -163,7 +165,8 @@ The public API headers are grouped by topics:\n   [ring](@ref rte_ring.h),\n   [stack](@ref rte_stack.h),\n   [tailq](@ref rte_tailq.h),\n-  [bitmap](@ref rte_bitmap.h)\n+  [bitmap](@ref rte_bitmap.h),\n+  [bitset](@ref rte_bitset.h)\n \n - **packet framework**:\n   * [port](@ref rte_port.h):\ndiff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in\nindex e859426099..c0cd64db34 100644\n--- a/doc/api/doxy-api.conf.in\n+++ b/doc/api/doxy-api.conf.in\n@@ -45,6 +45,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \\\n                           @TOPDIR@/lib/gro \\\n                           @TOPDIR@/lib/gso \\\n                           @TOPDIR@/lib/hash \\\n+                          @TOPDIR@/lib/htimer \\\n                           @TOPDIR@/lib/ip_frag \\\n                           @TOPDIR@/lib/ipsec \\\n                           @TOPDIR@/lib/jobstats \\\ndiff --git a/lib/htimer/meson.build b/lib/htimer/meson.build\nnew file mode 100644\nindex 0000000000..2dd5d6a24b\n--- /dev/null\n+++ b/lib/htimer/meson.build\n@@ -0,0 +1,7 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2023 Ericsson AB\n+\n+sources = files('rte_htw.c', 'rte_htimer_msg_ring.c', 'rte_htimer_mgr.c')\n+headers = files('rte_htimer_mgr.h', 'rte_htimer.h')\n+\n+deps += ['ring']\ndiff --git a/lib/htimer/rte_htimer.h b/lib/htimer/rte_htimer.h\nnew file mode 100644\nindex 0000000000..6ac86292b5\n--- /dev/null\n+++ b/lib/htimer/rte_htimer.h\n@@ -0,0 +1,72 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#ifndef _RTE_HTIMER_H_\n+#define _RTE_HTIMER_H_\n+\n+#include <stdbool.h>\n+#include <stdint.h>\n+#include <sys/queue.h>\n+\n+#include <rte_bitops.h>\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+struct rte_htimer;\n+\n+typedef void (*rte_htimer_cb_t)(struct rte_htimer *, void *);\n+\n+struct rte_htimer {\n+\t/**\n+\t * 
Absolute timer expiration time (in ticks).\n+\t */\n+\tuint64_t expiration_time;\n+\t/**\n+\t * Time between expirations (in ticks). Zero for one-shot timers.\n+\t */\n+\tuint64_t period;\n+\t/**\n+\t * Owning lcore. May safely be read from any thread.\n+\t */\n+\tuint32_t owner_lcore_id;\n+\t/**\n+\t * The current state of the timer.\n+\t */\n+\tuint32_t state:4;\n+\t/**\n+\t * Flags set on this timer.\n+\t */\n+\tuint32_t flags:28;\n+\t/**\n+\t * User-specified callback function pointer.\n+\t */\n+\trte_htimer_cb_t cb;\n+\t/**\n+\t * Argument for user callback.\n+\t */\n+\tvoid *cb_arg;\n+\t/**\n+\t * Pointers used to add timer to various internal lists.\n+\t */\n+\tLIST_ENTRY(rte_htimer) entry;\n+};\n+\n+#define RTE_HTIMER_FLAG_ABSOLUTE_TIME RTE_BIT32(0)\n+#define RTE_HTIMER_FLAG_PERIODICAL RTE_BIT32(1)\n+#define RTE_HTIMER_FLAG_TIME_TICK RTE_BIT32(2)\n+#define RTE_HTIMER_FLAG_TIME_TSC RTE_BIT32(3)\n+\n+#define RTE_HTIMER_STATE_PENDING 1\n+#define RTE_HTIMER_STATE_EXPIRED 2\n+#define RTE_HTIMER_STATE_CANCELED 3\n+\n+LIST_HEAD(rte_htimer_list, rte_htimer);\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_HTIMER_H_ */\ndiff --git a/lib/htimer/rte_htimer_mgr.c b/lib/htimer/rte_htimer_mgr.c\nnew file mode 100644\nindex 0000000000..efdfcf0985\n--- /dev/null\n+++ b/lib/htimer/rte_htimer_mgr.c\n@@ -0,0 +1,547 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#include <inttypes.h>\n+#include <math.h>\n+#include <stdbool.h>\n+#include <sys/queue.h>\n+#include <unistd.h>\n+\n+#include <rte_branch_prediction.h>\n+#include <rte_common.h>\n+#include <rte_cycles.h>\n+#include <rte_errno.h>\n+#include <rte_htw.h>\n+#include <rte_prefetch.h>\n+#include <rte_ring_elem.h>\n+\n+#include \"rte_htimer_mgr.h\"\n+#include \"rte_htimer_msg.h\"\n+#include \"rte_htimer_msg_ring.h\"\n+\n+#define MAX_MSG_BATCH_SIZE 16\n+\n+struct htimer_mgr {\n+\tstruct rte_htimer_msg_ring *msg_ring;\n+\tstruct rte_htw *htw;\n+\n+\tunsigned int async_msgs_idx __rte_cache_aligned;\n+\tunsigned int num_async_msgs;\n+\tstruct rte_htimer_msg async_msgs[MAX_MSG_BATCH_SIZE];\n+} __rte_cache_aligned;\n+\n+static uint64_t ns_per_tick;\n+static double tsc_per_tick;\n+\n+static struct htimer_mgr mgrs[RTE_MAX_LCORE + 1];\n+\n+#define MAX_ASYNC_TRANSACTIONS 1024\n+#define MSG_RING_SIZE MAX_ASYNC_TRANSACTIONS\n+\n+static inline uint64_t\n+tsc_to_tick(uint64_t tsc)\n+{\n+\treturn tsc / tsc_per_tick;\n+}\n+\n+static inline uint64_t\n+tsc_to_tick_round_up(uint64_t tsc)\n+{\n+\tuint64_t tick;\n+\n+\ttick = (tsc + tsc_per_tick / 2) / tsc_per_tick;\n+\n+\treturn tick;\n+}\n+\n+static inline uint64_t\n+ns_to_tick(uint64_t ns)\n+{\n+\treturn ns / ns_per_tick;\n+}\n+\n+static inline uint64_t\n+ns_to_tick_round_up(uint64_t ns)\n+{\n+\tuint64_t tick;\n+\n+\ttick = ceil(ns / ns_per_tick);\n+\n+\treturn tick;\n+}\n+\n+static inline uint64_t\n+tick_to_ns(uint64_t tick)\n+{\n+\treturn tick * ns_per_tick;\n+}\n+\n+static struct htimer_mgr *\n+mgr_get(unsigned int lcore_id)\n+{\n+\treturn &mgrs[lcore_id];\n+}\n+\n+static int\n+mgr_init(unsigned int lcore_id)\n+{\n+\tchar ring_name[RTE_RING_NAMESIZE];\n+\tunsigned int socket_id;\n+\tstruct htimer_mgr *mgr = &mgrs[lcore_id];\n+\n+\tsocket_id = rte_lcore_to_socket_id(lcore_id);\n+\n+\tsnprintf(ring_name, sizeof(ring_name), \"htimer_%d\", lcore_id);\n+\n+\tmgr->msg_ring =\n+\t\trte_htimer_msg_ring_create(ring_name, MSG_RING_SIZE, socket_id,\n+\t\t\t\t\t   RING_F_SC_DEQ);\n+\n+\tif (mgr->msg_ring == NULL)\n+\t\tgoto err;\n+\n+\tmgr->htw = rte_htw_create();\n+\n+\tif 
(mgr->htw == NULL)\n+\t\tgoto err_free_ring;\n+\n+\tmgr->async_msgs_idx = 0;\n+\tmgr->num_async_msgs = 0;\n+\n+\treturn 0;\n+\n+err_free_ring:\n+\trte_htimer_msg_ring_free(mgr->msg_ring);\n+err:\n+\treturn -ENOMEM;\n+}\n+\n+static void\n+mgr_deinit(unsigned int lcore_id)\n+{\n+\tstruct htimer_mgr *mgr = &mgrs[lcore_id];\n+\n+\trte_htw_destroy(mgr->htw);\n+\n+\trte_htimer_msg_ring_free(mgr->msg_ring);\n+}\n+\n+static volatile bool initialized;\n+\n+static void\n+assure_initialized(void)\n+{\n+\tRTE_ASSERT(initialized);\n+}\n+\n+int\n+rte_htimer_mgr_init(uint64_t _ns_per_tick)\n+{\n+\tunsigned int lcore_id;\n+\n+\tRTE_VERIFY(!initialized);\n+\n+\tns_per_tick = _ns_per_tick;\n+\n+\ttsc_per_tick = (ns_per_tick / 1e9) * rte_get_tsc_hz();\n+\n+\tfor (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {\n+\t\tint rc;\n+\n+\t\trc = mgr_init(lcore_id);\n+\n+\t\tif (rc < 0) {\n+\t\t\tunsigned int deinit_lcore_id;\n+\n+\t\t\tfor (deinit_lcore_id = 0; deinit_lcore_id < lcore_id;\n+\t\t\t     deinit_lcore_id++)\n+\t\t\t\tmgr_deinit(deinit_lcore_id);\n+\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\tinitialized = true;\n+\n+\treturn 0;\n+}\n+\n+void\n+rte_htimer_mgr_deinit(void)\n+{\n+\tunsigned int lcore_id;\n+\n+\tassure_initialized();\n+\n+\tfor (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)\n+\t\tmgr_deinit(lcore_id);\n+\n+\tinitialized = false;\n+}\n+\n+static void\n+assure_valid_time_conversion_flags(uint32_t flags __rte_unused)\n+{\n+\tRTE_ASSERT(!((flags & RTE_HTIMER_FLAG_TIME_TSC) &&\n+\t\t     (flags & RTE_HTIMER_FLAG_TIME_TICK)));\n+}\n+\n+static void\n+assure_valid_add_flags(uint32_t flags)\n+{\n+\tassure_valid_time_conversion_flags(flags);\n+\n+\tRTE_ASSERT(!(flags & ~(RTE_HTIMER_FLAG_PERIODICAL |\n+\t\t\t       RTE_HTIMER_FLAG_ABSOLUTE_TIME |\n+\t\t\t       RTE_HTIMER_FLAG_TIME_TSC |\n+\t\t\t       RTE_HTIMER_FLAG_TIME_TICK)));\n+}\n+\n+static uint64_t\n+convert_time(uint64_t t, uint32_t flags)\n+{\n+\tif (flags & RTE_HTIMER_FLAG_TIME_TSC)\n+\t\treturn tsc_to_tick(t);\n+\telse if (flags & RTE_HTIMER_FLAG_TIME_TICK)\n+\t\treturn t;\n+\telse\n+\t\treturn ns_to_tick(t);\n+}\n+\n+void\n+rte_htimer_mgr_add(struct rte_htimer *timer, uint64_t expiration_time,\n+\t\t   uint64_t period, rte_htimer_cb_t timer_cb,\n+\t\t   void *timer_cb_arg, uint32_t flags)\n+{\n+\tunsigned int lcore_id = rte_lcore_id();\n+\tstruct htimer_mgr *mgr = mgr_get(lcore_id);\n+\tuint64_t expiration_time_tick;\n+\tuint64_t period_tick;\n+\n+\tassure_initialized();\n+\n+\tassure_valid_add_flags(flags);\n+\n+\texpiration_time_tick = convert_time(expiration_time, flags);\n+\n+\tperiod_tick = convert_time(period, flags);\n+\n+\trte_htw_add(mgr->htw, timer, expiration_time_tick, period_tick,\n+\t\t    timer_cb, timer_cb_arg, flags);\n+\n+\ttimer->owner_lcore_id = lcore_id;\n+}\n+\n+int\n+rte_htimer_mgr_cancel(struct rte_htimer *timer)\n+{\n+\tunsigned int lcore_id = rte_lcore_id();\n+\tstruct htimer_mgr *mgr = mgr_get(lcore_id);\n+\n+\tassure_initialized();\n+\n+\tRTE_ASSERT(timer->owner_lcore_id == lcore_id);\n+\n+\tswitch (timer->state) {\n+\tcase RTE_HTIMER_STATE_PENDING:\n+\t\trte_htw_cancel(mgr->htw, timer);\n+\t\treturn 0;\n+\tcase RTE_HTIMER_STATE_EXPIRED:\n+\t\treturn -ETIME;\n+\tdefault:\n+\t\tRTE_ASSERT(timer->state == RTE_HTIMER_STATE_CANCELED);\n+\t\treturn -ENOENT;\n+\t}\n+}\n+\n+static int\n+send_msg(unsigned int receiver_lcore_id, enum rte_htimer_msg_type msg_type,\n+\t struct rte_htimer *timer, rte_htimer_mgr_async_op_cb_t async_cb,\n+\t void *async_cb_arg, const struct rte_htimer_msg_request *request,\n+\t const 
struct rte_htimer_msg_response *response)\n+{\n+\tstruct htimer_mgr *receiver_mgr;\n+\tstruct rte_htimer_msg_ring *receiver_ring;\n+\tstruct rte_htimer_msg msg = (struct rte_htimer_msg) {\n+\t\t.msg_type = msg_type,\n+\t\t.timer = timer,\n+\t\t.async_cb = async_cb,\n+\t\t.async_cb_arg = async_cb_arg\n+\t};\n+\tint rc;\n+\n+\tif (request != NULL)\n+\t\tmsg.request = *request;\n+\telse\n+\t\tmsg.response = *response;\n+\n+\treceiver_mgr = mgr_get(receiver_lcore_id);\n+\n+\treceiver_ring = receiver_mgr->msg_ring;\n+\n+\trc = rte_htimer_msg_ring_enqueue(receiver_ring, &msg);\n+\n+\treturn rc;\n+}\n+\n+static int\n+send_request(unsigned int receiver_lcore_id, enum rte_htimer_msg_type msg_type,\n+\t     struct rte_htimer *timer,\n+\t     rte_htimer_mgr_async_op_cb_t async_cb, void *async_cb_arg)\n+{\n+\tunsigned int lcore_id = rte_lcore_id();\n+\tstruct rte_htimer_msg_request request = {\n+\t\t.source_lcore_id = lcore_id\n+\t};\n+\n+\treturn send_msg(receiver_lcore_id, msg_type, timer, async_cb,\n+\t\t\tasync_cb_arg, &request, NULL);\n+}\n+\n+static int\n+send_response(unsigned int receiver_lcore_id, enum rte_htimer_msg_type msg_type,\n+\t      struct rte_htimer *timer,\n+\t      rte_htimer_mgr_async_op_cb_t async_cb, void *async_cb_arg,\n+\t      int result)\n+{\n+\tstruct rte_htimer_msg_response response = {\n+\t\t.result = result\n+\t};\n+\n+\treturn send_msg(receiver_lcore_id, msg_type, timer, async_cb,\n+\t\t\tasync_cb_arg, NULL, &response);\n+}\n+\n+int\n+rte_htimer_mgr_async_add(struct rte_htimer *timer,\n+\t\t\t unsigned int target_lcore_id,\n+\t\t\t uint64_t expiration_time, uint64_t period,\n+\t\t\t rte_htimer_cb_t timer_cb, void *timer_cb_arg,\n+\t\t\t uint32_t flags,\n+\t\t\t rte_htimer_mgr_async_op_cb_t async_cb,\n+\t\t\t void *async_cb_arg)\n+{\n+\t*timer = (struct rte_htimer) {\n+\t\t.expiration_time = expiration_time,\n+\t\t.period = period,\n+\t\t.owner_lcore_id = target_lcore_id,\n+\t\t.flags = flags,\n+\t\t.cb = timer_cb,\n+\t\t.cb_arg = timer_cb_arg\n+\t};\n+\n+\tassure_initialized();\n+\n+\tif (send_request(target_lcore_id, rte_htimer_msg_type_add_request,\n+\t\t\t timer, async_cb, async_cb_arg) < 0)\n+\t\treturn -EBUSY;\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_htimer_mgr_async_cancel(struct rte_htimer *timer,\n+\t\t\t    rte_htimer_mgr_async_op_cb_t async_cb,\n+\t\t\t    void *async_cb_arg)\n+{\n+\tif (send_request(timer->owner_lcore_id,\n+\t\t\t rte_htimer_msg_type_cancel_request,\n+\t\t\t timer, async_cb, async_cb_arg) < 0)\n+\t\treturn -EBUSY;\n+\n+\treturn 0;\n+}\n+\n+static int\n+process_add_request(struct rte_htimer_msg *request)\n+{\n+\tstruct rte_htimer *timer = request->timer;\n+\n+\tif (request->async_cb != NULL &&\n+\t    send_response(request->request.source_lcore_id,\n+\t\t\t  rte_htimer_msg_type_add_response, timer,\n+\t\t\t  request->async_cb, request->async_cb_arg,\n+\t\t\t  RTE_HTIMER_MGR_ASYNC_RESULT_ADDED) < 0)\n+\t\treturn -EBUSY;\n+\n+\trte_htimer_mgr_add(timer, timer->expiration_time, timer->period,\n+\t\t\t   timer->cb, timer->cb_arg, timer->flags);\n+\n+\treturn 0;\n+}\n+\n+static int\n+process_cancel_request(struct rte_htimer_msg *request)\n+{\n+\tunsigned int lcore_id = rte_lcore_id();\n+\tstruct htimer_mgr *mgr = mgr_get(lcore_id);\n+\tstruct rte_htimer *timer = request->timer;\n+\tint result;\n+\n+\tswitch (timer->state) {\n+\tcase RTE_HTIMER_STATE_PENDING:\n+\t\tresult = RTE_HTIMER_MGR_ASYNC_RESULT_CANCELED;\n+\t\tbreak;\n+\tcase RTE_HTIMER_STATE_CANCELED:\n+\t\tresult = RTE_HTIMER_MGR_ASYNC_RESULT_ALREADY_CANCELED;\n+\t\tbreak;\n+\tcase 
RTE_HTIMER_STATE_EXPIRED:\n+\t\tresult = RTE_HTIMER_MGR_ASYNC_RESULT_EXPIRED;\n+\t\tbreak;\n+\tdefault:\n+\t\tRTE_ASSERT(0);\n+\t\tresult = -1;\n+\t}\n+\n+\tif (request->async_cb != NULL &&\n+\t    send_response(request->request.source_lcore_id,\n+\t\t\t  rte_htimer_msg_type_cancel_response, timer,\n+\t\t\t  request->async_cb, request->async_cb_arg,\n+\t\t\t  result) < 0)\n+\t\treturn -EBUSY;\n+\n+\tif (timer->state == RTE_HTIMER_STATE_PENDING)\n+\t\trte_htw_cancel(mgr->htw, timer);\n+\n+\treturn 0;\n+}\n+\n+static int\n+process_response(struct rte_htimer_msg *msg)\n+{\n+\tstruct rte_htimer_msg_response *response = &msg->response;\n+\n+\tif (msg->async_cb != NULL)\n+\t\tmsg->async_cb(msg->timer, response->result, msg->async_cb_arg);\n+\n+\treturn 0;\n+}\n+\n+static int\n+process_msg(struct rte_htimer_msg *msg)\n+{\n+\tswitch (msg->msg_type) {\n+\tcase rte_htimer_msg_type_add_request:\n+\t\treturn process_add_request(msg);\n+\tcase rte_htimer_msg_type_cancel_request:\n+\t\treturn process_cancel_request(msg);\n+\tcase rte_htimer_msg_type_add_response:\n+\tcase rte_htimer_msg_type_cancel_response:\n+\t\treturn process_response(msg);\n+\tdefault:\n+\t\tRTE_ASSERT(0);\n+\t\treturn -EBUSY;\n+\t}\n+}\n+\n+static void\n+dequeue_async_msgs(struct htimer_mgr *mgr)\n+{\n+\tunsigned int i;\n+\n+\tif (likely(rte_htimer_msg_ring_empty(mgr->msg_ring)))\n+\t\treturn;\n+\n+\tif (unlikely(mgr->num_async_msgs > 0))\n+\t\treturn;\n+\n+\tmgr->async_msgs_idx = 0;\n+\n+\tmgr->num_async_msgs =\n+\t\trte_htimer_msg_ring_dequeue_burst(mgr->msg_ring,\n+\t\t\t\t\t\t  mgr->async_msgs,\n+\t\t\t\t\t\t  MAX_MSG_BATCH_SIZE);\n+\n+\tfor (i = 0; i < mgr->num_async_msgs; i++)\n+\t\trte_prefetch1(mgr->async_msgs[i].timer);\n+}\n+\n+static void\n+process_async(struct htimer_mgr *mgr)\n+{\n+\tfor (;;) {\n+\t\tstruct rte_htimer_msg *msg;\n+\n+\t\tdequeue_async_msgs(mgr);\n+\n+\t\tif (mgr->num_async_msgs == 0)\n+\t\t\tbreak;\n+\n+\t\tmsg = &mgr->async_msgs[mgr->async_msgs_idx];\n+\n+\t\tif (process_msg(msg) < 0)\n+\t\t\tbreak;\n+\n+\t\tmgr->num_async_msgs--;\n+\t\tmgr->async_msgs_idx++;\n+\t}\n+}\n+\n+static __rte_always_inline void\n+htimer_mgr_manage_time(uint64_t current_time, uint32_t flags)\n+{\n+\tunsigned int lcore_id = rte_lcore_id();\n+\tstruct htimer_mgr *mgr = mgr_get(lcore_id);\n+\tuint64_t current_tick;\n+\n+\tassure_initialized();\n+\n+\tassure_valid_time_conversion_flags(flags);\n+\n+\tprocess_async(mgr);\n+\n+\tcurrent_tick = convert_time(current_time, flags);\n+\n+\trte_htw_manage(mgr->htw, current_tick);\n+}\n+\n+void\n+rte_htimer_mgr_manage_time(uint64_t current_time, uint32_t flags)\n+{\n+\thtimer_mgr_manage_time(current_time, flags);\n+}\n+\n+void\n+rte_htimer_mgr_manage(void)\n+{\n+\tuint64_t current_time;\n+\n+\tcurrent_time = rte_get_tsc_cycles();\n+\n+\thtimer_mgr_manage_time(current_time, RTE_HTIMER_FLAG_TIME_TSC);\n+}\n+\n+void\n+rte_htimer_mgr_process(void)\n+{\n+\tunsigned int lcore_id = rte_lcore_id();\n+\tstruct htimer_mgr *mgr = mgr_get(lcore_id);\n+\n+\tprocess_async(mgr);\n+\tassure_initialized();\n+\n+\trte_htw_process(mgr->htw);\n+}\n+\n+uint64_t\n+rte_htimer_mgr_current_time(void)\n+{\n+\tuint64_t current_tick;\n+\n+\tcurrent_tick = rte_htimer_mgr_current_tick();\n+\n+\treturn tick_to_ns(current_tick);\n+}\n+\n+uint64_t\n+rte_htimer_mgr_current_tick(void)\n+{\n+\tunsigned int lcore_id = rte_lcore_id();\n+\tstruct htimer_mgr *mgr = mgr_get(lcore_id);\n+\tuint64_t current_tick;\n+\n+\tcurrent_tick = rte_htw_current_time(mgr->htw);\n+\n+\treturn current_tick;\n+}\ndiff --git 
a/lib/htimer/rte_htimer_mgr.h b/lib/htimer/rte_htimer_mgr.h\nnew file mode 100644\nindex 0000000000..173a95f9c0\n--- /dev/null\n+++ b/lib/htimer/rte_htimer_mgr.h\n@@ -0,0 +1,554 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#ifndef _RTE_HTIMER_MGR_H_\n+#define _RTE_HTIMER_MGR_H_\n+\n+/**\n+ * @file\n+ *\n+ * RTE High-performance Timer Manager\n+ *\n+ * The high-performance timer manager (htimer_mgr) API provides access\n+ * to a low-overhead, scalable timer service.\n+ *\n+ * The functionality offered is similar to that of <rte_timer.h>, but\n+ * the internals differ significantly, and there are slight\n+ * differences in the programming interface as well.\n+ *\n+ * Core timer management is implemented by means of a hierarchical\n+ * timer wheel (HTW), as per the Varghese and Lauck paper <em>Hashed\n+ * and Hierarchical Timing Wheels: Data Structures for the Efficient\n+ * Implementation of a Timer Facility</em>.\n+ *\n+ * Varghese et al.'s approach is further enhanced by the placement of\n+ * a bitset in front of each wheel's slots. Each slot has a\n+ * corresponding bit in the bitset. If a bit is clear, there are no\n+ * pending timers scheduled for that slot. A set bit means there\n+ * potentially are timers scheduled for that slot. This scheme reduces\n+ * the overhead of the rte_htimer_mgr_manage() function, where slots\n+ * of one or more of the wheels of the thread's HTW are scanned if\n+ * time has progressed since the last call. This improves performance\n+ * in all cases, except for very densely populated timer wheels.\n+ *\n+ * One such HTW is instantiated for each lcore (EAL thread), and\n+ * instances are also available for registered non-EAL threads.\n+ *\n+ * The <rte_htimer_mgr.h> API may not be called from unregistered\n+ * non-EAL threads.\n+ *\n+ * The per-lcore-id HTW instance is private to that thread.\n+ *\n+ * The htimer API supports scheduling timers to a different thread\n+ * (and thus, a different HTW) than the caller's. It is also possible\n+ * to cancel timers managed by a \"remote\" timer wheel.\n+ *\n+ * All interaction with a remote HTW (i.e., adding timers to it, or\n+ * removing timers from it) is done by sending a request, in the form\n+ * of a message on a DPDK ring, to that instance. Such requests are\n+ * processed and, if required, acknowledged when the remote (target)\n+ * thread calls rte_htimer_mgr_manage(), rte_htimer_mgr_manage_time()\n+ * or rte_htimer_mgr_process().\n+ *\n+ * This message-based interaction avoids comparatively heavy-weight\n+ * synchronization primitives such as spinlocks. Only release-acquire\n+ * type synchronization on the rings is needed.\n+ *\n+ * Timer memory management is the responsibility of the\n+ * application. After library-level initialization has completed, no\n+ * more dynamic memory is allocated by the htimer library. When\n+ * installing timers on remote lcores, care must be taken by the\n+ * application to avoid race conditions, in particular use-after-free\n+ * (or use-after-recycle) issues of the rte_htimer structure. A timer\n+ * struct may only be deallocated and/or recycled if the application\n+ * can guarantee that there are no cancel requests in flight.\n+ *\n+ * The htimer library is able to give a definitive answer to the\n+ * question whether a remote timer had expired or not at the time of\n+ * cancellation.\n+ *\n+ * The htimer library uses TSC as the default time source. 
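With TSC as the time\n+ * source, a typical lcore main loop could look like this (a sketch;\n+ * process_packets() is an illustrative application function):\n+ *\n+ * @code{.c}\n+ * for (;;) {\n+ *         process_packets();\n+ *         rte_htimer_mgr_manage();\n+ * }\n+ * @endcode\n+ *\n+ * 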
A different\n+ * time source may be used, in which case the application must\n+ * explicitly provide the time using rte_htimer_mgr_manage_time().\n+ * This function may also be used even if TSC is the time source, in\n+ * cases where the application is already in possession of the current\n+ * TSC time for some other purpose, to avoid the overhead of htimer's\n+ * `rdtsc` instruction (or its equivalent on non-x86 ISAs).\n+ *\n+ * The htimer library supports periodic and single-shot timers.\n+ *\n+ * The timer tick defines a quantum of time in the htimer library. The\n+ * length of a tick (quantified in nanoseconds) is left to the\n+ * application to specify. The core HTW implementation allows for all\n+ * 64 bits to be used.\n+ *\n+ * Very fine-grained ticks increase the HTW overhead (since more slots\n+ * need to be scanned). Long ticks will only allow for very\n+ * coarse-grained timers, and in timer-heavy applications may cause\n+ * load spikes when time advances into a new tick.\n+ *\n+ * Reasonable timer tick lengths range between 100 ns and 100 us\n+ * (or maybe up to as high as 1 ms), depending on the\n+ * application.\n+ */\n+\n+#include <stdint.h>\n+\n+#include <rte_common.h>\n+#include <rte_compat.h>\n+#include <rte_htimer.h>\n+\n+/**\n+ * The timer has been added to the timer manager on the target lcore.\n+ */\n+#define RTE_HTIMER_MGR_ASYNC_RESULT_ADDED 1\n+\n+/**\n+ * The timer cancellation request completed before the timer expired\n+ * on the target lcore.\n+ */\n+#define RTE_HTIMER_MGR_ASYNC_RESULT_CANCELED 2\n+\n+/**\n+ * The timer cancellation request was denied, since the timer was\n+ * already marked as canceled.\n+ */\n+#define RTE_HTIMER_MGR_ASYNC_RESULT_ALREADY_CANCELED 3\n+\n+/**\n+ * At the time the cancellation request was processed on the target\n+ * lcore, the timer had already expired.\n+ */\n+#define RTE_HTIMER_MGR_ASYNC_RESULT_EXPIRED 4\n+\n+typedef void (*rte_htimer_mgr_async_op_cb_t)(struct rte_htimer *timer,\n+\t\t\t\t\t     int result, void *cb_arg);\n+\n+/**\n+ * Initialize the htimer library.\n+ *\n+ * Instantiates per-lcore (or per-registered non-EAL thread) timer\n+ * wheels and other htimer library data structures, for all current\n+ * and future threads.\n+ *\n+ * This function must be called prior to any other <rte_htimer.h> API\n+ * call.\n+ *\n+ * This function may not be called if the htimer library is already\n+ * initialized, but may be called multiple times, provided the library\n+ * is deinitialized in between rte_htimer_mgr_init() calls.\n+ *\n+ * For applications not using TSC as the time source, the \\c ns_per_tick\n+ * parameter will denote the number of such application time-source-units\n+ * per tick.\n+ *\n+ * This function is not multi-thread safe.\n+ *\n+ * @param ns_per_tick\n+ *   The length (in nanoseconds) of a timer wheel tick.\n+ *\n+ * @return\n+ *   - 0: Success\n+ *   - -ENOMEM: Unable to allocate memory needed to initialize timer\n+ *      subsystem\n+ *\n+ * @see rte_htimer_mgr_deinit()\n+ * @see rte_get_tsc_hz()\n+ */\n+\n+__rte_experimental\n+int\n+rte_htimer_mgr_init(uint64_t ns_per_tick);\n+\n+/**\n+ * Deinitialize the htimer library.\n+ *\n+ * This function deallocates all dynamic memory used by the library,\n+ * including HTW instances used by other threads than the caller.\n+ *\n+ * After this call has been made, no <rte_htimer.h> API call may be\n+ * made, except rte_htimer_mgr_init().\n+ *\n+ * This function may not be called if the htimer library has never been\n+ * initialized, or has been deinitialized 
but not yet initialized\n+ * again.\n+ *\n+ * This function is not multi-thread safe. In particular, no thread\n+ * may call any <rte_htimer.h> functions (e.g., rte_htimer_mgr_manage())\n+ * while (or after) the htimer library is deinitialized, unless it is\n+ * initialized again.\n+ *\n+ * @see rte_htimer_mgr_init()\n+ */\n+\n+__rte_experimental\n+void\n+rte_htimer_mgr_deinit(void);\n+\n+/**\n+ * Add a timer to the calling thread's timer wheel.\n+ *\n+ * This function schedules a timer on the calling thread's HTW.\n+ *\n+ * The \\c timer_cb callback is called at a point when this thread\n+ * calls rte_htimer_mgr_process(), rte_htimer_mgr_manage(), or\n+ * rte_htimer_mgr_manage_time() and the expiration time has passed the\n+ * current time (either as retrieved by rte_htimer_mgr_manage() or\n+ * specified by the application in rte_htimer_mgr_manage_time()).\n+ *\n+ * The HTW tracks time in units of \\c ticks, which are likely more\n+ * coarse-grained than nanosecond and TSC resolution.\n+ *\n+ * By default, the \\c expiration_time is interpreted as the number of\n+ * nanoseconds into the future the timer should expire, relative to\n+ * the last known current time, rounded up to the nearest tick. Thus,\n+ * a timer with a certain expiration time may not expire even though\n+ * this time was supplied to rte_htimer_mgr_manage_time(). The\n+ * maximum error is the length of one tick (plus any delays caused by\n+ * infrequent manage calls).\n+ *\n+ * If the \\c RTE_HTIMER_FLAG_ABSOLUTE_TIME flag is set in \\c flags,\n+ * the expiration time is relative to time zero.\n+ *\n+ * If the \\c RTE_HTIMER_FLAG_PERIODICAL flag is set, the timer is\n+ * periodical, and will first expire at the time specified by\n+ * the \\c expiration_time, and then at intervals specified by the\n+ * \\c period parameter.\n+ *\n+ * An added timer may be canceled using rte_htimer_mgr_cancel() or\n+ * rte_htimer_mgr_async_cancel().\n+ *\n+ * rte_htimer_mgr_add() is multi-thread safe, and may only be called\n+ * from an EAL thread or a registered non-EAL thread.\n+ *\n+ * @param timer\n+ *   The chunk of memory used for managing this timer. This memory\n+ *   must not be read or written (or freed) by the application until\n+ *   this timer has expired, or any cancellation attempts have\n+ *   completed.\n+ * @param expiration_time\n+ *   The expiration time (in nanoseconds by default). For periodical\n+ *   timers, this time represents the first expiration time.\n+ * @param period\n+ *   The time in between periodic timer expirations (in nanoseconds by\n+ *   default).  
Must be set to zero unless the\n+ *   \\c RTE_HTIMER_FLAG_PERIODICAL flag is set, in which case it must\n+ *   be a positive integer.\n+ * @param timer_cb\n+ *   The timer callback to be called upon timer expiration.\n+ * @param timer_cb_arg\n+ *   A pointer which will be supplied back to the application in the\n+ *   timer callback call.\n+ * @param flags\n+ *   A bitmask which may contain these flags:\n+ *     * \\c RTE_HTIMER_FLAG_PERIODICAL\n+ *     * \\c RTE_HTIMER_FLAG_ABSOLUTE_TIME\n+ *     * Either \\c RTE_HTIMER_FLAG_TIME_TICK or \\c RTE_HTIMER_FLAG_TIME_TSC\n+ */\n+\n+__rte_experimental\n+void\n+rte_htimer_mgr_add(struct rte_htimer *timer, uint64_t expiration_time,\n+\t\t   uint64_t period, rte_htimer_cb_t timer_cb,\n+\t\t   void *timer_cb_arg, uint32_t flags);\n+\n+/**\n+ * Cancel a timer scheduled in the calling thread's timer wheel.\n+ *\n+ * This function cancels a timer scheduled on the calling thread's HTW.\n+ *\n+ * rte_htimer_mgr_cancel() may be called on a timer which has already\n+ * (synchronously or asynchronously) been canceled, or which may have\n+ * expired. However, the \\c rte_htimer struct pointed to by \\c timer\n+ * may not have been freed or recycled since.\n+ *\n+ * rte_htimer_mgr_cancel() may not be called for a timer that was\n+ * never (or, not yet) added.\n+ *\n+ * A timer added using rte_htimer_mgr_async_add() may not be\n+ * canceled using this function until after the add operation has\n+ * completed (i.e., the completion callback has been run).\n+ *\n+ * rte_htimer_mgr_cancel() is multi-thread safe, and may only be\n+ * called from an EAL thread or a registered non-EAL thread.\n+ *\n+ * @param timer\n+ *   The timer to be canceled.\n+ * @return\n+ *   - 0: Success\n+ *   - -ETIME: Timer has expired, and thus could not be canceled.\n+ *   - -ENOENT: Timer was already canceled.\n+ */\n+\n+__rte_experimental\n+int\n+rte_htimer_mgr_cancel(struct rte_htimer *timer);\n+\n+/**\n+ * Asynchronously add a timer to the specified lcore's timer wheel.\n+ *\n+ * This function is the equivalent of rte_htimer_mgr_add(), but allows\n+ * the calling (\"source\") thread to schedule a timer in an HTW other\n+ * than its own. The operation is asynchronous.\n+ *\n+ * The timer works the same as a timer added locally. Thus, the \\c\n+ * timer_cb callback is called by the target thread, and it may be\n+ * canceled using rte_htimer_mgr_cancel().\n+ *\n+ * The source thread may be the same as the target thread.\n+ *\n+ * Only EAL threads or registered non-EAL threads may be targeted.\n+ *\n+ * A successful rte_htimer_mgr_async_add() call guarantees that the\n+ * timer will be scheduled on the target lcore at some future time,\n+ * provided the target thread calls rte_htimer_mgr_process(),\n+ * rte_htimer_mgr_manage(), or rte_htimer_mgr_manage_time().\n+ *\n+ * The \\c async_cb callback is called on the source thread as a part\n+ * of its rte_htimer_mgr_process(), rte_htimer_mgr_manage(), or\n+ * rte_htimer_mgr_manage_time() call, when the asynchronous add\n+ * operation has completed (i.e., the timer is scheduled in the target\n+ * HTW).\n+ *\n+ * \\c async_cb may be NULL, in which case no notification is given.\n+ *\n+ * An asynchronously added timer may be asynchronously canceled (i.e.,\n+ * using rte_htimer_mgr_async_cancel()) at any point, by any thread,\n+ * after the rte_htimer_mgr_async_add() call. 
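A minimal\n+ * usage sketch (the names target_lcore_id, expiry_cb and added_cb are\n+ * illustrative only; error handling is omitted):\n+ *\n+ * @code{.c}\n+ * static void\n+ * added_cb(struct rte_htimer *timer, int result, void *cb_arg)\n+ * {\n+ *         // runs on the source lcore, from a manage/process call\n+ * }\n+ *\n+ * rte_htimer_mgr_async_add(&timer, target_lcore_id, 10 * 1000 * 1000,\n+ *                          0, expiry_cb, NULL, 0, added_cb, NULL);\n+ * @endcode\n+ *\n+ * 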
An asynchronously added\n+ * timer may not be canceled using rte_htimer_mgr_cancel() until\n+ * after the completion callback has been executed.\n+ *\n+ * rte_htimer_mgr_async_add() is multi-thread safe, and may only be called\n+ * from an EAL thread or a registered non-EAL thread.\n+ *\n+ * @param timer\n+ *   The chunk of memory used for managing this timer. This memory\n+ *   must not be read or written (or freed) by the application until\n+ *   this timer has expired, or any cancellation attempts have\n+ *   completed.\n+ * @param target_lcore_id\n+ *   The lcore id of the thread whose HTW will manage this timer.\n+ * @param expiration_time\n+ *   The expiration time (measured in nanoseconds). For periodical\n+ *   timers, this time represents the first expiration time.\n+ * @param period\n+ *   The time in between periodic timer expirations (measured in\n+ *   nanoseconds).  Must be set to zero unless the\n+ *   RTE_HTIMER_FLAG_PERIODICAL flag is set, in which case it must be\n+ *   a positive integer.\n+ * @param timer_cb\n+ *   The timer callback to be called upon timer expiration.\n+ * @param timer_cb_arg\n+ *   A pointer which will be supplied back to the application in the\n+ *   timer callback call.\n+ * @param async_cb\n+ *   The asynchronous operation callback to be called when the\n+ *   add operation has completed.\n+ * @param async_cb_arg\n+ *   A pointer which will be supplied back to the application in the\n+ *   \\c async_cb callback call.\n+ * @param flags\n+ *   RTE_HTIMER_FLAG_ABSOLUTE_TIME and/or RTE_HTIMER_FLAG_PERIODICAL.\n+ * @return\n+ *   - 0: Success\n+ *   - -EBUSY: The maximum number of concurrently queued asynchronous\n+ *      operations has been reached.\n+ */\n+\n+__rte_experimental\n+int\n+rte_htimer_mgr_async_add(struct rte_htimer *timer,\n+\t\t\t unsigned int target_lcore_id,\n+\t\t\t uint64_t expiration_time, uint64_t period,\n+\t\t\t rte_htimer_cb_t timer_cb, void *timer_cb_arg,\n+\t\t\t uint32_t flags,\n+\t\t\t rte_htimer_mgr_async_op_cb_t async_cb,\n+\t\t\t void *async_cb_arg);\n+\n+/**\n+ * Asynchronously cancel a timer in any thread's timer wheel.\n+ *\n+ * This function is the equivalent of rte_htimer_mgr_cancel(), but\n+ * allows the calling (\"source\") thread to also cancel a timer in an\n+ * HTW other than its own. The operation is asynchronous.\n+ *\n+ * A thread may asynchronously cancel a timer scheduled on its own\n+ * HTW.\n+ *\n+ * The \\c async_cb callback is called on the source thread as a part\n+ * of its rte_htimer_mgr_process(), rte_htimer_mgr_manage(), or\n+ * rte_htimer_mgr_manage_time() call, when the asynchronous cancel\n+ * operation has completed (i.e., the request has been processed by\n+ * the target thread).\n+ *\n+ * \\c async_cb may be NULL, in which case no notification is given.\n+ *\n+ * A timer may be asynchronously canceled at any point, by any thread,\n+ * after it has been either synchronously or asynchronously added.\n+ *\n+ * rte_htimer_mgr_async_cancel() is multi-thread safe, and may only be\n+ * called from an EAL thread or a registered non-EAL thread.\n+ *\n+ * @param timer\n+ *   The memory used for managing this timer. 
This memory must not be\n+ *   read or written (or freed) by the application until this timer\n+ *   has expired, or any cancellation attempts have completed.\n+ * @param async_cb\n+ *   The asynchronous operation callback to be called when the\n+ *   cancel operation has completed.\n+ * @param async_cb_arg\n+ *   A pointer which will be supplied back to the application in the\n+ *   \\c async_cb callback call.\n+ * @return\n+ *   - 0: Success\n+ *   - -EBUSY: The maximum number of concurrently queued asynchronous\n+ *      operations has been reached.\n+ */\n+\n+__rte_experimental\n+int\n+rte_htimer_mgr_async_cancel(struct rte_htimer *timer,\n+\t\t\t    rte_htimer_mgr_async_op_cb_t async_cb,\n+\t\t\t    void *async_cb_arg);\n+\n+/**\n+ * Update HTW time and perform timer expiry and asynchronous operation\n+ * processing.\n+ *\n+ * This function is the equivalent of retrieving the current TSC time,\n+ * and calling rte_htimer_mgr_manage_time().\n+ *\n+ * rte_htimer_mgr_manage() is multi-thread safe, and may only be\n+ * called from an EAL thread or a registered non-EAL thread.\n+ */\n+\n+__rte_experimental\n+void\n+rte_htimer_mgr_manage(void);\n+\n+/**\n+ * Progress HTW time, and perform timer expiry and asynchronous\n+ * operation processing.\n+ *\n+ * This function progresses the calling thread's HTW up to the point\n+ * specified by \\c current_time, calling the callbacks of any expired\n+ * timers.\n+ *\n+ * The time source must be a monotonic clock, and thus each new \\c\n+ * current_time must be equal to or greater than the time supplied in\n+ * the previous call.\n+ *\n+ * The timer precision for timers scheduled on a particular thread's\n+ * HTW depends on how frequently that thread calls this function.\n+ *\n+ * rte_htimer_mgr_manage_time() also performs asynchronous operation\n+ * processing. See rte_htimer_mgr_process() for details.\n+ *\n+ * rte_htimer_mgr_manage_time() is multi-thread safe, and may only be\n+ * called from an EAL thread or a registered non-EAL thread.\n+ *\n+ * @param current_time\n+ *   The current time (in nanoseconds, by default).\n+ * @param flags\n+ *   Either \\c RTE_HTIMER_FLAG_TIME_TICK or \\c RTE_HTIMER_FLAG_TIME_TSC.\n+ */\n+\n+__rte_experimental\n+void\n+rte_htimer_mgr_manage_time(uint64_t current_time, uint32_t flags);\n+\n+/**\n+ * Perform asynchronous operation processing.\n+ *\n+ * rte_htimer_mgr_process() serves pending asynchronous add or cancel\n+ * requests, and produces the necessary responses. 
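For example, a thread\n+ * with no local timers may still need to call it regularly, to serve\n+ * requests from other lcores (a sketch; do_work() is an illustrative\n+ * application function):\n+ *\n+ * @code{.c}\n+ * for (;;) {\n+ *         do_work();\n+ *         rte_htimer_mgr_process();\n+ * }\n+ * @endcode\n+ *\n+ * 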
The timer callbacks\n+ * of any added timers that have already expired are called.\n+ *\n+ * This function also processes asynchronous operation response\n+ * messages received, and calls the asynchronous callbacks, if any\n+ * were provided by the application.\n+ *\n+ * rte_htimer_mgr_process() is multi-thread safe, and may only be\n+ * called from an EAL thread or a registered non-EAL thread.\n+ */\n+\n+__rte_experimental\n+void\n+rte_htimer_mgr_process(void);\n+\n+/**\n+ * Return the current local HTW time in nanoseconds.\n+ *\n+ * This function returns the most recent time provided by this thread,\n+ * either via rte_htimer_mgr_manage_time(), or as sampled by\n+ * rte_htimer_mgr_manage().\n+ *\n+ * The initial time, prior to any manage-calls, is 0.\n+ *\n+ * rte_htimer_mgr_current_time() is multi-thread safe, and may only be\n+ * called from an EAL thread or a registered non-EAL thread.\n+ */\n+\n+__rte_experimental\n+uint64_t\n+rte_htimer_mgr_current_time(void);\n+\n+/**\n+ * Return the current local HTW time in ticks.\n+ *\n+ * This function returns the current time of the calling thread's HTW. The\n+ * tick is the current time provided by the application (via\n+ * rte_htimer_mgr_manage_time()), or as retrieved (using\n+ * rte_get_tsc_cycles() in rte_htimer_mgr_manage()), divided by the\n+ * tick length (as provided in rte_htimer_mgr_init()).\n+ *\n+ * The initial time, prior to any manage-calls, is 0.\n+ *\n+ * rte_htimer_mgr_current_tick() is multi-thread safe, and may only be\n+ * called from an EAL thread or a registered non-EAL thread.\n+ */\n+\n+__rte_experimental\n+uint64_t\n+rte_htimer_mgr_current_tick(void);\n+\n+#endif\ndiff --git a/lib/htimer/rte_htimer_msg.h b/lib/htimer/rte_htimer_msg.h\nnew file mode 100644\nindex 0000000000..ceb106e263\n--- /dev/null\n+++ b/lib/htimer/rte_htimer_msg.h\n@@ -0,0 +1,42 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#ifndef _RTE_HTIMER_MSG_\n+#define _RTE_HTIMER_MSG_\n+\n+#include <rte_htimer.h>\n+\n+typedef void (*rte_htimer_msg_async_op_cb_t)(struct rte_htimer *timer,\n+\t\t\t\t\t     int result, void *cb_arg);\n+\n+enum rte_htimer_msg_type {\n+\trte_htimer_msg_type_add_request,\n+\trte_htimer_msg_type_add_response,\n+\trte_htimer_msg_type_cancel_request,\n+\trte_htimer_msg_type_cancel_response\n+};\n+\n+struct rte_htimer_msg_request {\n+\tunsigned int source_lcore_id;\n+};\n+\n+struct rte_htimer_msg_response {\n+\tint result;\n+};\n+\n+struct rte_htimer_msg {\n+\tenum rte_htimer_msg_type msg_type;\n+\n+\tstruct rte_htimer *timer;\n+\n+\trte_htimer_msg_async_op_cb_t async_cb;\n+\tvoid *async_cb_arg;\n+\n+\tunion {\n+\t\tstruct rte_htimer_msg_request request;\n+\t\tstruct rte_htimer_msg_response response;\n+\t};\n+};\n+\n+#endif\ndiff --git a/lib/htimer/rte_htimer_msg_ring.c b/lib/htimer/rte_htimer_msg_ring.c\nnew file mode 100644\nindex 0000000000..4019b7819a\n--- /dev/null\n+++ b/lib/htimer/rte_htimer_msg_ring.c\n@@ -0,0 +1,22 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#include \"rte_htimer_msg_ring.h\"\n+\n+struct rte_htimer_msg_ring *\n+rte_htimer_msg_ring_create(const char *name, unsigned int count, int socket_id,\n+\t\t\t   unsigned int flags)\n+{\n+\tstruct rte_ring *ring =\n+\t\trte_ring_create_elem(name, sizeof(struct rte_htimer_msg),\n+\t\t\t\t     count, socket_id, flags);\n+\n+\treturn (struct rte_htimer_msg_ring *)ring;\n+}\n+\n+void\n+rte_htimer_msg_ring_free(struct rte_htimer_msg_ring *msg_ring)\n+{\n+\trte_ring_free((struct rte_ring *)msg_ring);\n+}\ndiff --git 
a/lib/htimer/rte_htimer_msg_ring.h b/lib/htimer/rte_htimer_msg_ring.h\nnew file mode 100644\nindex 0000000000..48fcc99189\n--- /dev/null\n+++ b/lib/htimer/rte_htimer_msg_ring.h\n@@ -0,0 +1,58 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#ifndef _RTE_HTIMER_MSG_RING_\n+#define _RTE_HTIMER_MSG_RING_\n+\n+#include <rte_ring.h>\n+\n+#include \"rte_htimer_msg.h\"\n+\n+struct rte_htimer_msg_ring {\n+\tstruct rte_ring ring;\n+};\n+\n+struct rte_htimer_msg_ring *\n+rte_htimer_msg_ring_create(const char *name, unsigned int count, int socket_id,\n+\t\t\t   unsigned int flags);\n+\n+void\n+rte_htimer_msg_ring_free(struct rte_htimer_msg_ring *msg_ring);\n+\n+static inline int\n+rte_htimer_msg_ring_empty(struct rte_htimer_msg_ring *msg_ring)\n+{\n+\treturn rte_ring_empty(&msg_ring->ring);\n+}\n+\n+static inline unsigned int\n+rte_htimer_msg_ring_dequeue_burst(struct rte_htimer_msg_ring *msg_ring,\n+\t\t\t\t  struct rte_htimer_msg *msgs,\n+\t\t\t\t  unsigned int n)\n+{\n+\tunsigned int dequeued;\n+\n+\tdequeued = rte_ring_dequeue_burst_elem(&msg_ring->ring, msgs,\n+\t\t\t\t\t       sizeof(struct rte_htimer_msg),\n+\t\t\t\t\t       n, NULL);\n+\n+\treturn dequeued;\n+}\n+\n+/* returns 0 on success, or a negative errno (e.g., -ENOBUFS) from\n+ * rte_ring_enqueue_elem() on failure\n+ */\n+static inline int\n+rte_htimer_msg_ring_enqueue(struct rte_htimer_msg_ring *msg_ring,\n+\t\t\t    struct rte_htimer_msg *msg)\n+{\n+\tint rc;\n+\n+\trc = rte_ring_enqueue_elem(&msg_ring->ring, msg,\n+\t\t\t\t   sizeof(struct rte_htimer_msg));\n+\n+\treturn rc;\n+}\n+\n+#endif\ndiff --git a/lib/htimer/rte_htw.c b/lib/htimer/rte_htw.c\nnew file mode 100644\nindex 0000000000..67fcb8c623\n--- /dev/null\n+++ b/lib/htimer/rte_htw.c\n@@ -0,0 +1,451 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+/*\n+ * This is an implementation of a hierarchical timer wheel based on\n+ * Hashed and Hierarchical Timing Wheels: Data Structures\n+ * for the Efficient Implementation of a Timer Facility by Varghese and\n+ * Lauck.\n+ *\n+ * To improve efficiency when the slots are sparsely populated (i.e.,\n+ * many ticks do not have any timers), each slot is represented by a\n+ * bit in a separately-managed, per-wheel bitset. This allows for\n+ * very efficient scanning. 
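For example, with the\n+ * 256-slot wheels used here, a wheel's bitset fits in four 64-bit\n+ * words, so scanning a mostly-empty wheel touches very little memory.\n+ * 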
The cost of managing this bitset is small.\n+ */\n+\n+#include <rte_bitset.h>\n+#include <rte_branch_prediction.h>\n+#include <rte_debug.h>\n+#include <rte_errno.h>\n+#include <rte_malloc.h>\n+\n+#include \"rte_htw.h\"\n+\n+#define TICK_BITS 64\n+\n+#define WHEEL_BITS 8\n+#define WHEEL_SLOTS (1U << WHEEL_BITS)\n+#define WHEEL_LEVELS (TICK_BITS / WHEEL_BITS)\n+\n+struct wheel {\n+\tuint64_t wheel_time;\n+\tRTE_BITSET_DECLARE(used_slots, WHEEL_SLOTS);\n+\tstruct rte_htimer_list slots[WHEEL_SLOTS];\n+};\n+\n+struct rte_htw {\n+\tuint64_t current_time;\n+\n+\tstruct wheel wheels[WHEEL_LEVELS];\n+\n+\tstruct rte_htimer_list added;\n+\tstruct rte_htimer_list expiring;\n+\n+\tstruct rte_htimer *running_timer;\n+};\n+\n+static uint64_t\n+time_to_wheel_time(uint64_t t, uint16_t level)\n+{\n+\treturn t >> (level * WHEEL_BITS);\n+}\n+\n+static uint64_t\n+wheel_time_to_time(uint64_t wheel_time, uint16_t level)\n+{\n+\treturn wheel_time << (level * WHEEL_BITS);\n+}\n+\n+static void\n+wheel_init(struct wheel *wheel)\n+{\n+\tuint16_t i;\n+\n+\twheel->wheel_time = 0;\n+\n+\trte_bitset_init(wheel->used_slots, WHEEL_SLOTS);\n+\n+\tfor (i = 0; i < WHEEL_SLOTS; i++)\n+\t\tLIST_INIT(&wheel->slots[i]);\n+}\n+\n+static uint64_t\n+list_next_timeout(struct rte_htimer_list *timers)\n+{\n+\tstruct rte_htimer *timer;\n+\tuint64_t candidate = UINT64_MAX;\n+\n+\tLIST_FOREACH(timer, timers, entry)\n+\t\tcandidate = RTE_MIN(timer->expiration_time, candidate);\n+\n+\treturn candidate;\n+}\n+\n+static uint16_t\n+wheel_time_to_slot(uint64_t wheel_time)\n+{\n+\treturn wheel_time % WHEEL_SLOTS;\n+}\n+\n+static uint64_t\n+wheel_next_timeout(struct wheel *wheel, uint16_t level, uint64_t upper_bound)\n+{\n+\tuint16_t start_slot;\n+\tssize_t slot;\n+\n+\tstart_slot = wheel_time_to_slot(wheel->wheel_time);\n+\n+\tif (wheel_time_to_time(wheel->wheel_time, level) >= upper_bound)\n+\t\treturn upper_bound;\n+\n+\tRTE_BITSET_FOREACH_SET_WRAP(slot, wheel->used_slots, WHEEL_SLOTS,\n+\t\t\t\t    (ssize_t)start_slot, WHEEL_SLOTS) {\n+\t\tstruct rte_htimer_list *timers = &wheel->slots[slot];\n+\t\tuint64_t next_timeout;\n+\n+\t\tnext_timeout = list_next_timeout(timers);\n+\n+\t\tif (next_timeout != UINT64_MAX)\n+\t\t\treturn next_timeout;\n+\t}\n+\n+\treturn UINT64_MAX;\n+}\n+\n+static uint16_t\n+get_slot(uint64_t t, uint16_t level)\n+{\n+\tuint64_t wheel_time;\n+\tuint16_t slot;\n+\n+\twheel_time = time_to_wheel_time(t, level);\n+\tslot = wheel_time_to_slot(wheel_time);\n+\n+\treturn slot;\n+}\n+\n+struct rte_htw *\n+rte_htw_create(void)\n+{\n+\tstruct rte_htw *htw;\n+\tuint16_t level;\n+\n+\tRTE_BUILD_BUG_ON((TICK_BITS % WHEEL_BITS) != 0);\n+\tRTE_BUILD_BUG_ON(sizeof(uint16_t) * CHAR_BIT <= WHEEL_BITS);\n+\n+\thtw = rte_malloc(NULL, sizeof(struct rte_htw), RTE_CACHE_LINE_SIZE);\n+\n+\tif (htw == NULL) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\thtw->current_time = 0;\n+\n+\tLIST_INIT(&htw->added);\n+\tLIST_INIT(&htw->expiring);\n+\n+\tfor (level = 0; level < WHEEL_LEVELS; level++)\n+\t\twheel_init(&htw->wheels[level]);\n+\n+\treturn htw;\n+}\n+\n+void\n+rte_htw_destroy(struct rte_htw *htw)\n+{\n+\trte_free(htw);\n+}\n+\n+static uint16_t\n+get_level(uint64_t remaining_time)\n+{\n+\tint last_set = 64 - __builtin_clzll(remaining_time);\n+\n+\treturn (last_set - 1) / WHEEL_BITS;\n+}\n+\n+static void\n+mark_added(struct rte_htw *htw, struct rte_htimer *timer)\n+{\n+\ttimer->state = 
\n+static void\n+mark_added(struct rte_htw *htw, struct rte_htimer *timer)\n+{\n+\ttimer->state = RTE_HTIMER_STATE_PENDING;\n+\tLIST_INSERT_HEAD(&htw->added, timer, entry);\n+}\n+\n+static void\n+assure_valid_add_params(uint64_t period __rte_unused,\n+\t\t\tuint32_t flags __rte_unused)\n+{\n+\tRTE_ASSERT(!(flags & ~(RTE_HTIMER_FLAG_PERIODICAL |\n+\t\t\t       RTE_HTIMER_FLAG_ABSOLUTE_TIME)));\n+\tRTE_ASSERT(flags & RTE_HTIMER_FLAG_PERIODICAL ?\n+\t\t   period > 0 : period == 0);\n+}\n+\n+void\n+rte_htw_add(struct rte_htw *htw, struct rte_htimer *timer,\n+\t    uint64_t expiration_time, uint64_t period,\n+\t    rte_htimer_cb_t timer_cb, void *timer_cb_arg, uint32_t flags)\n+{\n+\tassure_valid_add_params(period, flags);\n+\n+\tif (flags & RTE_HTIMER_FLAG_ABSOLUTE_TIME)\n+\t\ttimer->expiration_time = expiration_time;\n+\telse\n+\t\ttimer->expiration_time = htw->current_time + expiration_time;\n+\n+\ttimer->period = period;\n+\ttimer->flags = flags;\n+\ttimer->cb = timer_cb;\n+\ttimer->cb_arg = timer_cb_arg;\n+\n+\tmark_added(htw, timer);\n+}\n+\n+void\n+rte_htw_cancel(struct rte_htw *htw, struct rte_htimer *timer)\n+{\n+\t/*\n+\t * One could consider clearing the relevant used_slots bit in\n+\t * case this was the last entry in the wheel's slot\n+\t * list. However, from a correctness point of view, a \"false\n+\t * positive\" is not an issue. From a performance perspective,\n+\t * checking the list head and clearing the bit is likely more\n+\t * expensive than just deferring a minor cost to a future\n+\t * rte_htw_manage() call.\n+\t */\n+\n+\tRTE_ASSERT(timer->state == RTE_HTIMER_STATE_PENDING ||\n+\t\t   timer->state == RTE_HTIMER_STATE_EXPIRED);\n+\n+\tif (likely(timer->state == RTE_HTIMER_STATE_PENDING)) {\n+\t\tLIST_REMOVE(timer, entry);\n+\t\ttimer->state = RTE_HTIMER_STATE_CANCELED;\n+\t} else if (timer == htw->running_timer) {\n+\t\t/* periodical timer being canceled by its own callback */\n+\t\tRTE_ASSERT(timer->flags & RTE_HTIMER_FLAG_PERIODICAL);\n+\n+\t\ttimer->state = RTE_HTIMER_STATE_CANCELED;\n+\n+\t\t/* signals running timer canceled */\n+\t\thtw->running_timer = NULL;\n+\t}\n+}\n+\n+static void\n+mark_expiring(struct rte_htw *htw, struct rte_htimer *timer)\n+{\n+\tLIST_INSERT_HEAD(&htw->expiring, timer, entry);\n+}\n+\n+static void\n+schedule_timer(struct rte_htw *htw, struct rte_htimer *timer)\n+{\n+\tuint64_t remaining_time;\n+\tuint16_t level;\n+\tstruct wheel *wheel;\n+\tuint16_t slot;\n+\tstruct rte_htimer_list *slot_timers;\n+\n+\tremaining_time = timer->expiration_time - htw->current_time;\n+\n+\tlevel = get_level(remaining_time);\n+\n+\twheel = &htw->wheels[level];\n+\n+\tslot = get_slot(timer->expiration_time, level);\n+\n+\tslot_timers = &wheel->slots[slot];\n+\n+\tLIST_INSERT_HEAD(slot_timers, timer, entry);\n+\n+\trte_bitset_set(wheel->used_slots, slot);\n+}\n+\n+static void\n+process_added(struct rte_htw *htw)\n+{\n+\tstruct rte_htimer *timer;\n+\n+\twhile ((timer = LIST_FIRST(&htw->added)) != NULL) {\n+\t\tLIST_REMOVE(timer, entry);\n+\n+\t\tif (timer->expiration_time > htw->current_time)\n+\t\t\tschedule_timer(htw, timer);\n+\t\telse\n+\t\t\tmark_expiring(htw, timer);\n+\t}\n+}
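\n+\n+/*\n+ * Fire the callbacks of all timers on the 'expiring' list. A\n+ * periodical timer is rearmed (via the 'added' list) unless its own\n+ * callback has canceled it.\n+ */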
\n+static void\n+process_expiring(struct rte_htw *htw)\n+{\n+\tstruct rte_htimer *timer;\n+\n+\twhile ((timer = LIST_FIRST(&htw->expiring)) != NULL) {\n+\t\tbool is_periodical;\n+\t\tbool running_timer_canceled;\n+\n+\t\t/*\n+\t\t * The timer struct cannot be safely accessed\n+\t\t * after the callback has been called (except for\n+\t\t * non-canceled periodical timers), since the callback\n+\t\t * may have free'd (or reused) the memory.\n+\t\t */\n+\n+\t\tLIST_REMOVE(timer, entry);\n+\n+\t\tis_periodical = timer->flags & RTE_HTIMER_FLAG_PERIODICAL;\n+\n+\t\ttimer->state = RTE_HTIMER_STATE_EXPIRED;\n+\n+\t\thtw->running_timer = timer;\n+\n+\t\ttimer->cb(timer, timer->cb_arg);\n+\n+\t\trunning_timer_canceled = htw->running_timer == NULL;\n+\n+\t\thtw->running_timer = NULL;\n+\n+\t\tif (is_periodical && !running_timer_canceled) {\n+\t\t\ttimer->expiration_time += timer->period;\n+\t\t\tmark_added(htw, timer);\n+\t\t}\n+\t}\n+}\n+\n+uint64_t\n+rte_htw_current_time(struct rte_htw *htw)\n+{\n+\treturn htw->current_time;\n+}\n+\n+uint64_t\n+rte_htw_next_timeout(struct rte_htw *htw, uint64_t upper_bound)\n+{\n+\tuint16_t level;\n+\n+\t/* scheduling the added timers sorts them into the wheels */\n+\tprocess_added(htw);\n+\n+\tif (!LIST_EMPTY(&htw->expiring))\n+\t\treturn 0;\n+\n+\tfor (level = 0; level < WHEEL_LEVELS; level++) {\n+\t\tuint64_t wheel_timeout;\n+\n+\t\twheel_timeout = wheel_next_timeout(&htw->wheels[level],\n+\t\t\t\t\t\t   level, upper_bound);\n+\t\tif (wheel_timeout != UINT64_MAX)\n+\t\t\treturn RTE_MIN(wheel_timeout, upper_bound);\n+\t}\n+\n+\treturn upper_bound;\n+}\n+\n+static __rte_always_inline void\n+process_slot(struct rte_htw *htw, uint16_t level, struct wheel *wheel,\n+\t     uint16_t slot)\n+{\n+\tstruct rte_htimer_list *slot_timers;\n+\tstruct rte_htimer *timer;\n+\n+\tslot_timers = &wheel->slots[slot];\n+\n+\trte_bitset_clear(wheel->used_slots, slot);\n+\n+\twhile ((timer = LIST_FIRST(slot_timers)) != NULL) {\n+\t\tLIST_REMOVE(timer, entry);\n+\n+\t\tif (level == 0 || timer->expiration_time <= htw->current_time)\n+\t\t\tmark_expiring(htw, timer);\n+\t\telse\n+\t\t\tschedule_timer(htw, timer);\n+\t}\n+}\n+\n+static __rte_always_inline void\n+process_slots(struct rte_htw *htw, uint16_t level, struct wheel *wheel,\n+\t      uint16_t start_slot, uint16_t num_slots)\n+{\n+\tssize_t slot;\n+\n+\tRTE_BITSET_FOREACH_SET_WRAP(slot, wheel->used_slots, WHEEL_SLOTS,\n+\t\t\t\t    (ssize_t)start_slot, num_slots)\n+\t\tprocess_slot(htw, level, wheel, slot);\n+}\n+\n+static void\n+advance(struct rte_htw *htw)\n+{\n+\tuint16_t level;\n+\n+\tfor (level = 0; level < WHEEL_LEVELS; level++) {\n+\t\tstruct wheel *wheel = &htw->wheels[level];\n+\t\tuint64_t new_wheel_time;\n+\t\tuint16_t start_slot;\n+\t\tuint16_t num_slots;\n+\n+\t\tnew_wheel_time = time_to_wheel_time(htw->current_time, level);\n+\n+\t\tif (new_wheel_time == wheel->wheel_time)\n+\t\t\tbreak;\n+\n+\t\tstart_slot = wheel_time_to_slot(wheel->wheel_time + 1);\n+\t\tnum_slots = RTE_MIN(new_wheel_time - wheel->wheel_time,\n+\t\t\t\t    WHEEL_SLOTS);\n+\n+\t\twheel->wheel_time = new_wheel_time;\n+\n+\t\tprocess_slots(htw, level, wheel, start_slot, num_slots);\n+\t}\n+}
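\n+\n+/*\n+ * Usage sketch (illustrative only; not part of this patch): driving\n+ * the wheel from a polling loop. get_tick() and timeout_cb() are\n+ * hypothetical application-supplied functions; the time fed to\n+ * rte_htw_manage() must never decrease.\n+ *\n+ *\tstruct rte_htimer timer;\n+ *\n+ *\trte_htw_add(htw, &timer, 100, 0, timeout_cb, NULL, 0);\n+ *\n+ *\tfor (;;)\n+ *\t\trte_htw_manage(htw, get_tick());\n+ */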
\n+void\n+rte_htw_manage(struct rte_htw *htw, uint64_t new_time)\n+{\n+\tRTE_VERIFY(new_time >= htw->current_time);\n+\n+\t/*\n+\t * Scheduling added timers, the core timer wheel processing,\n+\t * and expiry callback execution are kept as separate stages,\n+\t * to avoid having the core wheel traversal code deal with a\n+\t * timeout callback re-adding the timer being processed. This\n+\t * split also gives reasonable semantics for already-expired\n+\t * timeouts (e.g., with time 0) added from within a timeout\n+\t * callback. Instead of creating an endless loop, with\n+\t * rte_htw_manage() never returning, the execution of such a\n+\t * timer's callback is deferred until the next\n+\t * rte_htw_manage() call.\n+\t */\n+\n+\tprocess_added(htw);\n+\n+\tif (new_time > htw->current_time) {\n+\t\thtw->current_time = new_time;\n+\t\tadvance(htw);\n+\t}\n+\n+\tprocess_expiring(htw);\n+}\n+\n+void\n+rte_htw_process(struct rte_htw *htw)\n+{\n+\tprocess_added(htw);\n+\tprocess_expiring(htw);\n+}\ndiff --git a/lib/htimer/rte_htw.h b/lib/htimer/rte_htw.h\nnew file mode 100644\nindex 0000000000..c93358bb13\n--- /dev/null\n+++ b/lib/htimer/rte_htw.h\n@@ -0,0 +1,49 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Ericsson AB\n+ */\n+\n+#ifndef _RTE_HTW_H_\n+#define _RTE_HTW_H_\n+\n+#include <stdint.h>\n+#include <sys/queue.h>\n+\n+#include <rte_htimer.h>\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+struct rte_htw;\n+\n+struct rte_htw *\n+rte_htw_create(void);\n+\n+void\n+rte_htw_destroy(struct rte_htw *htw);\n+\n+void\n+rte_htw_add(struct rte_htw *htw, struct rte_htimer *timer,\n+\t    uint64_t expiration_time, uint64_t period,\n+\t    rte_htimer_cb_t cb, void *cb_arg, uint32_t flags);\n+\n+void\n+rte_htw_cancel(struct rte_htw *htw, struct rte_htimer *timer);\n+\n+uint64_t\n+rte_htw_current_time(struct rte_htw *htw);\n+\n+uint64_t\n+rte_htw_next_timeout(struct rte_htw *htw, uint64_t upper_bound);\n+\n+void\n+rte_htw_manage(struct rte_htw *htw, uint64_t new_time);\n+\n+void\n+rte_htw_process(struct rte_htw *htw);\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_HTW_H_ */\ndiff --git a/lib/htimer/version.map b/lib/htimer/version.map\nnew file mode 100644\nindex 0000000000..0e71dc7d57\n--- /dev/null\n+++ b/lib/htimer/version.map\n@@ -0,0 +1,17 @@\n+EXPERIMENTAL {\n+\tglobal:\n+\n+\trte_htimer_mgr_init;\n+\trte_htimer_mgr_deinit;\n+\trte_htimer_mgr_add;\n+\trte_htimer_mgr_cancel;\n+\trte_htimer_mgr_async_add;\n+\trte_htimer_mgr_async_cancel;\n+\trte_htimer_mgr_manage;\n+\trte_htimer_mgr_manage_time;\n+\trte_htimer_mgr_process;\n+\trte_htimer_mgr_current_time;\n+\trte_htimer_mgr_current_tick;\n+\n+\tlocal: *;\n+};\ndiff --git a/lib/meson.build b/lib/meson.build\nindex 2bc0932ad5..c7c0e42ae8 100644\n--- a/lib/meson.build\n+++ b/lib/meson.build\n@@ -37,6 +37,7 @@ libraries = [\n         'gpudev',\n         'gro',\n         'gso',\n+        'htimer',\n         'ip_frag',\n         'jobstats',\n         'kni',\n",
    "prefixes": [
        "RFC",
        "v2",
        "2/2"
    ]
}