get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied are changed).

put:
Update a patch (full update).
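
For illustration, the endpoint can also be consumed programmatically. Below is a minimal C sketch using libcurl (an assumed client choice, not something this API requires); it requests raw JSON via Django REST Framework's standard format=json query parameter instead of the browsable format=api page shown below:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
    CURLcode res;
    CURL *curl;

    curl_global_init(CURL_GLOBAL_DEFAULT);
    curl = curl_easy_init();
    if (curl == NULL) {
        curl_global_cleanup();
        return 1;
    }
    /* format=json asks DRF for the raw JSON body instead of HTML. */
    curl_easy_setopt(curl, CURLOPT_URL,
                     "http://patches.dpdk.org/api/patches/102105/?format=json");
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
    res = curl_easy_perform(curl); /* the body goes to stdout by default */
    if (res != CURLE_OK)
        fprintf(stderr, "GET failed: %s\n", curl_easy_strerror(res));
    curl_easy_cleanup(curl);
    curl_global_cleanup();
    return res == CURLE_OK ? 0 : 1;
}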

GET /api/patches/102105/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 102105,
    "url": "http://patches.dpdk.org/api/patches/102105/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211018224353.3362537-2-dkozlyuk@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211018224353.3362537-2-dkozlyuk@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211018224353.3362537-2-dkozlyuk@nvidia.com",
    "date": "2021-10-18T22:43:50",
    "name": "[v9,1/4] mempool: add event callbacks",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "45e7c49d96d0e51dc8a3811f37983bcd8ae60c4d",
    "submitter": {
        "id": 2367,
        "url": "http://patches.dpdk.org/api/people/2367/?format=api",
        "name": "Dmitry Kozlyuk",
        "email": "dkozlyuk@oss.nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211018224353.3362537-2-dkozlyuk@nvidia.com/mbox/",
    "series": [
        {
            "id": 19761,
            "url": "http://patches.dpdk.org/api/series/19761/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=19761",
            "date": "2021-10-18T22:43:49",
            "name": "net/mlx5: implicit mempool registration",
            "version": 9,
            "mbox": "http://patches.dpdk.org/series/19761/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/102105/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/102105/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 66792A0C45;\n\tTue, 19 Oct 2021 00:44:22 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E55A4410E7;\n\tTue, 19 Oct 2021 00:44:17 +0200 (CEST)",
            "from AZHDRRW-EX01.nvidia.com (azhdrrw-ex01.nvidia.com\n [20.51.104.162]) by mails.dpdk.org (Postfix) with ESMTP id DF98740E5A\n for <dev@dpdk.org>; Tue, 19 Oct 2021 00:44:14 +0200 (CEST)",
            "from NAM04-MW2-obe.outbound.protection.outlook.com (104.47.73.168)\n by mxs.oss.nvidia.com (10.13.234.36) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.2.858.15; Mon, 18 Oct 2021 15:44:13 -0700",
            "from DM3PR08CA0015.namprd08.prod.outlook.com (2603:10b6:0:52::25) by\n DM4PR12MB5279.namprd12.prod.outlook.com (2603:10b6:5:39f::8) with\n Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.4608.17; Mon, 18 Oct 2021 22:44:11 +0000",
            "from DM6NAM11FT056.eop-nam11.prod.protection.outlook.com\n (2603:10b6:0:52:cafe::3d) by DM3PR08CA0015.outlook.office365.com\n (2603:10b6:0:52::25) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4608.14 via Frontend\n Transport; Mon, 18 Oct 2021 22:44:11 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT056.mail.protection.outlook.com (10.13.173.99) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4608.15 via Frontend Transport; Mon, 18 Oct 2021 22:44:11 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Mon, 18 Oct\n 2021 22:44:08 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=hyd7hvrLZqjHoY+tuRBU/o2Y9571NQIZhUIpFZkbymoAK20Ggm6sgio8ZxDFaH+X9BUXLTBP8D4BpqEQTrBQSDNwkk282jFTlZspak3W7OZKE5Q/j4fKIQj/0sQ8VxzkAxDpibgwzWVbZVG6ka+2niksN5MqrnbKNdLwPFmqESk5WPv0Ps0F9oDsS98tNt8jmqjR8do58pUQFHu7LxSw1hKF5mMaDnCllUK1raW8PzNhNsB80RN3mybrj7FYnpc7OzxKvuU3h9vA82p05GO5b/fMBLoAiAWwf1BNnJfT3q/UwmaZD/zk4l1wqoWs5m5Xm82YAqZPoRo8iaO7vDGhLg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=ndYyhVSMo89jAvSKq+nqPcX0XPpJ3FzBfp7GjKr+++E=;\n b=SJegvHsMgXDrPljHQpmSXjcvrrHU+Thx0OIdeLfTT76fZbF5kLZWczTg6miUtut/QQPpMrN9U1bVnvlYVbyNa7pOxd5jGMSoug63oK1DFqXlzdXUw+5Msi7oYRnf82W/JzXRKZ87jhubat07kIRoykbETiX7Zfba+hwDFSlFHM5i3Fw4wi2o1p+qrh6bT2G8b2CVB1uZQTMG0YJSJ+aBaEYqEdEEaFoyM2Bcp0+6fE5G9Dr0CwK/I0F/QkbPu7NOCCrcL6xX2nyCeabfRttXvebQkN5t5ANEuUwguIRXcqyN5G26QAdpKSIkE4VvxwT3XIcJVcT3z/kh4qz9MijAlQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=6wind.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=ndYyhVSMo89jAvSKq+nqPcX0XPpJ3FzBfp7GjKr+++E=;\n b=DJ+f9mSYmeXQynHHBfJTZ3vBPj7PLWWPfA+GfsWb04AkaFRu7YsQwDNjZWcgSMTCOCNR4knyxV67cpGgKG1ci4vCqYlVpUqMTbJ1H7F/tg/OH+0J9Vrp5ebcAcBcHs8Rlv4+cAZ9zCFAs+9QuMgDxZwULetUFL8MCzB8v2CBaLBo1FrrSOs/aFDXOjapKKEzXnfR0vPpWHaYjW+Wj+i33K2v08y7D0DYL4K1bNDiAGhZUh86gpGAFaDTQp/DPVNkQxjfmMhm/ce7jP1hvoAS8O2EglQ/jcjaeRPVAQt8APxZoS7f+7uNk2kcOHsDnraDzTZi7MBcIig5wnDWzSL05w==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; 6wind.com; dkim=none (message not signed)\n header.d=none;6wind.com; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Dmitry Kozlyuk <dkozlyuk@oss.nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Matan Azrad <matan@oss.nvidia.com>, Andrew Rybchenko\n <andrew.rybchenko@oktetlabs.ru>, Olivier Matz <olivier.matz@6wind.com>, \"Ray\n Kinsella\" <mdr@ashroe.eu>",
        "Date": "Tue, 19 Oct 2021 01:43:50 +0300",
        "Message-ID": "<20211018224353.3362537-2-dkozlyuk@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20211018224353.3362537-1-dkozlyuk@nvidia.com>",
        "References": "<20211018144059.3303406-1-dkozlyuk@nvidia.com>\n <20211018224353.3362537-1-dkozlyuk@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "5a30c837-ef87-4180-a6fb-08d99288cdce",
        "X-MS-TrafficTypeDiagnostic": "DM4PR12MB5279:",
        "X-Microsoft-Antispam-PRVS": "\n <DM4PR12MB52790FF615CF055A3D66C6B4B9BC9@DM4PR12MB5279.namprd12.prod.outlook.com>",
        "X-MS-Exchange-Transport-Forked": "True",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:2201;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 6YeUouyIOIq9/gK2exd4QBhTzbWGuz0canWjfmFNoG9UtYv1pdA9aexo/+I950r43QYxF4FoaX/nZrwBR/ek0W9voxHebu+3nf1N8jls7mIUBwzdON8c5FAge3+n3+i4riPwoCZGgns6LhjoTlbtzdtaTEZrVlVWk4MOCDeZCG4mvBz3RjFGmmp79J9un32rw5cKXEAsiR/HwEgAlhsaaFvaaN4WjkWQZGOtYay5KhDsKggHlGYgIyIripjqTHlOVyzYXIB/Vv+kvHscAcMWqEtOlW7KXXbZTA/tysdE8+7inds4dQe1wkFGKl/QydK19Jg2G1k+2YDCBL2f3uXNVGggYkvkStTxD0ycNTORlm6zRFzuM2VwPQzjiHfpS8Irk4ZqjKRda/UANv8jUT25Elc/Y6Cp/nCevZvy3KSw6tGysHsXKyDSglFgJSArRZVaTRm3ladoWImxR3CdqmXH5XYLEJjbVltO4A8HUgv1GP5qeW0ZtqpmA/B0mjQOOv2cAsVF1TyE8fuUUltNDmWFp1kh0uvZwhuQJMWbQfB/EYl39yl9jzlK2A+seUUv7G/EFgtqJzxpAXzxTQLiqwyQXeT6ecNdR/ep0qpYdKv+UTfLyAYt95nMiKS2M/Ds7evIdoJaEhY/ofWSRUZZucobgrOjdcTiwmwUmS/SMVkd4yazl/ua13Kpyalyuo80zkYeESpEo7g2fmOweKR4z2Y8Lg==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(55016002)(26005)(83380400001)(2616005)(6916009)(54906003)(82310400003)(6666004)(8936002)(70206006)(1076003)(70586007)(36756003)(6286002)(47076005)(7636003)(36860700001)(8676002)(36906005)(86362001)(508600001)(16526019)(30864003)(2906002)(426003)(107886003)(5660300002)(4326008)(316002)(356005)(186003)(336012)(7696005);\n DIR:OUT; SFP:1101;",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Oct 2021 22:44:11.1448 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 5a30c837-ef87-4180-a6fb-08d99288cdce",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT056.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM4PR12MB5279",
        "Subject": "[dpdk-dev] [PATCH v9 1/4] mempool: add event callbacks",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Data path performance can benefit if the PMD knows which memory it will\nneed to handle in advance, before the first mbuf is sent to the PMD.\nIt is impractical, however, to consider all allocated memory for this\npurpose. Most often mbuf memory comes from mempools that can come and\ngo. PMD can enumerate existing mempools on device start, but it also\nneeds to track creation and destruction of mempools after the forwarding\nstarts but before an mbuf from the new mempool is sent to the device.\n\nAdd an API to register callback for mempool life cycle events:\n* rte_mempool_event_callback_register()\n* rte_mempool_event_callback_unregister()\nCurrently tracked events are:\n* RTE_MEMPOOL_EVENT_READY (after populating a mempool)\n* RTE_MEMPOOL_EVENT_DESTROY (before freeing a mempool)\nProvide a unit test for the new API.\nThe new API is internal, because it is primarily demanded by PMDs that\nmay need to deal with any mempools and do not control their creation,\nwhile an application, on the other hand, knows which mempools it creates\nand doesn't care about internal mempools PMDs might create.\n\nSigned-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\nReviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>\n---\n app/test/test_mempool.c   | 248 ++++++++++++++++++++++++++++++++++++++\n lib/mempool/rte_mempool.c | 124 +++++++++++++++++++\n lib/mempool/rte_mempool.h |  62 ++++++++++\n lib/mempool/version.map   |   8 ++\n 4 files changed, 442 insertions(+)",
    "diff": "diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c\nindex 66bc8d86b7..5339a4cbd8 100644\n--- a/app/test/test_mempool.c\n+++ b/app/test/test_mempool.c\n@@ -14,6 +14,7 @@\n #include <rte_common.h>\n #include <rte_log.h>\n #include <rte_debug.h>\n+#include <rte_errno.h>\n #include <rte_memory.h>\n #include <rte_launch.h>\n #include <rte_cycles.h>\n@@ -489,6 +490,245 @@ test_mp_mem_init(struct rte_mempool *mp,\n \tdata->ret = 0;\n }\n \n+struct test_mempool_events_data {\n+\tstruct rte_mempool *mp;\n+\tenum rte_mempool_event event;\n+\tbool invoked;\n+};\n+\n+static void\n+test_mempool_events_cb(enum rte_mempool_event event,\n+\t\t       struct rte_mempool *mp, void *user_data)\n+{\n+\tstruct test_mempool_events_data *data = user_data;\n+\n+\tdata->mp = mp;\n+\tdata->event = event;\n+\tdata->invoked = true;\n+}\n+\n+static int\n+test_mempool_events(int (*populate)(struct rte_mempool *mp))\n+{\n+#pragma push_macro(\"RTE_TEST_TRACE_FAILURE\")\n+#undef RTE_TEST_TRACE_FAILURE\n+#define RTE_TEST_TRACE_FAILURE(...) do { goto fail; } while (0)\n+\n+\tstatic const size_t CB_NUM = 3;\n+\tstatic const size_t MP_NUM = 2;\n+\n+\tstruct test_mempool_events_data data[CB_NUM];\n+\tstruct rte_mempool *mp[MP_NUM], *freed;\n+\tchar name[RTE_MEMPOOL_NAMESIZE];\n+\tsize_t i, j;\n+\tint ret;\n+\n+\tmemset(mp, 0, sizeof(mp));\n+\tfor (i = 0; i < CB_NUM; i++) {\n+\t\tret = rte_mempool_event_callback_register\n+\t\t\t\t(test_mempool_events_cb, &data[i]);\n+\t\tRTE_TEST_ASSERT_EQUAL(ret, 0, \"Failed to register the callback %zu: %s\",\n+\t\t\t\t      i, rte_strerror(rte_errno));\n+\t}\n+\tret = rte_mempool_event_callback_unregister(test_mempool_events_cb, mp);\n+\tRTE_TEST_ASSERT_NOT_EQUAL(ret, 0, \"Unregistered a non-registered callback\");\n+\t/* NULL argument has no special meaning in this API. */\n+\tret = rte_mempool_event_callback_unregister(test_mempool_events_cb,\n+\t\t\t\t\t\t    NULL);\n+\tRTE_TEST_ASSERT_NOT_EQUAL(ret, 0, \"Unregistered a non-registered callback with NULL argument\");\n+\n+\t/* Create mempool 0 that will be observed by all callbacks. */\n+\tmemset(&data, 0, sizeof(data));\n+\tstrcpy(name, \"empty0\");\n+\tmp[0] = rte_mempool_create_empty(name, MEMPOOL_SIZE,\n+\t\t\t\t\t MEMPOOL_ELT_SIZE, 0, 0,\n+\t\t\t\t\t SOCKET_ID_ANY, 0);\n+\tRTE_TEST_ASSERT_NOT_NULL(mp[0], \"Cannot create mempool %s: %s\",\n+\t\t\t\t name, rte_strerror(rte_errno));\n+\tfor (j = 0; j < CB_NUM; j++)\n+\t\tRTE_TEST_ASSERT_EQUAL(data[j].invoked, false,\n+\t\t\t\t      \"Callback %zu invoked on %s mempool creation\",\n+\t\t\t\t      j, name);\n+\n+\trte_mempool_set_ops_byname(mp[0], rte_mbuf_best_mempool_ops(), NULL);\n+\tret = populate(mp[0]);\n+\tRTE_TEST_ASSERT_EQUAL(ret, (int)mp[0]->size, \"Failed to populate mempool %s: %s\",\n+\t\t\t      name, rte_strerror(-ret));\n+\tfor (j = 0; j < CB_NUM; j++) {\n+\t\tRTE_TEST_ASSERT_EQUAL(data[j].invoked, true,\n+\t\t\t\t\t\"Callback %zu not invoked on mempool %s population\",\n+\t\t\t\t\tj, name);\n+\t\tRTE_TEST_ASSERT_EQUAL(data[j].event,\n+\t\t\t\t\tRTE_MEMPOOL_EVENT_READY,\n+\t\t\t\t\t\"Wrong callback invoked, expected READY\");\n+\t\tRTE_TEST_ASSERT_EQUAL(data[j].mp, mp[0],\n+\t\t\t\t\t\"Callback %zu invoked for a wrong mempool instead of %s\",\n+\t\t\t\t\tj, name);\n+\t}\n+\n+\t/* Check that unregistered callback 0 observes no events. 
*/\n+\tret = rte_mempool_event_callback_unregister(test_mempool_events_cb,\n+\t\t\t\t\t\t    &data[0]);\n+\tRTE_TEST_ASSERT_EQUAL(ret, 0, \"Failed to unregister callback 0: %s\",\n+\t\t\t      rte_strerror(rte_errno));\n+\tmemset(&data, 0, sizeof(data));\n+\tstrcpy(name, \"empty1\");\n+\tmp[1] = rte_mempool_create_empty(name, MEMPOOL_SIZE,\n+\t\t\t\t\t MEMPOOL_ELT_SIZE, 0, 0,\n+\t\t\t\t\t SOCKET_ID_ANY, 0);\n+\tRTE_TEST_ASSERT_NOT_NULL(mp[1], \"Cannot create mempool %s: %s\",\n+\t\t\t\t name, rte_strerror(rte_errno));\n+\trte_mempool_set_ops_byname(mp[1], rte_mbuf_best_mempool_ops(), NULL);\n+\tret = populate(mp[1]);\n+\tRTE_TEST_ASSERT_EQUAL(ret, (int)mp[1]->size, \"Failed to populate mempool %s: %s\",\n+\t\t\t      name, rte_strerror(-ret));\n+\tRTE_TEST_ASSERT_EQUAL(data[0].invoked, false,\n+\t\t\t      \"Unregistered callback 0 invoked on %s mempool populaton\",\n+\t\t\t      name);\n+\n+\tfor (i = 0; i < MP_NUM; i++) {\n+\t\tmemset(&data, 0, sizeof(data));\n+\t\tsprintf(name, \"empty%zu\", i);\n+\t\trte_mempool_free(mp[i]);\n+\t\t/*\n+\t\t * Save pointer to check that it was passed to the callback,\n+\t\t * but put NULL into the array in case cleanup is called early.\n+\t\t */\n+\t\tfreed = mp[i];\n+\t\tmp[i] = NULL;\n+\t\tfor (j = 1; j < CB_NUM; j++) {\n+\t\t\tRTE_TEST_ASSERT_EQUAL(data[j].invoked, true,\n+\t\t\t\t\t      \"Callback %zu not invoked on mempool %s destruction\",\n+\t\t\t\t\t      j, name);\n+\t\t\tRTE_TEST_ASSERT_EQUAL(data[j].event,\n+\t\t\t\t\t      RTE_MEMPOOL_EVENT_DESTROY,\n+\t\t\t\t\t      \"Wrong callback invoked, expected DESTROY\");\n+\t\t\tRTE_TEST_ASSERT_EQUAL(data[j].mp, freed,\n+\t\t\t\t\t      \"Callback %zu invoked for a wrong mempool instead of %s\",\n+\t\t\t\t\t      j, name);\n+\t\t}\n+\t\tRTE_TEST_ASSERT_EQUAL(data[0].invoked, false,\n+\t\t\t\t      \"Unregistered callback 0 invoked on %s mempool destruction\",\n+\t\t\t\t      name);\n+\t}\n+\n+\tfor (j = 1; j < CB_NUM; j++) {\n+\t\tret = rte_mempool_event_callback_unregister\n+\t\t\t\t\t(test_mempool_events_cb, &data[j]);\n+\t\tRTE_TEST_ASSERT_EQUAL(ret, 0, \"Failed to unregister the callback %zu: %s\",\n+\t\t\t\t      j, rte_strerror(rte_errno));\n+\t}\n+\treturn TEST_SUCCESS;\n+\n+fail:\n+\tfor (j = 0; j < CB_NUM; j++)\n+\t\trte_mempool_event_callback_unregister\n+\t\t\t\t\t(test_mempool_events_cb, &data[j]);\n+\tfor (i = 0; i < MP_NUM; i++)\n+\t\trte_mempool_free(mp[i]);\n+\treturn TEST_FAILED;\n+\n+#pragma pop_macro(\"RTE_TEST_TRACE_FAILURE\")\n+}\n+\n+struct test_mempool_events_safety_data {\n+\tbool invoked;\n+\tint (*api_func)(rte_mempool_event_callback *func, void *user_data);\n+\trte_mempool_event_callback *cb_func;\n+\tvoid *cb_user_data;\n+\tint ret;\n+};\n+\n+static void\n+test_mempool_events_safety_cb(enum rte_mempool_event event,\n+\t\t\t      struct rte_mempool *mp, void *user_data)\n+{\n+\tstruct test_mempool_events_safety_data *data = user_data;\n+\n+\tRTE_SET_USED(event);\n+\tRTE_SET_USED(mp);\n+\tdata->invoked = true;\n+\tdata->ret = data->api_func(data->cb_func, data->cb_user_data);\n+}\n+\n+static int\n+test_mempool_events_safety(void)\n+{\n+#pragma push_macro(\"RTE_TEST_TRACE_FAILURE\")\n+#undef RTE_TEST_TRACE_FAILURE\n+#define RTE_TEST_TRACE_FAILURE(...) 
do { \\\n+\t\tret = TEST_FAILED; \\\n+\t\tgoto exit; \\\n+\t} while (0)\n+\n+\tstruct test_mempool_events_data data;\n+\tstruct test_mempool_events_safety_data sdata[2];\n+\tstruct rte_mempool *mp;\n+\tsize_t i;\n+\tint ret;\n+\n+\t/* removes itself */\n+\tsdata[0].api_func = rte_mempool_event_callback_unregister;\n+\tsdata[0].cb_func = test_mempool_events_safety_cb;\n+\tsdata[0].cb_user_data = &sdata[0];\n+\tsdata[0].ret = -1;\n+\trte_mempool_event_callback_register(test_mempool_events_safety_cb,\n+\t\t\t\t\t    &sdata[0]);\n+\t/* inserts a callback after itself */\n+\tsdata[1].api_func = rte_mempool_event_callback_register;\n+\tsdata[1].cb_func = test_mempool_events_cb;\n+\tsdata[1].cb_user_data = &data;\n+\tsdata[1].ret = -1;\n+\trte_mempool_event_callback_register(test_mempool_events_safety_cb,\n+\t\t\t\t\t    &sdata[1]);\n+\n+\tmp = rte_mempool_create_empty(\"empty\", MEMPOOL_SIZE,\n+\t\t\t\t      MEMPOOL_ELT_SIZE, 0, 0,\n+\t\t\t\t      SOCKET_ID_ANY, 0);\n+\tRTE_TEST_ASSERT_NOT_NULL(mp, \"Cannot create mempool: %s\",\n+\t\t\t\t rte_strerror(rte_errno));\n+\tmemset(&data, 0, sizeof(data));\n+\tret = rte_mempool_populate_default(mp);\n+\tRTE_TEST_ASSERT_EQUAL(ret, (int)mp->size, \"Failed to populate mempool: %s\",\n+\t\t\t      rte_strerror(-ret));\n+\n+\tRTE_TEST_ASSERT_EQUAL(sdata[0].ret, 0, \"Callback failed to unregister itself: %s\",\n+\t\t\t      rte_strerror(rte_errno));\n+\tRTE_TEST_ASSERT_EQUAL(sdata[1].ret, 0, \"Failed to insert a new callback: %s\",\n+\t\t\t      rte_strerror(rte_errno));\n+\tRTE_TEST_ASSERT_EQUAL(data.invoked, false,\n+\t\t\t      \"Inserted callback is invoked on mempool population\");\n+\n+\tmemset(&data, 0, sizeof(data));\n+\tsdata[0].invoked = false;\n+\trte_mempool_free(mp);\n+\tmp = NULL;\n+\tRTE_TEST_ASSERT_EQUAL(sdata[0].invoked, false,\n+\t\t\t      \"Callback that unregistered itself was called\");\n+\tRTE_TEST_ASSERT_EQUAL(sdata[1].ret, -EEXIST,\n+\t\t\t      \"New callback inserted twice\");\n+\tRTE_TEST_ASSERT_EQUAL(data.invoked, true,\n+\t\t\t      \"Inserted callback is not invoked on mempool destruction\");\n+\n+\trte_mempool_event_callback_unregister(test_mempool_events_cb, &data);\n+\tfor (i = 0; i < RTE_DIM(sdata); i++)\n+\t\trte_mempool_event_callback_unregister\n+\t\t\t\t(test_mempool_events_safety_cb, &sdata[i]);\n+\tret = TEST_SUCCESS;\n+\n+exit:\n+\t/* cleanup, don't care which callbacks are already removed */\n+\trte_mempool_event_callback_unregister(test_mempool_events_cb, &data);\n+\tfor (i = 0; i < RTE_DIM(sdata); i++)\n+\t\trte_mempool_event_callback_unregister\n+\t\t\t\t(test_mempool_events_safety_cb, &sdata[i]);\n+\t/* in case of failure before the planned destruction */\n+\trte_mempool_free(mp);\n+\treturn ret;\n+\n+#pragma pop_macro(\"RTE_TEST_TRACE_FAILURE\")\n+}\n+\n static int\n test_mempool(void)\n {\n@@ -666,6 +906,14 @@ test_mempool(void)\n \tif (test_mempool_basic(default_pool, 1) < 0)\n \t\tGOTO_ERR(ret, err);\n \n+\t/* test mempool event callbacks */\n+\tif (test_mempool_events(rte_mempool_populate_default) < 0)\n+\t\tGOTO_ERR(ret, err);\n+\tif (test_mempool_events(rte_mempool_populate_anon) < 0)\n+\t\tGOTO_ERR(ret, err);\n+\tif (test_mempool_events_safety() < 0)\n+\t\tGOTO_ERR(ret, err);\n+\n \trte_mempool_list_dump(stdout);\n \n \tret = 0;\ndiff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c\nindex 607419ccaf..8810d08ab5 100644\n--- a/lib/mempool/rte_mempool.c\n+++ b/lib/mempool/rte_mempool.c\n@@ -42,6 +42,18 @@ static struct rte_tailq_elem rte_mempool_tailq = {\n };\n 
EAL_REGISTER_TAILQ(rte_mempool_tailq)\n \n+TAILQ_HEAD(mempool_callback_list, rte_tailq_entry);\n+\n+static struct rte_tailq_elem callback_tailq = {\n+\t.name = \"RTE_MEMPOOL_CALLBACK\",\n+};\n+EAL_REGISTER_TAILQ(callback_tailq)\n+\n+/* Invoke all registered mempool event callbacks. */\n+static void\n+mempool_event_callback_invoke(enum rte_mempool_event event,\n+\t\t\t      struct rte_mempool *mp);\n+\n #define CACHE_FLUSHTHRESH_MULTIPLIER 1.5\n #define CALC_CACHE_FLUSHTHRESH(c)\t\\\n \t((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))\n@@ -360,6 +372,10 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,\n \tSTAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);\n \tmp->nb_mem_chunks++;\n \n+\t/* Report the mempool as ready only when fully populated. */\n+\tif (mp->populated_size >= mp->size)\n+\t\tmempool_event_callback_invoke(RTE_MEMPOOL_EVENT_READY, mp);\n+\n \trte_mempool_trace_populate_iova(mp, vaddr, iova, len, free_cb, opaque);\n \treturn i;\n \n@@ -722,6 +738,7 @@ rte_mempool_free(struct rte_mempool *mp)\n \t}\n \trte_mcfg_tailq_write_unlock();\n \n+\tmempool_event_callback_invoke(RTE_MEMPOOL_EVENT_DESTROY, mp);\n \trte_mempool_trace_free(mp);\n \trte_mempool_free_memchunks(mp);\n \trte_mempool_ops_free(mp);\n@@ -1356,3 +1373,110 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),\n \n \trte_mcfg_mempool_read_unlock();\n }\n+\n+struct mempool_callback_data {\n+\trte_mempool_event_callback *func;\n+\tvoid *user_data;\n+};\n+\n+static void\n+mempool_event_callback_invoke(enum rte_mempool_event event,\n+\t\t\t      struct rte_mempool *mp)\n+{\n+\tstruct mempool_callback_list *list;\n+\tstruct rte_tailq_entry *te;\n+\tvoid *tmp_te;\n+\n+\trte_mcfg_tailq_read_lock();\n+\tlist = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);\n+\tRTE_TAILQ_FOREACH_SAFE(te, list, next, tmp_te) {\n+\t\tstruct mempool_callback_data *cb = te->data;\n+\t\trte_mcfg_tailq_read_unlock();\n+\t\tcb->func(event, mp, cb->user_data);\n+\t\trte_mcfg_tailq_read_lock();\n+\t}\n+\trte_mcfg_tailq_read_unlock();\n+}\n+\n+int\n+rte_mempool_event_callback_register(rte_mempool_event_callback *func,\n+\t\t\t\t    void *user_data)\n+{\n+\tstruct mempool_callback_list *list;\n+\tstruct rte_tailq_entry *te = NULL;\n+\tstruct mempool_callback_data *cb;\n+\tvoid *tmp_te;\n+\tint ret;\n+\n+\tif (func == NULL) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\trte_mcfg_tailq_write_lock();\n+\tlist = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);\n+\tRTE_TAILQ_FOREACH_SAFE(te, list, next, tmp_te) {\n+\t\tcb = te->data;\n+\t\tif (cb->func == func && cb->user_data == user_data) {\n+\t\t\tret = -EEXIST;\n+\t\t\tgoto exit;\n+\t\t}\n+\t}\n+\n+\tte = rte_zmalloc(\"mempool_cb_tail_entry\", sizeof(*te), 0);\n+\tif (te == NULL) {\n+\t\tRTE_LOG(ERR, MEMPOOL,\n+\t\t\t\"Cannot allocate event callback tailq entry!\\n\");\n+\t\tret = -ENOMEM;\n+\t\tgoto exit;\n+\t}\n+\n+\tcb = rte_malloc(\"mempool_cb_data\", sizeof(*cb), 0);\n+\tif (cb == NULL) {\n+\t\tRTE_LOG(ERR, MEMPOOL,\n+\t\t\t\"Cannot allocate event callback!\\n\");\n+\t\trte_free(te);\n+\t\tret = -ENOMEM;\n+\t\tgoto exit;\n+\t}\n+\n+\tcb->func = func;\n+\tcb->user_data = user_data;\n+\tte->data = cb;\n+\tTAILQ_INSERT_TAIL(list, te, next);\n+\tret = 0;\n+\n+exit:\n+\trte_mcfg_tailq_write_unlock();\n+\trte_errno = -ret;\n+\treturn ret;\n+}\n+\n+int\n+rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,\n+\t\t\t\t      void *user_data)\n+{\n+\tstruct mempool_callback_list *list;\n+\tstruct rte_tailq_entry *te = 
NULL;\n+\tstruct mempool_callback_data *cb;\n+\tint ret = -ENOENT;\n+\n+\trte_mcfg_tailq_write_lock();\n+\tlist = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);\n+\tTAILQ_FOREACH(te, list, next) {\n+\t\tcb = te->data;\n+\t\tif (cb->func == func && cb->user_data == user_data) {\n+\t\t\tTAILQ_REMOVE(list, te, next);\n+\t\t\tret = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\trte_mcfg_tailq_write_unlock();\n+\n+\tif (ret == 0) {\n+\t\trte_free(te);\n+\t\trte_free(cb);\n+\t}\n+\trte_errno = -ret;\n+\treturn ret;\n+}\ndiff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h\nindex 88bcbc51ef..5799d4a705 100644\n--- a/lib/mempool/rte_mempool.h\n+++ b/lib/mempool/rte_mempool.h\n@@ -1769,6 +1769,68 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),\n int\n rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);\n \n+/**\n+ * Mempool event type.\n+ * @internal\n+ */\n+enum rte_mempool_event {\n+\t/** Occurs after a mempool is fully populated. */\n+\tRTE_MEMPOOL_EVENT_READY = 0,\n+\t/** Occurs before the destruction of a mempool begins. */\n+\tRTE_MEMPOOL_EVENT_DESTROY = 1,\n+};\n+\n+/**\n+ * @internal\n+ * Mempool event callback.\n+ *\n+ * rte_mempool_event_callback_register() may be called from within the callback,\n+ * but the callbacks registered this way will not be invoked for the same event.\n+ * rte_mempool_event_callback_unregister() may only be safely called\n+ * to remove the running callback.\n+ */\n+typedef void (rte_mempool_event_callback)(\n+\t\tenum rte_mempool_event event,\n+\t\tstruct rte_mempool *mp,\n+\t\tvoid *user_data);\n+\n+/**\n+ * @internal\n+ * Register a callback function invoked on mempool life cycle event.\n+ * The function will be invoked in the process\n+ * that performs an action which triggers the callback.\n+ *\n+ * @param func\n+ *   Callback function.\n+ * @param user_data\n+ *   User data.\n+ *\n+ * @return\n+ *   0 on success, negative on failure and rte_errno is set.\n+ */\n+__rte_internal\n+int\n+rte_mempool_event_callback_register(rte_mempool_event_callback *func,\n+\t\t\t\t    void *user_data);\n+\n+/**\n+ * @internal\n+ * Unregister a callback added with rte_mempool_event_callback_register().\n+ * @p func and @p user_data must exactly match registration parameters.\n+ *\n+ * @param func\n+ *   Callback function.\n+ * @param user_data\n+ *   User data.\n+ *\n+ * @return\n+ *   0 on success, negative on failure and rte_errno is set.\n+ */\n+__rte_internal\n+int\n+rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,\n+\t\t\t\t      void *user_data);\n+\n #ifdef __cplusplus\n }\n #endif\ndiff --git a/lib/mempool/version.map b/lib/mempool/version.map\nindex 9f77da6fff..1b7d7c5456 100644\n--- a/lib/mempool/version.map\n+++ b/lib/mempool/version.map\n@@ -64,3 +64,11 @@ EXPERIMENTAL {\n \t__rte_mempool_trace_ops_free;\n \t__rte_mempool_trace_set_ops_byname;\n };\n+\n+INTERNAL {\n+\tglobal:\n+\n+\t# added in 21.11\n+\trte_mempool_event_callback_register;\n+\trte_mempool_event_callback_unregister;\n+};\n",
    "prefixes": [
        "v9",
        "1/4"
    ]
}
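
The commit message and diff above specify the new internal API; for illustration only, here is a hypothetical PMD-style consumer. The names example_owner, example_mempool_event_cb, example_dev_start and example_dev_stop are invented, but the callback signature, return conventions and event semantics follow the header comments in the diff. Because the functions are marked __rte_internal, only code built as part of DPDK (such as a PMD) can call them.

#include <errno.h>
#include <stdio.h>

#include <rte_errno.h>
#include <rte_mempool.h>

/* Shared registration cookie: unregister must pass exactly the same
 * (func, user_data) pair that was registered. */
static char example_owner[] = "example_pmd";

static void
example_mempool_event_cb(enum rte_mempool_event event,
                         struct rte_mempool *mp, void *user_data)
{
    const char *who = user_data;

    switch (event) {
    case RTE_MEMPOOL_EVENT_READY:
        /* Invoked from rte_mempool_populate_iova() once the pool is
         * fully populated: a natural point to map its memory. */
        printf("%s: mempool %s is ready\n", who, mp->name);
        break;
    case RTE_MEMPOOL_EVENT_DESTROY:
        /* Invoked from rte_mempool_free() before the memory chunks
         * are released. */
        printf("%s: mempool %s is being destroyed\n", who, mp->name);
        break;
    }
}

static int
example_dev_start(void)
{
    /* Returns 0 on success; on failure the return value is negative and
     * rte_errno is set (EEXIST when this exact pair is registered). */
    int ret = rte_mempool_event_callback_register(example_mempool_event_cb,
                                                  example_owner);

    if (ret != 0 && rte_errno != EEXIST)
        return ret;
    return 0;
}

static void
example_dev_stop(void)
{
    rte_mempool_event_callback_unregister(example_mempool_event_cb,
                                          example_owner);
}

Per the header comment in the diff, a callback may register further callbacks (they will not be invoked for the event currently being dispatched) and may safely unregister only itself while it is running.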