get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body are changed).

put:
Update a patch (a full update; all writable fields are expected).
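
As a practical illustration, these endpoints can also be driven from a script. The snippet below is a minimal sketch using the Python "requests" package; the API token, the new state value, and the file itself are illustrative placeholders, and write access assumes the credentials of a project maintainer or the patch delegate. The full representation returned by GET is shown further down.

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 100002

# GET: read-only access needs no authentication for a public project.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "-", patch["state"])

# PATCH: partial update; only the fields in the request body are changed.
# API_TOKEN is a hypothetical Patchwork API token, sent as DRF token auth.
API_TOKEN = "0123456789abcdef0123456789abcdef"
update = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted"},
)
print(update.status_code)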

GET /api/patches/100002/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 100002,
    "url": "https://patches.dpdk.org/api/patches/100002/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210929145249.2176811-2-dkozlyuk@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210929145249.2176811-2-dkozlyuk@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210929145249.2176811-2-dkozlyuk@nvidia.com",
    "date": "2021-09-29T14:52:46",
    "name": "[v2,1/4] mempool: add event callbacks",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b9d2238b29c47f075f396cdd5c19689d64d4cf8c",
    "submitter": {
        "id": 2367,
        "url": "https://patches.dpdk.org/api/people/2367/?format=api",
        "name": "Dmitry Kozlyuk",
        "email": "dkozlyuk@oss.nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210929145249.2176811-2-dkozlyuk@nvidia.com/mbox/",
    "series": [
        {
            "id": 19267,
            "url": "https://patches.dpdk.org/api/series/19267/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=19267",
            "date": "2021-09-29T14:52:46",
            "name": "[v2,1/4] mempool: add event callbacks",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/19267/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/100002/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/100002/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 6873AA0547;\n\tWed, 29 Sep 2021 16:53:16 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E4A81410F0;\n\tWed, 29 Sep 2021 16:53:12 +0200 (CEST)",
            "from AZHDRRW-EX02.NVIDIA.COM (azhdrrw-ex02.nvidia.com\n [20.64.145.131]) by mails.dpdk.org (Postfix) with ESMTP id 0CFD7410EF\n for <dev@dpdk.org>; Wed, 29 Sep 2021 16:53:11 +0200 (CEST)",
            "from NAM11-DM6-obe.outbound.protection.outlook.com (104.47.57.171)\n by mxs.oss.nvidia.com (10.13.234.37) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.2.858.15; Wed, 29 Sep 2021 07:53:09 -0700",
            "from DM5PR21CA0011.namprd21.prod.outlook.com (2603:10b6:3:ac::21) by\n CY4PR1201MB0005.namprd12.prod.outlook.com (2603:10b6:903:d1::23) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4544.13; Wed, 29 Sep\n 2021 14:53:08 +0000",
            "from DM6NAM11FT065.eop-nam11.prod.protection.outlook.com\n (2603:10b6:3:ac:cafe::7) by DM5PR21CA0011.outlook.office365.com\n (2603:10b6:3:ac::21) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4587.1 via Frontend\n Transport; Wed, 29 Sep 2021 14:53:08 +0000",
            "from mail.nvidia.com (216.228.112.32) by\n DM6NAM11FT065.mail.protection.outlook.com (10.13.172.109) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4566.14 via Frontend Transport; Wed, 29 Sep 2021 14:53:07 +0000",
            "from DRHQMAIL107.nvidia.com (10.27.9.16) by HQMAIL109.nvidia.com\n (172.20.187.15) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Wed, 29 Sep\n 2021 07:53:06 -0700",
            "from nvidia.com (172.20.187.5) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Wed, 29 Sep\n 2021 14:53:04 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=JyVsoXd5mDM7FTjyNY3OQPu2K1X7xTTrHSJhFYBQuHl6Vs+ybnZzv5wiZkSck6MX5GQkymC6yVXINjKlzgX9d1D9zRpkd1jhHzfCN+aS8eiu9ETNbp2GPxpmVsQ7o3GVn/i12SkNvpXjfeUlO93svcDfwG4/McLJvJeHiDskxvKnk1IyvZ3eUw7II9t7WiNzct6EGZUK/B/2FRMLXnUaMTNJ7QJki7qldFRAmvGQcmH+TwdUkrTEhkAyJUGA00aoIyhIsIJV5eBonTjdPDQ26bqI8+o43mrqDOCJjF5sVDyved6Q+OcNAUDF9+XpHtOBCAE71OBCUGhVvD0dcZC+qw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n bh=Yqdi7iBuhvqCl4Rr50p60IujMrdrZZ2Pg3HtVXrPlsI=;\n b=YtlKOGY2uaWl4XmnSXIj8YEMMbEBxohpgxyFKKT7/o8imzsPpj07qNQRve+cCXd7XgnLn9aUPpcGe3f3JWfYCTSv0VZsE5l5qwLZIlcSfmDYkUcCzJsLHNHkTM6X7juL42Rb0hDxmxDJo0DQrDY3Pc/c1BzdHeQkDXoRHvKJ4HOvx6YEy05O0DMyVcBfu34LpOdGoUvSvn+IERLi93uYFa7kKEqQUKfglEH6xnZfg+p32cVi9nlrGwpI+p4I0l6gDFmZSMV/DbLDT3rZ2sI6n3kLpEwkID8/+fbVI4vl43ccbK0TEpqfYV9R76ey/Wg8zGytbfDizRVjr+NosX6VPw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.32) smtp.rcpttodomain=6wind.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=Yqdi7iBuhvqCl4Rr50p60IujMrdrZZ2Pg3HtVXrPlsI=;\n b=k7hzpYy363dyl+NNRfqaIJbzMKW1ecg7qApZ/kFmwMiiDmgxXLX057Gfsw5c1V7dnOrOOvlwgwfbB02uYO1C3MDMzg3d/MLJcoSHDcvRZVtJTB88PXyH960+n75GOhijcKDSueExQbsvLTH+LBFIEDJWF2ueeI/le6lXYlnwiuF6gwRZSLRJV+9qOTfiRH4GRaWowOTJA7Zjh9RkI0UHszC7OseSR2O/yJLysdxTOV50Ezl1txZZEXJr7dSCxT6R+iA4j/PSI2DAKBt6bnat53RF1Bw5GGp+5DM3CCCS/IytYnuxIEF4R7/hoOJO60SVfUQKfPF51eWXNR6X2uqh7g==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.32)\n smtp.mailfrom=nvidia.com; 6wind.com; dkim=none (message not signed)\n header.d=none;6wind.com; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.32 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.32; helo=mail.nvidia.com;",
        "From": "<dkozlyuk@oss.nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Dmitry Kozlyuk <dkozlyuk@oss.nvidia.com>, Matan Azrad\n <matan@oss.nvidia.com>, Olivier Matz <olivier.matz@6wind.com>, Andrew\n Rybchenko <andrew.rybchenko@oktetlabs.ru>, Ray Kinsella <mdr@ashroe.eu>,\n \"Anatoly Burakov\" <anatoly.burakov@intel.com>",
        "Date": "Wed, 29 Sep 2021 17:52:46 +0300",
        "Message-ID": "<20210929145249.2176811-2-dkozlyuk@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20210929145249.2176811-1-dkozlyuk@nvidia.com>",
        "References": "<20210818090755.2419483-1-dkozlyuk@nvidia.com>\n <20210929145249.2176811-1-dkozlyuk@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n DRHQMAIL107.nvidia.com (10.27.9.16)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "fc4f14b9-3767-4873-b68c-08d98358d94f",
        "X-MS-TrafficTypeDiagnostic": "CY4PR1201MB0005:",
        "X-Microsoft-Antispam-PRVS": "\n <CY4PR1201MB00050F65C5AD6F512E74F357B9A99@CY4PR1201MB0005.namprd12.prod.outlook.com>",
        "X-MS-Exchange-Transport-Forked": "True",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:1332;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n M9wEYG+7OhFLI1uF2VpndUDSbwk/ij9btVCmy/Q7aJvTEQzmrZ6r9iYcHQT3ZO+AVnymj7mR8MNEHkQRQhWBYVHWd8VXkG8bgo1GTkjgWo/MTuefbfdWsLM6VzBVvtMQ7Yx+BqnwIb6Z9QCg98iJAKfioJjEoijfyfZYZwOhmCEQZkN+7Usg1Gu+hPgeWEOxXELadQdOZZstPmHcKovapxgF72MND66L+jCGmicP8JfciSHp77NBQ62/GMyuho2IbeZxQdLnViSmr9cNySfhXAOboBN0CchaO9i1UpPktvDWzdR3eSzmSBiYq8aQSA2oDQxu+IZ3y4tsQK5SQDl3U7xwN5AjP9vQNp13YHAUP+cJiBA4HJgtM32yzOssJgELKYPfNWAeLGPZLR3Og/ETn5mOd+suv8hnpavvimJJ9irYpiaHAyyuuILEOqDIZu56epFErO+rsuW3NBAXtZ48+RNAa4+nL4ozoDSvqnwm8m/txfm+uuFOjQf6Ynd8YsV2LHFiyO+9j+5x2p3QVYQ1J7FyTfuGHupvQFuv0DhvaIfkpUCrsem+CsCePGSZXNFGg2g8+zkGBrLWTQ9oHmkirGHDT9ZxE+H0Z0qN8MGYC5v3pMzwZDWzc7mOkw+aZNVpYfHKS6kX+HyVKpOAIQ7CkFr1j8pK3BVHBQaPf1v38hDcELH8yTVZgbt2PeT5mxJLdLOET2NBEBYandEUNqLsCQ==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.32; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid01.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(30864003)(6916009)(5660300002)(316002)(4326008)(2906002)(70586007)(55016002)(86362001)(7696005)(2876002)(82310400003)(54906003)(70206006)(426003)(83380400001)(2616005)(1076003)(336012)(47076005)(186003)(8676002)(356005)(8936002)(26005)(36756003)(16526019)(6666004)(107886003)(6286002)(508600001)(36860700001)(7636003);\n DIR:OUT; SFP:1101;",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "29 Sep 2021 14:53:07.4748 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n fc4f14b9-3767-4873-b68c-08d98358d94f",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.32];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT065.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CY4PR1201MB0005",
        "Subject": "[dpdk-dev] [PATCH v2 1/4] mempool: add event callbacks",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Dmitry Kozlyuk <dkozlyuk@oss.nvidia.com>\n\nPerformance of MLX5 PMD of different classes can benefit if PMD knows\nwhich memory it will need to handle in advance, before the first mbuf\nis sent to the PMD. It is impractical, however, to consider\nall allocated memory for this purpose. Most often mbuf memory comes\nfrom mempools that can come and go. PMD can enumerate existing mempools\non device start, but it also needs to track creation and destruction\nof mempools after the forwarding starts but before an mbuf from the new\nmempool is sent to the device.\n\nAdd an internal API to register callback for mempool lify cycle events,\ncurrently RTE_MEMPOOL_EVENT_READY (after populating)\nand RTE_MEMPOOL_EVENT_DESTROY (before freeing):\n* rte_mempool_event_callback_register()\n* rte_mempool_event_callback_unregister()\nProvide a unit test for the new API.\n\nSigned-off-by: Dmitry Kozlyuk <dkozlyuk@oss.nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n app/test/test_mempool.c   |  75 ++++++++++++++++++++\n lib/mempool/rte_mempool.c | 143 +++++++++++++++++++++++++++++++++++++-\n lib/mempool/rte_mempool.h |  56 +++++++++++++++\n lib/mempool/version.map   |   8 +++\n 4 files changed, 279 insertions(+), 3 deletions(-)",
    "diff": "diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c\nindex 7675a3e605..0c4ed7c60b 100644\n--- a/app/test/test_mempool.c\n+++ b/app/test/test_mempool.c\n@@ -14,6 +14,7 @@\n #include <rte_common.h>\n #include <rte_log.h>\n #include <rte_debug.h>\n+#include <rte_errno.h>\n #include <rte_memory.h>\n #include <rte_launch.h>\n #include <rte_cycles.h>\n@@ -471,6 +472,74 @@ test_mp_mem_init(struct rte_mempool *mp,\n \tdata->ret = 0;\n }\n \n+struct test_mempool_events_data {\n+\tstruct rte_mempool *mp;\n+\tenum rte_mempool_event event;\n+\tbool invoked;\n+};\n+\n+static void\n+test_mempool_events_cb(enum rte_mempool_event event,\n+\t\t       struct rte_mempool *mp, void *arg)\n+{\n+\tstruct test_mempool_events_data *data = arg;\n+\n+\tdata->mp = mp;\n+\tdata->event = event;\n+\tdata->invoked = true;\n+}\n+\n+static int\n+test_mempool_events(int (*populate)(struct rte_mempool *mp))\n+{\n+\tstruct test_mempool_events_data data;\n+\tstruct rte_mempool *mp;\n+\tint ret;\n+\n+\tret = rte_mempool_event_callback_register(NULL, &data);\n+\tRTE_TEST_ASSERT_NOT_EQUAL(ret, 0, \"Registered a NULL callback\");\n+\n+\tmemset(&data, 0, sizeof(data));\n+\tret = rte_mempool_event_callback_register(test_mempool_events_cb,\n+\t\t\t\t\t\t  &data);\n+\tRTE_TEST_ASSERT_EQUAL(ret, 0, \"Failed to register the callback: %s\",\n+\t\t\t      rte_strerror(rte_errno));\n+\n+\tmp = rte_mempool_create_empty(\"empty\", MEMPOOL_SIZE,\n+\t\t\t\t      MEMPOOL_ELT_SIZE, 0, 0,\n+\t\t\t\t      SOCKET_ID_ANY, 0);\n+\tRTE_TEST_ASSERT_NOT_NULL(mp, \"Cannot create an empty mempool: %s\",\n+\t\t\t\t rte_strerror(rte_errno));\n+\tRTE_TEST_ASSERT_EQUAL(data.invoked, false,\n+\t\t\t      \"Callback invoked on an empty mempool creation\");\n+\n+\trte_mempool_set_ops_byname(mp, rte_mbuf_best_mempool_ops(), NULL);\n+\tret = populate(mp);\n+\tRTE_TEST_ASSERT_EQUAL(ret, (int)mp->size, \"Failed to populate the mempool: %s\",\n+\t\t\t      rte_strerror(rte_errno));\n+\tRTE_TEST_ASSERT_EQUAL(data.invoked, true,\n+\t\t\t      \"Callback not invoked on an empty mempool population\");\n+\tRTE_TEST_ASSERT_EQUAL(data.event, RTE_MEMPOOL_EVENT_READY,\n+\t\t\t      \"Wrong callback invoked, expected READY\");\n+\tRTE_TEST_ASSERT_EQUAL(data.mp, mp,\n+\t\t\t      \"Callback invoked for a wrong mempool\");\n+\n+\tmemset(&data, 0, sizeof(data));\n+\trte_mempool_free(mp);\n+\tRTE_TEST_ASSERT_EQUAL(data.invoked, true,\n+\t\t\t      \"Callback not invoked on mempool destruction\");\n+\tRTE_TEST_ASSERT_EQUAL(data.event, RTE_MEMPOOL_EVENT_DESTROY,\n+\t\t\t      \"Wrong callback invoked, expected DESTROY\");\n+\tRTE_TEST_ASSERT_EQUAL(data.mp, mp,\n+\t\t\t      \"Callback invoked for a wrong mempool\");\n+\n+\tret = rte_mempool_event_callback_unregister(test_mempool_events_cb,\n+\t\t\t\t\t\t    &data);\n+\tRTE_TEST_ASSERT_EQUAL(ret, 0, \"Failed to unregister the callback: %s\",\n+\t\t\t      rte_strerror(rte_errno));\n+\treturn 0;\n+}\n+\n static int\n test_mempool(void)\n {\n@@ -645,6 +714,12 @@ test_mempool(void)\n \tif (test_mempool_basic(default_pool, 1) < 0)\n \t\tGOTO_ERR(ret, err);\n \n+\t/* test mempool event callbacks */\n+\tif (test_mempool_events(rte_mempool_populate_default) < 0)\n+\t\tGOTO_ERR(ret, err);\n+\tif (test_mempool_events(rte_mempool_populate_anon) < 0)\n+\t\tGOTO_ERR(ret, err);\n+\n \trte_mempool_list_dump(stdout);\n \n \tret = 0;\ndiff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c\nindex 59a588425b..c6cb99ba48 100644\n--- a/lib/mempool/rte_mempool.c\n+++ b/lib/mempool/rte_mempool.c\n@@ -42,6 +42,18 @@ 
static struct rte_tailq_elem rte_mempool_tailq = {\n };\n EAL_REGISTER_TAILQ(rte_mempool_tailq)\n \n+TAILQ_HEAD(mempool_callback_list, rte_tailq_entry);\n+\n+static struct rte_tailq_elem callback_tailq = {\n+\t.name = \"RTE_MEMPOOL_CALLBACK\",\n+};\n+EAL_REGISTER_TAILQ(callback_tailq)\n+\n+/* Invoke all registered mempool event callbacks. */\n+static void\n+mempool_event_callback_invoke(enum rte_mempool_event event,\n+\t\t\t      struct rte_mempool *mp);\n+\n #define CACHE_FLUSHTHRESH_MULTIPLIER 1.5\n #define CALC_CACHE_FLUSHTHRESH(c)\t\\\n \t((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))\n@@ -360,6 +372,10 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,\n \tSTAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);\n \tmp->nb_mem_chunks++;\n \n+\t/* Report the mempool as ready only when fully populated. */\n+\tif (mp->populated_size >= mp->size)\n+\t\tmempool_event_callback_invoke(RTE_MEMPOOL_EVENT_READY, mp);\n+\n \trte_mempool_trace_populate_iova(mp, vaddr, iova, len, free_cb, opaque);\n \treturn i;\n \n@@ -722,6 +738,7 @@ rte_mempool_free(struct rte_mempool *mp)\n \t}\n \trte_mcfg_tailq_write_unlock();\n \n+\tmempool_event_callback_invoke(RTE_MEMPOOL_EVENT_DESTROY, mp);\n \trte_mempool_trace_free(mp);\n \trte_mempool_free_memchunks(mp);\n \trte_mempool_ops_free(mp);\n@@ -779,9 +796,9 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache)\n \n /* create an empty mempool */\n struct rte_mempool *\n-rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,\n-\tunsigned cache_size, unsigned private_data_size,\n-\tint socket_id, unsigned flags)\n+rte_mempool_create_empty(const char *name, unsigned int n,\n+\tunsigned int elt_size, unsigned int cache_size,\n+\tunsigned int private_data_size, int socket_id, unsigned int flags)\n {\n \tchar mz_name[RTE_MEMZONE_NAMESIZE];\n \tstruct rte_mempool_list *mempool_list;\n@@ -1343,3 +1360,123 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),\n \n \trte_mcfg_mempool_read_unlock();\n }\n+\n+struct mempool_callback {\n+\trte_mempool_event_callback *func;\n+\tvoid *arg;\n+};\n+\n+static void\n+mempool_event_callback_invoke(enum rte_mempool_event event,\n+\t\t\t      struct rte_mempool *mp)\n+{\n+\tstruct mempool_callback_list *list;\n+\tstruct rte_tailq_entry *te;\n+\tvoid *tmp_te;\n+\n+\trte_mcfg_tailq_read_lock();\n+\tlist = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);\n+\tTAILQ_FOREACH_SAFE(te, list, next, tmp_te) {\n+\t\tstruct mempool_callback *cb = te->data;\n+\t\trte_mcfg_tailq_read_unlock();\n+\t\tcb->func(event, mp, cb->arg);\n+\t\trte_mcfg_tailq_read_lock();\n+\t}\n+\trte_mcfg_tailq_read_unlock();\n+}\n+\n+int\n+rte_mempool_event_callback_register(rte_mempool_event_callback *func,\n+\t\t\t\t    void *arg)\n+{\n+\tstruct mempool_callback_list *list;\n+\tstruct rte_tailq_entry *te = NULL;\n+\tstruct mempool_callback *cb;\n+\tvoid *tmp_te;\n+\tint ret;\n+\n+\tif (func == NULL) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\trte_mcfg_mempool_read_lock();\n+\trte_mcfg_tailq_write_lock();\n+\n+\tlist = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);\n+\tTAILQ_FOREACH_SAFE(te, list, next, tmp_te) {\n+\t\tstruct mempool_callback *cb =\n+\t\t\t\t\t(struct mempool_callback *)te->data;\n+\t\tif (cb->func == func && cb->arg == arg) {\n+\t\t\tret = -EEXIST;\n+\t\t\tgoto exit;\n+\t\t}\n+\t}\n+\n+\tte = rte_zmalloc(\"MEMPOOL_TAILQ_ENTRY\", sizeof(*te), 0);\n+\tif (te == NULL) {\n+\t\tRTE_LOG(ERR, MEMPOOL,\n+\t\t\t\"Cannot allocate event callback tailq 
entry!\\n\");\n+\t\tret = -ENOMEM;\n+\t\tgoto exit;\n+\t}\n+\n+\tcb = rte_malloc(\"MEMPOOL_EVENT_CALLBACK\", sizeof(*cb), 0);\n+\tif (cb == NULL) {\n+\t\tRTE_LOG(ERR, MEMPOOL,\n+\t\t\t\"Cannot allocate event callback!\\n\");\n+\t\trte_free(te);\n+\t\tret = -ENOMEM;\n+\t\tgoto exit;\n+\t}\n+\n+\tcb->func = func;\n+\tcb->arg = arg;\n+\tte->data = cb;\n+\tTAILQ_INSERT_TAIL(list, te, next);\n+\tret = 0;\n+\n+exit:\n+\trte_mcfg_tailq_write_unlock();\n+\trte_mcfg_mempool_read_unlock();\n+\trte_errno = -ret;\n+\treturn ret;\n+}\n+\n+int\n+rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,\n+\t\t\t\t      void *arg)\n+{\n+\tstruct mempool_callback_list *list;\n+\tstruct rte_tailq_entry *te = NULL;\n+\tstruct mempool_callback *cb;\n+\tint ret;\n+\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY) {\n+\t\trte_errno = EPERM;\n+\t\treturn -1;\n+\t}\n+\n+\trte_mcfg_mempool_read_lock();\n+\trte_mcfg_tailq_write_lock();\n+\tret = -ENOENT;\n+\tlist = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);\n+\tTAILQ_FOREACH(te, list, next) {\n+\t\tcb = (struct mempool_callback *)te->data;\n+\t\tif (cb->func == func && cb->arg == arg)\n+\t\t\tbreak;\n+\t}\n+\tif (te != NULL) {\n+\t\tTAILQ_REMOVE(list, te, next);\n+\t\tret = 0;\n+\t}\n+\trte_mcfg_tailq_write_unlock();\n+\trte_mcfg_mempool_read_unlock();\n+\n+\tif (ret == 0) {\n+\t\trte_free(te);\n+\t\trte_free(cb);\n+\t}\n+\trte_errno = -ret;\n+\treturn ret;\n+}\ndiff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h\nindex 4235d6f0bf..c81e488851 100644\n--- a/lib/mempool/rte_mempool.h\n+++ b/lib/mempool/rte_mempool.h\n@@ -1775,6 +1775,62 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),\n int\n rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);\n \n+/**\n+ * Mempool event type.\n+ * @internal\n+ */\n+enum rte_mempool_event {\n+\t/** Occurs after a mempool is successfully populated. */\n+\tRTE_MEMPOOL_EVENT_READY = 0,\n+\t/** Occurs before destruction of a mempool begins. 
*/\n+\tRTE_MEMPOOL_EVENT_DESTROY = 1,\n+};\n+\n+/**\n+ * @internal\n+ * Mempool event callback.\n+ */\n+typedef void (rte_mempool_event_callback)(\n+\t\tenum rte_mempool_event event,\n+\t\tstruct rte_mempool *mp,\n+\t\tvoid *arg);\n+\n+/**\n+ * @internal\n+ * Register a callback invoked on mempool life cycle event.\n+ * Callbacks will be invoked in the process that creates the mempool.\n+ *\n+ * @param cb\n+ *   Callback function.\n+ * @param cb_arg\n+ *   User data.\n+ *\n+ * @return\n+ *   0 on success, negative on failure and rte_errno is set.\n+ */\n+__rte_internal\n+int\n+rte_mempool_event_callback_register(rte_mempool_event_callback *cb,\n+\t\t\t\t    void *cb_arg);\n+\n+/**\n+ * @internal\n+ * Unregister a callback added with rte_mempool_event_callback_register().\n+ * @p cb and @p arg must exactly match registration parameters.\n+ *\n+ * @param cb\n+ *   Callback function.\n+ * @param cb_arg\n+ *   User data.\n+ *\n+ * @return\n+ *   0 on success, negative on failure and rte_errno is set.\n+ */\n+__rte_internal\n+int\n+rte_mempool_event_callback_unregister(rte_mempool_event_callback *cb,\n+\t\t\t\t      void *cb_arg);\n+\n #ifdef __cplusplus\n }\n #endif\ndiff --git a/lib/mempool/version.map b/lib/mempool/version.map\nindex 9f77da6fff..1b7d7c5456 100644\n--- a/lib/mempool/version.map\n+++ b/lib/mempool/version.map\n@@ -64,3 +64,11 @@ EXPERIMENTAL {\n \t__rte_mempool_trace_ops_free;\n \t__rte_mempool_trace_set_ops_byname;\n };\n+\n+INTERNAL {\n+\tglobal:\n+\n+\t# added in 21.11\n+\trte_mempool_event_callback_register;\n+\trte_mempool_event_callback_unregister;\n+};\n",
    "prefixes": [
        "v2",
        "1/4"
    ]
}
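
The "mbox" and "checks" URLs in the representation above link to the raw patch email and to the per-context CI results behind the aggregate "check": "fail" field. A short follow-up sketch, again with Python requests; the output file name is arbitrary:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/100002/").json()

# Save the raw mbox so the patch can be applied locally, e.g. with "git am".
with open("mempool-event-callbacks.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)

# List each CI check that contributed to the aggregate "check" value.
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])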