get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/95678/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95678,
    "url": "http://patches.dpdk.org/api/patches/95678/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210712014654.32428-21-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210712014654.32428-21-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210712014654.32428-21-suanmingm@nvidia.com",
    "date": "2021-07-12T01:46:48",
    "name": "[v5,20/26] net/mlx5: move modify header allocator to ipool",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "e918fb964a938339fa98406541a87a48d7e6be86",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210712014654.32428-21-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 17759,
            "url": "http://patches.dpdk.org/api/series/17759/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=17759",
            "date": "2021-07-12T01:46:29",
            "name": "net/mlx5: insertion rate optimization",
            "version": 5,
            "mbox": "http://patches.dpdk.org/series/17759/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/95678/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/95678/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9EEDDA0C4B;\n\tMon, 12 Jul 2021 03:50:00 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1EBFA411EF;\n\tMon, 12 Jul 2021 03:48:13 +0200 (CEST)",
            "from NAM11-CO1-obe.outbound.protection.outlook.com\n (mail-co1nam11on2044.outbound.protection.outlook.com [40.107.220.44])\n by mails.dpdk.org (Postfix) with ESMTP id C5A38411DB\n for <dev@dpdk.org>; Mon, 12 Jul 2021 03:48:07 +0200 (CEST)",
            "from BN8PR04CA0061.namprd04.prod.outlook.com (2603:10b6:408:d4::35)\n by CY4PR12MB1557.namprd12.prod.outlook.com (2603:10b6:910:a::23) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4308.23; Mon, 12 Jul\n 2021 01:48:05 +0000",
            "from BN8NAM11FT024.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:d4:cafe::77) by BN8PR04CA0061.outlook.office365.com\n (2603:10b6:408:d4::35) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4308.20 via Frontend\n Transport; Mon, 12 Jul 2021 01:48:05 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT024.mail.protection.outlook.com (10.13.177.38) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4308.20 via Frontend Transport; Mon, 12 Jul 2021 01:48:05 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Mon, 12 Jul\n 2021 01:47:45 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=ZNFyVd9nCd+9AlOilcuBp78uomnRkivbjxv2L3O67sP2SyuprrMQRkTk7QlbjMNdYm3QqPUDX9ffe7YLkJBLAN68B/KpWaMBsxNdXwULYscQDL/A49G3bUrr+hQWRt4bYyu8a0nxcL+fvzprBQoux79SvEKe+GegFqqUKtLGbuEq9DTf6iU7qmDZikRgLBMoKo6oyFEvbGvzUEOdxJvEAXGNQKSVIpnNilYitV0WRFrGo/ZOUohULDY2fvyc1jAFDaTo0YCqesWsHm/sFlpyxzIwTKxKUa4nRIClaokRdapkdBd/8kipgveu4ErmIokVzwqkl58h9i3Msp8MI5qmFQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=BY1udhiAxRELb1PpAe0ChhosbmtHVV3G4lS2h1rGNAg=;\n b=iQPDSWmfttpi9x+8+VgbSjgf3JvW/140PexbScx1ylOynkk5GRq8RXF18OfjDKM9CqbRJ7NuKFjpZ9clb331TD7t9mGZn+09X0+dYnebApAMzU8z71WdoRpTYMFJER9bOFn2FZ3wHxBuVx3OMwYOPfWsDNnsKdiiSxjCAYHDJoTwH/B13esJWkojCrH6sRhUDDZ8xMRRGwbTi6h+HJNFGEO5VPR1hYyNHpjm1D8hOlzAP5YWoDetE7o1FbWmN3EqqgRvhU/I+Q3G9qzlAuXP/WjJt9dXTm7uaCmmI+usrmIaU+dx2Db1ausxJb0bFyWSoGpH9bTS837UfertJpGn8g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=BY1udhiAxRELb1PpAe0ChhosbmtHVV3G4lS2h1rGNAg=;\n b=rNxzA/YWztQabdafsdlbflRmM6mLsjFofTjN9Fjl8ZuAq07Shu+kYjr1p3ojIGb8l1zbvdm3mdZn6mY4XxAR31lqOQj9a2jWv7VYH0YcF5pel0xOt4djrjQXhe5C7HzcL7gPIo8v1oZP01rLNuiQ3Kjde75Cpr2TrgZiY8CRQwaO7K4EIQs+aHqa3LOdUqVwoVEyLV1bC4TTQ9syCUZ35Knv6nsOkC2SskQaGtZGQ2yVUKDWlECof/Wb9CSsiv8ZzqSMpkSonzPl2vPPPnzomd5fW1X8P1tVeHT92rGu5W6O2/zOAYHsr4DJlnOjXHFETIL4RnUEa4sj3GQEnH6GXA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Date": "Mon, 12 Jul 2021 04:46:48 +0300",
        "Message-ID": "<20210712014654.32428-21-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20210712014654.32428-1-suanmingm@nvidia.com>",
        "References": "<20210527093403.1153127-1-suanmingm@nvidia.com>\n <20210712014654.32428-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "15c92c67-b86b-44c0-0218-08d944d717db",
        "X-MS-TrafficTypeDiagnostic": "CY4PR12MB1557:",
        "X-Microsoft-Antispam-PRVS": "\n <CY4PR12MB155773C85F29179994DCA82EC1159@CY4PR12MB1557.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:5797;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n zfnRdao9s3X4uwwVs6gyUJ8Snc9bdwNCL2gNyGO4fUGsADbqFn1A27vINUQWDmLqcqEBAXr6jffM20xnASXpmiTAph5zjUDTZ2CtwN/wixXYJ8TjEm1+Qz7Manb8aBgHm2nX1Gr+x9yHdWKuGpRoc0VhN05nioQ7XPnXkILtVkiOq13xR1kw9Rlz+T2/8vLnj+8oMTTn0gJphvldJOUdczTqSsaCDwv4qDOcrYX/5tiHRr2gnYKj5oszCE4ie5t4PO+BJD8VKYuasaVijMf/4SvF39XnJNbpqSfTDMHwxlwz4DyeSKVh3q+Ktr8s+srQed97dtLyjPTJ8IbN0N/Bgx+aD9CqFKVP15D/KcIQXTtRESRWyhO/r+d8pQXWyujQWBuYAX0epYDeNoLM3KRp5xHxGtgf9Q82IbFL6+X3jZL/kgAYRk/YktW2g5d9EyvkfqfgDUpapeVxAgejNegfzVOYblCKpUN8yf0YfeTZibITL9p6nz6qy8mgVNmnz4Rs6O9MUxnSOlszlQIaisH3J+YfJD7V0gpQpLdxFEEq5J9Ip/2F8XTD8JUf4OUS4cEJ27EWH2Wse/xykqBCp22njK8AtIbcCQUIKxrJXrD0b9EkkwxlvZ3XGBj29y+agu2vZ5Zu2fg32wKKl3ydg20/mMS/jOCp9cIJWrqHhv0CDDNdr+vFCgpw19Jynn8zNgmGae3BzqJ/eUKS8G1McanGENaW6n2hbzZ9saO+K8AR8ws=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(346002)(39860400002)(376002)(136003)(396003)(46966006)(36840700001)(82310400003)(16526019)(5660300002)(7696005)(110136005)(1076003)(186003)(83380400001)(356005)(82740400003)(8936002)(316002)(2906002)(336012)(36756003)(70586007)(4326008)(55016002)(54906003)(86362001)(36906005)(70206006)(26005)(8676002)(47076005)(478600001)(426003)(6286002)(36860700001)(6636002)(2616005)(7636003)(34020700004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "12 Jul 2021 01:48:05.6743 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 15c92c67-b86b-44c0-0218-08d944d717db",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT024.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CY4PR12MB1557",
        "Subject": "[dpdk-dev] [PATCH v5 20/26] net/mlx5: move modify header allocator\n to ipool",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Matan Azrad <matan@nvidia.com>\n\nModify header actions are allocated by mlx5_malloc which has a big\noverhead of memory and allocation time.\n\nOne of the action types under the modify header object is SET_TAG,\n\nThe SET_TAG action is commonly not reused by the flows and each flow has\nits own value.\n\nHence, the mlx5_malloc becomes a bottleneck in flow insertion rate in\nthe common cases of SET_TAG.\n\nUse ipool allocator for SET_TAG action.\n\nIpool allocator has less overhead of memory and insertion rate and has\nbetter synchronization mechanism in multithread cases.\n\nDifferent ipool is created for each optional size of modify header\nhandler.\n\nSigned-off-by: Matan Azrad <matan@nvidia.com>\nAcked-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5.c         |  4 ++\n drivers/net/mlx5/mlx5.h         | 14 ++++++\n drivers/net/mlx5/mlx5_flow.h    | 14 +-----\n drivers/net/mlx5/mlx5_flow_dv.c | 79 ++++++++++++++++++++++++++++-----\n 4 files changed, 86 insertions(+), 25 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex fd675c9262..640d36c6be 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -802,6 +802,7 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,\n \t}\n }\n \n+\n /**\n  * Release the flow resources' indexed mempool.\n  *\n@@ -815,6 +816,9 @@ mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)\n \n \tfor (i = 0; i < MLX5_IPOOL_MAX; ++i)\n \t\tmlx5_ipool_destroy(sh->ipool[i]);\n+\tfor (i = 0; i < MLX5_MAX_MODIFY_NUM; ++i)\n+\t\tif (sh->mdh_ipools[i])\n+\t\t\tmlx5_ipool_destroy(sh->mdh_ipools[i]);\n }\n \n /*\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex e9b08094a6..e7e4749824 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -36,6 +36,19 @@\n \n #define MLX5_SH(dev) (((struct mlx5_priv *)(dev)->data->dev_private)->sh)\n \n+/*\n+ * Number of modification commands.\n+ * The maximal actions amount in FW is some constant, and it is 16 in the\n+ * latest releases. In some old releases, it will be limited to 8.\n+ * Since there is no interface to query the capacity, the maximal value should\n+ * be used to allow PMD to create the flow. The validation will be done in the\n+ * lower driver layer or FW. A failure will be returned if exceeds the maximal\n+ * supported actions number on the root table.\n+ * On non-root tables, there is no limitation, but 32 is enough right now.\n+ */\n+#define MLX5_MAX_MODIFY_NUM\t\t\t32\n+#define MLX5_ROOT_TBL_MODIFY_NUM\t\t16\n+\n enum mlx5_ipool_index {\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \tMLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */\n@@ -1146,6 +1159,7 @@ struct mlx5_dev_ctx_shared {\n \tstruct mlx5_flow_counter_mng cmng; /* Counters management structure. */\n \tvoid *default_miss_action; /* Default miss action. 
*/\n \tstruct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];\n+\tstruct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];\n \t/* Memory Pool for mlx5 flow resources. */\n \tstruct mlx5_l3t_tbl *cnt_id_tbl; /* Shared counter lookup table. */\n \t/* Shared interrupt handler section. */\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex e3a29297ba..7027012220 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -505,23 +505,11 @@ struct mlx5_flow_dv_tag_resource {\n \tuint32_t tag_id; /**< Tag ID. */\n };\n \n-/*\n- * Number of modification commands.\n- * The maximal actions amount in FW is some constant, and it is 16 in the\n- * latest releases. In some old releases, it will be limited to 8.\n- * Since there is no interface to query the capacity, the maximal value should\n- * be used to allow PMD to create the flow. The validation will be done in the\n- * lower driver layer or FW. A failure will be returned if exceeds the maximal\n- * supported actions number on the root table.\n- * On non-root tables, there is no limitation, but 32 is enough right now.\n- */\n-#define MLX5_MAX_MODIFY_NUM\t\t\t32\n-#define MLX5_ROOT_TBL_MODIFY_NUM\t\t16\n-\n /* Modify resource structure */\n struct mlx5_flow_dv_modify_hdr_resource {\n \tstruct mlx5_list_entry entry;\n \tvoid *action; /**< Modify header action object. */\n+\tuint32_t idx;\n \t/* Key area for hash list matching: */\n \tuint8_t ft_type; /**< Flow table type, Rx or Tx. */\n \tuint8_t actions_num; /**< Number of modification actions. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 4b9fd22824..e39fe43854 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -5344,6 +5344,45 @@ flow_dv_modify_match_cb(void *tool_ctx __rte_unused,\n \t       memcmp(&ref->ft_type, &resource->ft_type, key_len);\n }\n \n+static struct mlx5_indexed_pool *\n+flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)\n+{\n+\tstruct mlx5_indexed_pool *ipool = __atomic_load_n\n+\t\t\t\t     (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);\n+\n+\tif (!ipool) {\n+\t\tstruct mlx5_indexed_pool *expected = NULL;\n+\t\tstruct mlx5_indexed_pool_config cfg =\n+\t\t    (struct mlx5_indexed_pool_config) {\n+\t\t       .size = sizeof(struct mlx5_flow_dv_modify_hdr_resource) +\n+\t\t\t\t\t\t\t\t   (index + 1) *\n+\t\t\t\t\t   sizeof(struct mlx5_modification_cmd),\n+\t\t       .trunk_size = 64,\n+\t\t       .grow_trunk = 3,\n+\t\t       .grow_shift = 2,\n+\t\t       .need_lock = 1,\n+\t\t       .release_mem_en = 1,\n+\t\t       .malloc = mlx5_malloc,\n+\t\t       .free = mlx5_free,\n+\t\t       .type = \"mlx5_modify_action_resource\",\n+\t\t};\n+\n+\t\tcfg.size = RTE_ALIGN(cfg.size, sizeof(ipool));\n+\t\tipool = mlx5_ipool_create(&cfg);\n+\t\tif (!ipool)\n+\t\t\treturn NULL;\n+\t\tif (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],\n+\t\t\t\t\t\t &expected, ipool, false,\n+\t\t\t\t\t\t __ATOMIC_SEQ_CST,\n+\t\t\t\t\t\t __ATOMIC_SEQ_CST)) {\n+\t\t\tmlx5_ipool_destroy(ipool);\n+\t\t\tipool = __atomic_load_n(&sh->mdh_ipools[index],\n+\t\t\t\t\t\t__ATOMIC_SEQ_CST);\n+\t\t}\n+\t}\n+\treturn ipool;\n+}\n+\n struct mlx5_list_entry *\n flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)\n {\n@@ -5352,12 +5391,20 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)\n \tstruct mlx5dv_dr_domain *ns;\n \tstruct mlx5_flow_dv_modify_hdr_resource *entry;\n \tstruct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;\n+\tstruct mlx5_indexed_pool *ipool = 
flow_dv_modify_ipool_get(sh,\n+\t\t\t\t\t\t\t  ref->actions_num - 1);\n \tint ret;\n \tuint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);\n \tuint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);\n+\tuint32_t idx;\n \n-\tentry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,\n-\t\t\t    SOCKET_ID_ANY);\n+\tif (unlikely(!ipool)) {\n+\t\trte_flow_error_set(ctx->error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t   NULL, \"cannot allocate modify ipool\");\n+\t\treturn NULL;\n+\t}\n+\tentry = mlx5_ipool_zmalloc(ipool, &idx);\n \tif (!entry) {\n \t\trte_flow_error_set(ctx->error, ENOMEM,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n@@ -5377,25 +5424,29 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)\n \t\t\t\t\t(sh->ctx, ns, entry,\n \t\t\t\t\t data_len, &entry->action);\n \tif (ret) {\n-\t\tmlx5_free(entry);\n+\t\tmlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);\n \t\trte_flow_error_set(ctx->error, ENOMEM,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t   NULL, \"cannot create modification action\");\n \t\treturn NULL;\n \t}\n+\tentry->idx = idx;\n \treturn &entry->entry;\n }\n \n struct mlx5_list_entry *\n-flow_dv_modify_clone_cb(void *tool_ctx __rte_unused,\n-\t\t\tstruct mlx5_list_entry *oentry, void *cb_ctx)\n+flow_dv_modify_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,\n+\t\t\tvoid *cb_ctx)\n {\n+\tstruct mlx5_dev_ctx_shared *sh = tool_ctx;\n \tstruct mlx5_flow_cb_ctx *ctx = cb_ctx;\n \tstruct mlx5_flow_dv_modify_hdr_resource *entry;\n \tstruct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;\n \tuint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);\n+\tuint32_t idx;\n \n-\tentry = mlx5_malloc(0, sizeof(*entry) + data_len, 0, SOCKET_ID_ANY);\n+\tentry = mlx5_ipool_malloc(sh->mdh_ipools[ref->actions_num - 1],\n+\t\t\t\t  &idx);\n \tif (!entry) {\n \t\trte_flow_error_set(ctx->error, ENOMEM,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n@@ 
-5403,14 +5454,18 @@ flow_dv_modify_clone_cb(void *tool_ctx __rte_unused,\n \t\treturn NULL;\n \t}\n \tmemcpy(entry, oentry, sizeof(*entry) + data_len);\n+\tentry->idx = idx;\n \treturn &entry->entry;\n }\n \n void\n-flow_dv_modify_clone_free_cb(void *tool_ctx __rte_unused,\n-\t\t\t     struct mlx5_list_entry *entry)\n+flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)\n {\n-\tmlx5_free(entry);\n+\tstruct mlx5_dev_ctx_shared *sh = tool_ctx;\n+\tstruct mlx5_flow_dv_modify_hdr_resource *res =\n+\t\tcontainer_of(entry, typeof(*res), entry);\n+\n+\tmlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);\n }\n \n /**\n@@ -13756,14 +13811,14 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,\n }\n \n void\n-flow_dv_modify_remove_cb(void *tool_ctx __rte_unused,\n-\t\t\t struct mlx5_list_entry *entry)\n+flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)\n {\n \tstruct mlx5_flow_dv_modify_hdr_resource *res =\n \t\tcontainer_of(entry, typeof(*res), entry);\n+\tstruct mlx5_dev_ctx_shared *sh = tool_ctx;\n \n \tclaim_zero(mlx5_flow_os_destroy_flow_action(res->action));\n-\tmlx5_free(entry);\n+\tmlx5_ipool_free(sh->mdh_ipools[res->actions_num - 1], res->idx);\n }\n \n /**\n",
    "prefixes": [
        "v5",
        "20/26"
    ]
}