get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
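
For illustration, a minimal Python sketch of how a client might read and update this resource follows; it mirrors the GET request shown below. It assumes the requests library and token authentication via the standard "Authorization: Token ..." header; the token value and the target state are placeholders, and write access requires maintainer permissions on the project.

    import requests

    API = "http://patches.dpdk.org/api/patches/118210/"
    TOKEN = "..."  # hypothetical Patchwork API token

    # Read the patch; a plain GET returns the JSON document shown below.
    resp = requests.get(API)
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # Partially update the patch, e.g. change its state (PATCH).
    resp = requests.patch(
        API,
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()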

GET /api/patches/118210/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118210,
    "url": "http://patches.dpdk.org/api/patches/118210/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221014114833.13389-10-valex@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221014114833.13389-10-valex@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221014114833.13389-10-valex@nvidia.com",
    "date": "2022-10-14T11:48:24",
    "name": "[v3,09/18] net/mlx5/hws: Add HWS pool and buddy",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "92f82b3df3fde92d8051495647631ad89fee1400",
    "submitter": {
        "id": 2858,
        "url": "http://patches.dpdk.org/api/people/2858/?format=api",
        "name": "Alex Vesker",
        "email": "valex@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221014114833.13389-10-valex@nvidia.com/mbox/",
    "series": [
        {
            "id": 25236,
            "url": "http://patches.dpdk.org/api/series/25236/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=25236",
            "date": "2022-10-14T11:48:15",
            "name": "net/mlx5: Add HW steering low level support",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/25236/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/118210/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/118210/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A8C79A00C2;\n\tFri, 14 Oct 2022 13:50:23 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D305842DD7;\n\tFri, 14 Oct 2022 13:49:40 +0200 (CEST)",
            "from NAM11-CO1-obe.outbound.protection.outlook.com\n (mail-co1nam11on2070.outbound.protection.outlook.com [40.107.220.70])\n by mails.dpdk.org (Postfix) with ESMTP id 2684542DDF\n for <dev@dpdk.org>; Fri, 14 Oct 2022 13:49:39 +0200 (CEST)",
            "from MW4PR04CA0386.namprd04.prod.outlook.com (2603:10b6:303:81::31)\n by DM4PR12MB6376.namprd12.prod.outlook.com (2603:10b6:8:a0::20) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.29; Fri, 14 Oct\n 2022 11:49:35 +0000",
            "from CO1NAM11FT023.eop-nam11.prod.protection.outlook.com\n (2603:10b6:303:81:cafe::16) by MW4PR04CA0386.outlook.office365.com\n (2603:10b6:303:81::31) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.30 via Frontend\n Transport; Fri, 14 Oct 2022 11:49:35 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n CO1NAM11FT023.mail.protection.outlook.com (10.13.175.35) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5723.20 via Frontend Transport; Fri, 14 Oct 2022 11:49:35 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Fri, 14 Oct\n 2022 04:49:24 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Fri, 14 Oct\n 2022 04:49:21 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=afEEZMylem9UkrxZGJWAO4qutEkT1gl/WrN1+pARI5456gjUcMTFnYS8nGYTMnsDWop0+IFmtHstWKATg9mWzxRFT8qVpVKidqad+mzvTLmKDDYH19awzTkRXuevdFTLF0Xz1mCO4/n2YpChgyHYayzjvjDnIAIjLer5zDw1SSCBv3x0aPantGwfK0R69nb/wHKwtAikIQrSBr+IvxlJJSg4pVNXmRP01zuszKlBEDhtJade105eh17m1XtDVj+NGp6IVfILThAwMzo8e9YlUCOtgvf5CT5oocrm9yjFT/0+NMGRjo2MSpaBZbAg/WoW/2CpJQeh0+Mu+rkUX57duQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=4PTLxbVddXxZgP3Vs9XlyXu84O7DyYE86+p0z3V9GwA=;\n b=H3v0QmAz/21c8XHbIQZj10euo4DpkVvD/UB0JRqbrGAPE2I1nXAkeB4ytQFMm7JsnhYDH+1wyyd8edV0FsLM96VTUbYjKxsmLHmrtmZct2H64UPilDeDL1BRe3deeW2Lazt7KoqNYNbvYltKjXZ9NJrtSXrpT5Lmp/gh44XWA7LAoZBkejd2B88H7fmQj0trBXqA3j/E6h67mBr2hnaTzzOXv00hex6MvvSptVlXWMaKYKukf7Pn1d6FQp2h9V1ihLUcR2/xpJt3GAbCZkF1642zt5ecON6OsOzxU57AWlnySVkNS7cdvtJG+a2sg5JPHEd6NXCpXnXeERAg23xsHQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=4PTLxbVddXxZgP3Vs9XlyXu84O7DyYE86+p0z3V9GwA=;\n b=aW/2T6zcoJTqUoztxu+cMX5zJ8PUBZl2x5q5sgm61OQtVPIwu3dl8dFmKFYIPaKKvnqFwN6UBhKoDy0JY6pM1EWGK8fAXjEl2nGnipGBoePZkz/Akk5DNzhvaMxTrNn7cwlkpNcWcP5oY/yvjvCpzvPuton2oMAXjs0pzJVhtbFtT5+/RNgtLr6m8fD8+3W323LVUJXm4ihFZmPgfkQdu5ZLzseNQ5shalkrW75s4TON9CO7KsSY1xxdEnbDq03E27k3IFtg4uCYfBFvjACI//7Kb0FkQF0w/f2DIXjUjhL5fdLaTpeE8kgLe2XLAoCH7gLNJD2S9dUbFHJxQzgrSw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Alex Vesker <valex@nvidia.com>",
        "To": "<valex@nvidia.com>, <viacheslavo@nvidia.com>, <thomas@monjalon.net>,\n <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, <orika@nvidia.com>, Erez Shitrit <erezsh@nvidia.com>",
        "Subject": "[v3 09/18] net/mlx5/hws: Add HWS pool and buddy",
        "Date": "Fri, 14 Oct 2022 14:48:24 +0300",
        "Message-ID": "<20221014114833.13389-10-valex@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20221014114833.13389-1-valex@nvidia.com>",
        "References": "<20220922190345.394-1-valex@nvidia.com>\n <20221014114833.13389-1-valex@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "CO1NAM11FT023:EE_|DM4PR12MB6376:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "54c9261f-9051-43f9-f312-08daadda2a62",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 0XU/gXDvPOUeblgRV3X9TH2g6fUGToNMmK8xUEXje014nuBMXOhQJcqdsd9vatrAqXq9yezJMAEKjqMlwWQp6KTooL/HJzxRNwdxg/NCE2LrBclm+jh/7phSnIf/26RBOy/pCjpBa5bbZ+gZMn6HcJwRP3/NpaTH3mmVJ6NzE42IhwWiMncsHEUL+57aDXv3C9QAc599CSFG/LbbcANOj7aDv47jsC8FulwUizDk5VieU6+Qyzd3aoe0pD5MNWxkLEmnlyiF3UdkgDLZA87XskTFaiN4e4b9crR58DIvYFHsxc/H/MhO84JUmYXQ7/PSIbDBgTpWkSeaoboiwjpy0WPd6eBED/Wvo6f/o/5Cbi94dnYVwxzMkwB6rsYfZ1couidRbU4tgtDUS21I7jcT/NokPdye2W9Uf0CzKePzXAk4+VEZmbba/cXsr815zp0l21RJTBrOxBHed7mhkv+um+psxvaJxiOOtBubvrXGI0inYqmNbudcyYf6cjb4WczsOtJDrZ2eOAB7/3ArzmvdgJuTMaIxjjUFbo6WLRnMH0UD9gGMlikDlGfRbd4/z4a/Z99jV6Yu/Ne22fYP84kz3p4hBa89R41GYuUh64W1ura6O2yeNBSebZ/VGbLrIyQOkPbPkY7UAPCQYXXMD6E53wnaj6Vb2HWsr/A2ZE3HS0lxD1lpfDWnnYh9WR2iifP33F8b0k4UlvYx3VsfuLsbRAWbV6PCie+1NjfutzNxNWJX+rSIFqM3ynxL/kxB6WpAxkXKOYL+HR9Vtzetrc/gDBMyh5on3SRJTOg5B0tPV+k=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(136003)(376002)(346002)(396003)(39860400002)(451199015)(46966006)(36840700001)(40470700004)(7636003)(82740400003)(356005)(86362001)(40460700003)(26005)(70586007)(6286002)(4326008)(7696005)(41300700001)(2616005)(8676002)(478600001)(70206006)(54906003)(107886003)(316002)(110136005)(2906002)(6636002)(5660300002)(8936002)(426003)(16526019)(1076003)(186003)(47076005)(336012)(83380400001)(36860700001)(30864003)(40480700001)(55016003)(36756003)(82310400005)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "14 Oct 2022 11:49:35.0856 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 54c9261f-9051-43f9-f312-08daadda2a62",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT023.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM4PR12MB6376",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Erez Shitrit <erezsh@nvidia.com>\n\nHWS needs to manage different types of device memory in\nan efficient and quick way. For this, memory pools are\nbeing used.\n\nSigned-off-by: Erez Shitrit <erezsh@nvidia.com>\nSigned-off-by: Alex Vesker <valex@nvidia.com>\n---\n drivers/net/mlx5/hws/mlx5dr_buddy.c | 201 +++++++++\n drivers/net/mlx5/hws/mlx5dr_buddy.h |  22 +\n drivers/net/mlx5/hws/mlx5dr_pool.c  | 672 ++++++++++++++++++++++++++++\n drivers/net/mlx5/hws/mlx5dr_pool.h  | 152 +++++++\n 4 files changed, 1047 insertions(+)\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_buddy.c\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_buddy.h\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_pool.c\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_pool.h",
    "diff": "diff --git a/drivers/net/mlx5/hws/mlx5dr_buddy.c b/drivers/net/mlx5/hws/mlx5dr_buddy.c\nnew file mode 100644\nindex 0000000000..9dba95f0b1\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_buddy.c\n@@ -0,0 +1,201 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#include <rte_bitmap.h>\n+#include <rte_malloc.h>\n+#include \"mlx5dr_internal.h\"\n+#include \"mlx5dr_buddy.h\"\n+\n+static struct rte_bitmap *bitmap_alloc0(int s)\n+{\n+\tstruct rte_bitmap *bitmap;\n+\tuint32_t bmp_size;\n+\tvoid *mem;\n+\n+\tbmp_size = rte_bitmap_get_memory_footprint(s);\n+\tmem = rte_zmalloc(\"create_bmap\", bmp_size, RTE_CACHE_LINE_SIZE);\n+\tif (!mem) {\n+\t\tDR_LOG(ERR, \"No mem for bitmap\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\tbitmap = rte_bitmap_init(s, mem, bmp_size);\n+\tif (!bitmap) {\n+\t\tDR_LOG(ERR, \"%s Failed to initialize bitmap\", __func__);\n+\t\trte_errno = EINVAL;\n+\t\tgoto err_mem_alloc;\n+\t}\n+\n+\treturn bitmap;\n+\n+err_mem_alloc:\n+\trte_free(mem);\n+\treturn NULL;\n+}\n+\n+static void bitmap_set_bit(struct rte_bitmap *bmp, uint32_t pos)\n+{\n+\trte_bitmap_set(bmp, pos);\n+}\n+\n+static void bitmap_clear_bit(struct rte_bitmap *bmp, uint32_t pos)\n+{\n+\trte_bitmap_clear(bmp, pos);\n+}\n+\n+static bool bitmap_test_bit(struct rte_bitmap *bmp, unsigned long n)\n+{\n+\treturn !!rte_bitmap_get(bmp, n);\n+}\n+\n+static unsigned long bitmap_ffs(struct rte_bitmap *bmap,\n+\t\t\t\tunsigned long n, unsigned long m)\n+{\n+\tuint64_t out_slab = 0;\n+\tuint32_t pos = 0; /* Compilation warn */\n+\n+\t__rte_bitmap_scan_init(bmap);\n+\tif (!rte_bitmap_scan(bmap, &pos, &out_slab)) {\n+\t\tDR_LOG(ERR, \"Failed to get slab from bitmap.\");\n+\t\treturn m;\n+\t}\n+\tpos = pos + __builtin_ctzll(out_slab);\n+\n+\tif (pos < n) {\n+\t\tDR_LOG(ERR, \"Unexpected bit (%d < %\"PRIx64\") from bitmap\", pos, n);\n+\t\treturn m;\n+\t}\n+\treturn pos;\n+}\n+\n+static unsigned long mlx5dr_buddy_find_first_bit(struct rte_bitmap *addr,\n+\t\t\t\t\t\t uint32_t size)\n+{\n+\treturn bitmap_ffs(addr, 0, size);\n+}\n+\n+static int mlx5dr_buddy_init(struct mlx5dr_buddy_mem *buddy, uint32_t max_order)\n+{\n+\tint i, s;\n+\n+\tbuddy->max_order = max_order;\n+\n+\tbuddy->bits = simple_calloc(buddy->max_order + 1, sizeof(long *));\n+\tif (!buddy->bits) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn -1;\n+\t}\n+\n+\tbuddy->num_free = simple_calloc(buddy->max_order + 1, sizeof(*buddy->num_free));\n+\tif (!buddy->num_free) {\n+\t\trte_errno = ENOMEM;\n+\t\tgoto err_out_free_bits;\n+\t}\n+\n+\tfor (i = 0; i <= (int)buddy->max_order; ++i) {\n+\t\ts = 1 << (buddy->max_order - i);\n+\t\tbuddy->bits[i] = bitmap_alloc0(s);\n+\t\tif (!buddy->bits[i])\n+\t\t\tgoto err_out_free_num_free;\n+\t}\n+\n+\tbitmap_set_bit(buddy->bits[buddy->max_order], 0);\n+\n+\tbuddy->num_free[buddy->max_order] = 1;\n+\n+\treturn 0;\n+\n+err_out_free_num_free:\n+\tfor (i = 0; i <= (int)buddy->max_order; ++i)\n+\t\trte_free(buddy->bits[i]);\n+\n+\tsimple_free(buddy->num_free);\n+\n+err_out_free_bits:\n+\tsimple_free(buddy->bits);\n+\treturn -1;\n+}\n+\n+struct mlx5dr_buddy_mem *mlx5dr_buddy_create(uint32_t max_order)\n+{\n+\tstruct mlx5dr_buddy_mem *buddy;\n+\n+\tbuddy = simple_calloc(1, sizeof(*buddy));\n+\tif (!buddy) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\tif (mlx5dr_buddy_init(buddy, max_order))\n+\t\tgoto free_buddy;\n+\n+\treturn buddy;\n+\n+free_buddy:\n+\tsimple_free(buddy);\n+\treturn NULL;\n+}\n+\n+void mlx5dr_buddy_cleanup(struct 
mlx5dr_buddy_mem *buddy)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i <= (int)buddy->max_order; ++i) {\n+\t\trte_free(buddy->bits[i]);\n+\t}\n+\n+\tsimple_free(buddy->num_free);\n+\tsimple_free(buddy->bits);\n+}\n+\n+int mlx5dr_buddy_alloc_mem(struct mlx5dr_buddy_mem *buddy, int order)\n+{\n+\tint seg;\n+\tint o, m;\n+\n+\tfor (o = order; o <= (int)buddy->max_order; ++o)\n+\t\tif (buddy->num_free[o]) {\n+\t\t\tm = 1 << (buddy->max_order - o);\n+\t\t\tseg = mlx5dr_buddy_find_first_bit(buddy->bits[o], m);\n+\t\t\tif (m <= seg)\n+\t\t\t\treturn -1;\n+\n+\t\t\tgoto found;\n+\t\t}\n+\n+\treturn -1;\n+\n+found:\n+\tbitmap_clear_bit(buddy->bits[o], seg);\n+\t--buddy->num_free[o];\n+\n+\twhile (o > order) {\n+\t\t--o;\n+\t\tseg <<= 1;\n+\t\tbitmap_set_bit(buddy->bits[o], seg ^ 1);\n+\t\t++buddy->num_free[o];\n+\t}\n+\n+\tseg <<= order;\n+\n+\treturn seg;\n+}\n+\n+void mlx5dr_buddy_free_mem(struct mlx5dr_buddy_mem *buddy, uint32_t seg, int order)\n+{\n+\tseg >>= order;\n+\n+\twhile (bitmap_test_bit(buddy->bits[order], seg ^ 1)) {\n+\t\tbitmap_clear_bit(buddy->bits[order], seg ^ 1);\n+\t\t--buddy->num_free[order];\n+\t\tseg >>= 1;\n+\t\t++order;\n+\t}\n+\n+\tbitmap_set_bit(buddy->bits[order], seg);\n+\n+\t++buddy->num_free[order];\n+}\n+\ndiff --git a/drivers/net/mlx5/hws/mlx5dr_buddy.h b/drivers/net/mlx5/hws/mlx5dr_buddy.h\nnew file mode 100644\nindex 0000000000..b9ec446b99\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_buddy.h\n@@ -0,0 +1,22 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#ifndef MLX5DR_BUDDY_H_\n+#define MLX5DR_BUDDY_H_\n+\n+struct mlx5dr_buddy_mem {\n+\tstruct rte_bitmap **bits;\n+\tunsigned int *num_free;\n+\tuint32_t max_order;\n+};\n+\n+struct mlx5dr_buddy_mem *mlx5dr_buddy_create(uint32_t max_order);\n+\n+void mlx5dr_buddy_cleanup(struct mlx5dr_buddy_mem *buddy);\n+\n+int mlx5dr_buddy_alloc_mem(struct mlx5dr_buddy_mem *buddy, int order);\n+\n+void mlx5dr_buddy_free_mem(struct mlx5dr_buddy_mem *buddy, uint32_t seg, int order);\n+\n+#endif /* MLX5DR_BUDDY_H_ */\ndiff --git a/drivers/net/mlx5/hws/mlx5dr_pool.c b/drivers/net/mlx5/hws/mlx5dr_pool.c\nnew file mode 100644\nindex 0000000000..2bfda5b4a5\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_pool.c\n@@ -0,0 +1,672 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#include <rte_bitmap.h>\n+#include <rte_malloc.h>\n+#include \"mlx5dr_buddy.h\"\n+#include \"mlx5dr_internal.h\"\n+\n+static void mlx5dr_pool_free_one_resource(struct mlx5dr_pool_resource *resource)\n+{\n+\tmlx5dr_cmd_destroy_obj(resource->devx_obj);\n+\n+\tsimple_free(resource);\n+}\n+\n+static void mlx5dr_pool_resource_free(struct mlx5dr_pool *pool,\n+\t\t\t\t      int resource_idx)\n+{\n+\tmlx5dr_pool_free_one_resource(pool->resource[resource_idx]);\n+\tpool->resource[resource_idx] = NULL;\n+\n+\tif (pool->tbl_type == MLX5DR_TABLE_TYPE_FDB) {\n+\t\tmlx5dr_pool_free_one_resource(pool->mirror_resource[resource_idx]);\n+\t\tpool->mirror_resource[resource_idx] = NULL;\n+\t}\n+}\n+\n+static struct mlx5dr_pool_resource *\n+mlx5dr_pool_create_one_resource(struct mlx5dr_pool *pool, uint32_t log_range,\n+\t\t\t\tuint32_t fw_ft_type)\n+{\n+\tstruct mlx5dr_cmd_ste_create_attr ste_attr;\n+\tstruct mlx5dr_cmd_stc_create_attr stc_attr;\n+\tstruct mlx5dr_pool_resource *resource;\n+\tstruct mlx5dr_devx_obj *devx_obj;\n+\n+\tresource = simple_malloc(sizeof(*resource));\n+\tif (!resource) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\tswitch 
(pool->type) {\n+\tcase MLX5DR_POOL_TYPE_STE:\n+\t\tste_attr.log_obj_range = log_range;\n+\t\tste_attr.table_type = fw_ft_type;\n+\t\tdevx_obj = mlx5dr_cmd_ste_create(pool->ctx->ibv_ctx, &ste_attr);\n+\t\tbreak;\n+\tcase MLX5DR_POOL_TYPE_STC:\n+\t\tstc_attr.log_obj_range = log_range;\n+\t\tstc_attr.table_type = fw_ft_type;\n+\t\tdevx_obj = mlx5dr_cmd_stc_create(pool->ctx->ibv_ctx, &stc_attr);\n+\t\tbreak;\n+\tdefault:\n+\t\tassert(0);\n+\t\tbreak;\n+\t}\n+\n+\tif (!devx_obj) {\n+\t\tDR_LOG(ERR, \"Failed to allocate resource objects\");\n+\t\tgoto free_resource;\n+\t}\n+\n+\tresource->pool = pool;\n+\tresource->devx_obj = devx_obj;\n+\tresource->range = 1 << log_range;\n+\tresource->base_id = devx_obj->id;\n+\n+\treturn resource;\n+\n+free_resource:\n+\tsimple_free(resource);\n+\treturn NULL;\n+}\n+\n+static int\n+mlx5dr_pool_resource_alloc(struct mlx5dr_pool *pool, uint32_t log_range, int idx)\n+{\n+\tstruct mlx5dr_pool_resource *resource;\n+\tuint32_t fw_ft_type, opt_log_range;\n+\n+\tfw_ft_type = mlx5dr_table_get_res_fw_ft_type(pool->tbl_type, false);\n+\topt_log_range = pool->opt_type == MLX5DR_POOL_OPTIMIZE_ORIG ? 0 : log_range;\n+\tresource = mlx5dr_pool_create_one_resource(pool, opt_log_range, fw_ft_type);\n+\tif (!resource) {\n+\t\tDR_LOG(ERR, \"Failed allocating resource\");\n+\t\treturn rte_errno;\n+\t}\n+\tpool->resource[idx] = resource;\n+\n+\tif (pool->tbl_type == MLX5DR_TABLE_TYPE_FDB) {\n+\t\tstruct mlx5dr_pool_resource *mir_resource;\n+\n+\t\tfw_ft_type = mlx5dr_table_get_res_fw_ft_type(pool->tbl_type, true);\n+\t\topt_log_range = pool->opt_type == MLX5DR_POOL_OPTIMIZE_MIRROR ? 0 : log_range;\n+\t\tmir_resource = mlx5dr_pool_create_one_resource(pool, opt_log_range, fw_ft_type);\n+\t\tif (!mir_resource) {\n+\t\t\tDR_LOG(ERR, \"Failed allocating mirrored resource\");\n+\t\t\tmlx5dr_pool_free_one_resource(resource);\n+\t\t\tpool->resource[idx] = NULL;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t\tpool->mirror_resource[idx] = mir_resource;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int mlx5dr_pool_bitmap_get_free_slot(struct rte_bitmap *bitmap, uint32_t *iidx)\n+{\n+\tuint64_t slab = 0;\n+\n+\t__rte_bitmap_scan_init(bitmap);\n+\n+\tif (!rte_bitmap_scan(bitmap, iidx, &slab))\n+\t\treturn ENOMEM;\n+\n+\t*iidx += __builtin_ctzll(slab);\n+\n+\trte_bitmap_clear(bitmap, *iidx);\n+\n+\treturn 0;\n+}\n+\n+static struct rte_bitmap *mlx5dr_pool_create_and_init_bitmap(uint32_t log_range)\n+{\n+\tstruct rte_bitmap *cur_bmp;\n+\tuint32_t bmp_size;\n+\tvoid *mem;\n+\n+\tbmp_size = rte_bitmap_get_memory_footprint(1 << log_range);\n+\tmem = rte_zmalloc(\"create_stc_bmap\", bmp_size, RTE_CACHE_LINE_SIZE);\n+\tif (!mem) {\n+\t\tDR_LOG(ERR, \"No mem for bitmap\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\tcur_bmp = rte_bitmap_init_with_all_set(1 << log_range, mem, bmp_size);\n+\tif (!cur_bmp) {\n+\t\trte_free(mem);\n+\t\tDR_LOG(ERR, \"Failed to initialize stc bitmap.\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\treturn cur_bmp;\n+}\n+\n+static void mlx5dr_pool_buddy_db_put_chunk(struct mlx5dr_pool *pool,\n+\t\t\t\t      struct mlx5dr_pool_chunk *chunk)\n+{\n+\tstruct mlx5dr_buddy_mem *buddy;\n+\n+\tbuddy = pool->db.buddy_manager->buddies[chunk->resource_idx];\n+\tif (!buddy) {\n+\t\tassert(false);\n+\t\tDR_LOG(ERR, \"No such buddy (%d)\", chunk->resource_idx);\n+\t\treturn;\n+\t}\n+\n+\tmlx5dr_buddy_free_mem(buddy, chunk->offset, chunk->order);\n+}\n+\n+static struct mlx5dr_buddy_mem *\n+mlx5dr_pool_buddy_get_next_buddy(struct mlx5dr_pool *pool, int idx,\n+\t\t\t\t uint32_t 
order, bool *is_new_buddy)\n+{\n+\tstatic struct mlx5dr_buddy_mem *buddy;\n+\tuint32_t new_buddy_size;\n+\n+\tbuddy = pool->db.buddy_manager->buddies[idx];\n+\tif (buddy)\n+\t\treturn buddy;\n+\n+\tnew_buddy_size = RTE_MAX(pool->alloc_log_sz, order);\n+\t*is_new_buddy = true;\n+\tbuddy = mlx5dr_buddy_create(new_buddy_size);\n+\tif (!buddy) {\n+\t\tDR_LOG(ERR, \"Failed to create buddy order: %d index: %d\",\n+\t\t       new_buddy_size, idx);\n+\t\treturn NULL;\n+\t}\n+\n+\tif (mlx5dr_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {\n+\t\tDR_LOG(ERR, \"Failed to create resource type: %d: size %d index: %d\",\n+\t\t\tpool->type, new_buddy_size, idx);\n+\t\tmlx5dr_buddy_cleanup(buddy);\n+\t\treturn NULL;\n+\t}\n+\n+\tpool->db.buddy_manager->buddies[idx] = buddy;\n+\n+\treturn buddy;\n+}\n+\n+static int mlx5dr_pool_buddy_get_mem_chunk(struct mlx5dr_pool *pool,\n+\t\t\t\t\t   int order,\n+\t\t\t\t\t   uint32_t *buddy_idx,\n+\t\t\t\t\t   int *seg)\n+{\n+\tstruct mlx5dr_buddy_mem *buddy;\n+\tbool new_mem = false;\n+\tint err = 0;\n+\tint i;\n+\n+\t*seg = -1;\n+\n+\t/* Find the next free place from the buddy array */\n+\twhile (*seg == -1) {\n+\t\tfor (i = 0; i < MLX5DR_POOL_RESOURCE_ARR_SZ; i++) {\n+\t\t\tbuddy = mlx5dr_pool_buddy_get_next_buddy(pool, i,\n+\t\t\t\t\t\t\t\t order,\n+\t\t\t\t\t\t\t\t &new_mem);\n+\t\t\tif (!buddy) {\n+\t\t\t\terr = rte_errno;\n+\t\t\t\tgoto out;\n+\t\t\t}\n+\n+\t\t\t*seg = mlx5dr_buddy_alloc_mem(buddy, order);\n+\t\t\tif (*seg != -1)\n+\t\t\t\tgoto found;\n+\n+\t\t\tif (pool->flags & MLX5DR_POOL_FLAGS_ONE_RESOURCE) {\n+\t\t\t\tDR_LOG(ERR, \"Fail to allocate seg for one resource pool\");\n+\t\t\t\terr = rte_errno;\n+\t\t\t\tgoto out;\n+\t\t\t}\n+\n+\t\t\tif (new_mem) {\n+\t\t\t\t/* We have new memory pool, should be place for us */\n+\t\t\t\tassert(false);\n+\t\t\t\tDR_LOG(ERR, \"No memory for order: %d with buddy no: %d\",\n+\t\t\t\t\torder, i);\n+\t\t\t\trte_errno = ENOMEM;\n+\t\t\t\terr = ENOMEM;\n+\t\t\t\tgoto out;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+found:\n+\t*buddy_idx = i;\n+out:\n+\treturn err;\n+}\n+\n+static int mlx5dr_pool_buddy_db_get_chunk(struct mlx5dr_pool *pool,\n+\t\t\t\t     struct mlx5dr_pool_chunk *chunk)\n+{\n+\tint ret = 0;\n+\n+\t/* Go over the buddies and find next free slot */\n+\tret = mlx5dr_pool_buddy_get_mem_chunk(pool, chunk->order,\n+\t\t\t\t\t      &chunk->resource_idx,\n+\t\t\t\t\t      &chunk->offset);\n+\tif (ret)\n+\t\tDR_LOG(ERR, \"Failed to get free slot for chunk with order: %d\",\n+\t\t\tchunk->order);\n+\n+\treturn ret;\n+}\n+\n+static void mlx5dr_pool_buddy_db_uninit(struct mlx5dr_pool *pool)\n+{\n+\tstruct mlx5dr_buddy_mem *buddy;\n+\tint i;\n+\n+\tfor (i = 0; i < MLX5DR_POOL_RESOURCE_ARR_SZ; i++) {\n+\t\tbuddy = pool->db.buddy_manager->buddies[i];\n+\t\tif (buddy) {\n+\t\t\tmlx5dr_buddy_cleanup(buddy);\n+\t\t\tsimple_free(buddy);\n+\t\t\tpool->db.buddy_manager->buddies[i] = NULL;\n+\t\t}\n+\t}\n+\n+\tsimple_free(pool->db.buddy_manager);\n+}\n+\n+static int mlx5dr_pool_buddy_db_init(struct mlx5dr_pool *pool, uint32_t log_range)\n+{\n+\tpool->db.buddy_manager = simple_calloc(1, sizeof(*pool->db.buddy_manager));\n+\tif (!pool->db.buddy_manager) {\n+\t\tDR_LOG(ERR, \"No mem for buddy_manager with log_range: %d\", log_range);\n+\t\trte_errno = ENOMEM;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (pool->flags & MLX5DR_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {\n+\t\tbool new_buddy;\n+\n+\t\tif (!mlx5dr_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {\n+\t\t\tDR_LOG(ERR, \"Failed allocating memory on create log_sz: %d\", 
log_range);\n+\t\t\tsimple_free(pool->db.buddy_manager);\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t}\n+\n+\tpool->p_db_uninit = &mlx5dr_pool_buddy_db_uninit;\n+\tpool->p_get_chunk = &mlx5dr_pool_buddy_db_get_chunk;\n+\tpool->p_put_chunk = &mlx5dr_pool_buddy_db_put_chunk;\n+\n+\treturn 0;\n+}\n+\n+static int mlx5dr_pool_create_resource_on_index(struct mlx5dr_pool *pool,\n+\t\t\t\t\t\tuint32_t alloc_size, int idx)\n+{\n+\tif (mlx5dr_pool_resource_alloc(pool, alloc_size, idx) != 0) {\n+\t\tDR_LOG(ERR, \"Failed to create resource type: %d: size %d index: %d\",\n+\t\t\tpool->type, alloc_size, idx);\n+\t\treturn rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static struct mlx5dr_pool_elements *\n+mlx5dr_pool_element_create_new_elem(struct mlx5dr_pool *pool, uint32_t order, int idx)\n+{\n+\tstruct mlx5dr_pool_elements *elem;\n+\tuint32_t alloc_size;\n+\n+\talloc_size = pool->alloc_log_sz;\n+\n+\telem = simple_calloc(1, sizeof(*elem));\n+\tif (!elem) {\n+\t\tDR_LOG(ERR, \"Failed to create elem order: %d index: %d\",\n+\t\t       order, idx);\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\t/*sharing the same resource, also means that all the elements are with size 1*/\n+\tif ((pool->flags & MLX5DR_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&\n+\t    !(pool->flags & MLX5DR_POOL_FLAGS_RESOURCE_PER_CHUNK)) {\n+\t\t /* Currently all chunks in size 1 */\n+\t\telem->bitmap =  mlx5dr_pool_create_and_init_bitmap(alloc_size - order);\n+\t\tif (!elem->bitmap) {\n+\t\t\tDR_LOG(ERR, \"Failed to create bitmap type: %d: size %d index: %d\",\n+\t\t\t       pool->type, alloc_size, idx);\n+\t\t\tgoto free_elem;\n+\t\t}\n+\t}\n+\n+\tif (mlx5dr_pool_create_resource_on_index(pool, alloc_size, idx)) {\n+\t\tDR_LOG(ERR, \"Failed to create resource type: %d: size %d index: %d\",\n+\t\t\tpool->type, alloc_size, idx);\n+\t\tgoto free_db;\n+\t}\n+\n+\tpool->db.element_manager->elements[idx] = elem;\n+\n+\treturn elem;\n+\n+free_db:\n+\trte_free(elem->bitmap);\n+free_elem:\n+\tsimple_free(elem);\n+\treturn NULL;\n+}\n+\n+static int mlx5dr_pool_element_find_seg(struct mlx5dr_pool_elements *elem, int *seg)\n+{\n+\tif (mlx5dr_pool_bitmap_get_free_slot(elem->bitmap, (uint32_t *)seg)) {\n+\t\telem->is_full = true;\n+\t\treturn ENOMEM;\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_pool_onesize_element_get_mem_chunk(struct mlx5dr_pool *pool, uint32_t order,\n+\t\t\t\t\t  uint32_t *idx, int *seg)\n+{\n+\tstruct mlx5dr_pool_elements *elem;\n+\n+\telem = pool->db.element_manager->elements[0];\n+\tif (!elem)\n+\t\telem = mlx5dr_pool_element_create_new_elem(pool, order, 0);\n+\tif (!elem)\n+\t\tgoto err_no_elem;\n+\n+\t*idx = 0;\n+\n+\tif (mlx5dr_pool_element_find_seg(elem, seg) != 0) {\n+\t\tDR_LOG(ERR, \"No more resources (last request order: %d)\", order);\n+\t\trte_errno = ENOMEM;\n+\t\treturn ENOMEM;\n+\t}\n+\n+\telem->num_of_elements++;\n+\treturn 0;\n+\n+err_no_elem:\n+\tDR_LOG(ERR, \"Failed to allocate element for order: %d\", order);\n+\treturn ENOMEM;\n+}\n+\n+static int\n+mlx5dr_pool_general_element_get_mem_chunk(struct mlx5dr_pool *pool, uint32_t order,\n+\t\t\t\t\t  uint32_t *idx, int *seg)\n+{\n+\tint ret;\n+\tint i;\n+\n+\tfor (i = 0; i < MLX5DR_POOL_RESOURCE_ARR_SZ; i++) {\n+\t\tif (!pool->resource[i]) {\n+\t\t\tret = mlx5dr_pool_create_resource_on_index(pool, order, i);\n+\t\t\tif (ret)\n+\t\t\t\tgoto err_no_res;\n+\t\t\t*idx = i;\n+\t\t\t*seg = 0; /* One memory slot in that element */\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\n+\trte_errno = ENOMEM;\n+\tDR_LOG(ERR, \"No more resources (last request order: %d)\", order);\n+\treturn 
ENOMEM;\n+\n+err_no_res:\n+\tDR_LOG(ERR, \"Failed to allocate element for order: %d\", order);\n+\treturn ENOMEM;\n+}\n+\n+static int mlx5dr_pool_general_element_db_get_chunk(struct mlx5dr_pool *pool,\n+\t\t\t\t\t\t    struct mlx5dr_pool_chunk *chunk)\n+{\n+\tint ret;\n+\n+\t/* Go over all memory elements and find/allocate free slot */\n+\tret = mlx5dr_pool_general_element_get_mem_chunk(pool, chunk->order,\n+\t\t\t\t\t\t\t&chunk->resource_idx,\n+\t\t\t\t\t\t\t&chunk->offset);\n+\tif (ret)\n+\t\tDR_LOG(ERR, \"Failed to get free slot for chunk with order: %d\",\n+\t\t\tchunk->order);\n+\n+\treturn ret;\n+}\n+\n+static void mlx5dr_pool_general_element_db_put_chunk(struct mlx5dr_pool *pool,\n+\t\t\t\t\t\t     struct mlx5dr_pool_chunk *chunk)\n+{\n+\tassert(pool->resource[chunk->resource_idx]);\n+\n+\tif (pool->flags & MLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE)\n+\t\tmlx5dr_pool_resource_free(pool, chunk->resource_idx);\n+}\n+\n+static void mlx5dr_pool_general_element_db_uninit(struct mlx5dr_pool *pool)\n+{\n+\t(void)pool;\n+}\n+\n+/* This memory management works as the following:\n+ * - At start doesn't allocate no mem at all.\n+ * - When new request for chunk arrived:\n+ *\tallocate resource and give it.\n+ * - When free that chunk:\n+ *\tthe resource is freed.\n+ */\n+static int mlx5dr_pool_general_element_db_init(struct mlx5dr_pool *pool)\n+{\n+\tpool->db.element_manager = simple_calloc(1, sizeof(*pool->db.element_manager));\n+\tif (!pool->db.element_manager) {\n+\t\tDR_LOG(ERR, \"No mem for general elemnt_manager\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tpool->p_db_uninit = &mlx5dr_pool_general_element_db_uninit;\n+\tpool->p_get_chunk = &mlx5dr_pool_general_element_db_get_chunk;\n+\tpool->p_put_chunk = &mlx5dr_pool_general_element_db_put_chunk;\n+\n+\treturn 0;\n+}\n+\n+static void mlx5dr_onesize_element_db_destroy_element(struct mlx5dr_pool *pool,\n+\t\t\t\t\t\t      struct mlx5dr_pool_elements *elem,\n+\t\t\t\t\t\t      struct mlx5dr_pool_chunk *chunk)\n+{\n+\tassert(pool->resource[chunk->resource_idx]);\n+\n+\tmlx5dr_pool_resource_free(pool, chunk->resource_idx);\n+\n+\tsimple_free(elem);\n+\tpool->db.element_manager->elements[chunk->resource_idx] = NULL;\n+}\n+\n+static void mlx5dr_onesize_element_db_put_chunk(struct mlx5dr_pool *pool,\n+\t\t\t\t\t\tstruct mlx5dr_pool_chunk *chunk)\n+{\n+\tstruct mlx5dr_pool_elements *elem;\n+\n+\tassert(chunk->resource_idx == 0);\n+\n+\telem = pool->db.element_manager->elements[chunk->resource_idx];\n+\tif (!elem) {\n+\t\tassert(false);\n+\t\tDR_LOG(ERR, \"No such element (%d)\", chunk->resource_idx);\n+\t\treturn;\n+\t}\n+\n+\trte_bitmap_set(elem->bitmap, chunk->offset);\n+\telem->is_full = false;\n+\telem->num_of_elements--;\n+\n+\tif (pool->flags & MLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE &&\n+\t   !elem->num_of_elements)\n+\t\tmlx5dr_onesize_element_db_destroy_element(pool, elem, chunk);\n+}\n+\n+static int mlx5dr_onesize_element_db_get_chunk(struct mlx5dr_pool *pool,\n+\t\t\t\t\t       struct mlx5dr_pool_chunk *chunk)\n+{\n+\tint ret = 0;\n+\n+\t/* Go over all memory elements and find/allocate free slot */\n+\tret = mlx5dr_pool_onesize_element_get_mem_chunk(pool, chunk->order,\n+\t\t\t\t\t\t\t&chunk->resource_idx,\n+\t\t\t\t\t\t\t&chunk->offset);\n+\tif (ret)\n+\t\tDR_LOG(ERR, \"Failed to get free slot for chunk with order: %d\",\n+\t\t\tchunk->order);\n+\n+\treturn ret;\n+}\n+\n+static void mlx5dr_onesize_element_db_uninit(struct mlx5dr_pool *pool)\n+{\n+\tstruct mlx5dr_pool_elements *elem;\n+\tint i;\n+\n+\tfor (i = 0; i < 
MLX5DR_POOL_RESOURCE_ARR_SZ; i++) {\n+\t\telem = pool->db.element_manager->elements[i];\n+\t\tif (elem) {\n+\t\t\tif (elem->bitmap)\n+\t\t\t\trte_free(elem->bitmap);\n+\t\t\tsimple_free(elem);\n+\t\t\tpool->db.element_manager->elements[i] = NULL;\n+\t\t}\n+\t}\n+\tsimple_free(pool->db.element_manager);\n+}\n+\n+/* This memory management works as the following:\n+ * - At start doesn't allocate no mem at all.\n+ * - When new request for chunk arrived:\n+ *  aloocate the first and only slot of memory/resource\n+ *  when it ended return error.\n+ */\n+static int mlx5dr_pool_onesize_element_db_init(struct mlx5dr_pool *pool)\n+{\n+\tpool->db.element_manager = simple_calloc(1, sizeof(*pool->db.element_manager));\n+\tif (!pool->db.element_manager) {\n+\t\tDR_LOG(ERR, \"No mem for general elemnt_manager\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tpool->p_db_uninit = &mlx5dr_onesize_element_db_uninit;\n+\tpool->p_get_chunk = &mlx5dr_onesize_element_db_get_chunk;\n+\tpool->p_put_chunk = &mlx5dr_onesize_element_db_put_chunk;\n+\n+\treturn 0;\n+}\n+\n+static int mlx5dr_pool_db_init(struct mlx5dr_pool *pool,\n+\t\t\t       enum mlx5dr_db_type db_type)\n+{\n+\tint ret;\n+\n+\tif (db_type == MLX5DR_POOL_DB_TYPE_GENERAL_SIZE)\n+\t\tret = mlx5dr_pool_general_element_db_init(pool);\n+\telse if (db_type == MLX5DR_POOL_DB_TYPE_ONE_SIZE_RESOURCE)\n+\t\tret = mlx5dr_pool_onesize_element_db_init(pool);\n+\telse\n+\t\tret = mlx5dr_pool_buddy_db_init(pool, pool->alloc_log_sz);\n+\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to init general db : %d (ret: %d)\", db_type, ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void mlx5dr_pool_db_unint(struct mlx5dr_pool *pool)\n+{\n+\tpool->p_db_uninit(pool);\n+}\n+\n+int\n+mlx5dr_pool_chunk_alloc(struct mlx5dr_pool *pool,\n+\t\t\tstruct mlx5dr_pool_chunk *chunk)\n+{\n+\tint ret;\n+\n+\tpthread_spin_lock(&pool->lock);\n+\tret = pool->p_get_chunk(pool, chunk);\n+\tpthread_spin_unlock(&pool->lock);\n+\n+\treturn ret;\n+}\n+\n+void mlx5dr_pool_chunk_free(struct mlx5dr_pool *pool,\n+\t\t\t    struct mlx5dr_pool_chunk *chunk)\n+{\n+\tpthread_spin_lock(&pool->lock);\n+\tpool->p_put_chunk(pool, chunk);\n+\tpthread_spin_unlock(&pool->lock);\n+}\n+\n+struct mlx5dr_pool *\n+mlx5dr_pool_create(struct mlx5dr_context *ctx, struct mlx5dr_pool_attr *pool_attr)\n+{\n+\tenum mlx5dr_db_type res_db_type;\n+\tstruct mlx5dr_pool *pool;\n+\n+\tpool = simple_calloc(1, sizeof(*pool));\n+\tif (!pool)\n+\t\treturn NULL;\n+\n+\tpool->ctx = ctx;\n+\tpool->type = pool_attr->pool_type;\n+\tpool->alloc_log_sz = pool_attr->alloc_log_sz;\n+\tpool->flags = pool_attr->flags;\n+\tpool->tbl_type = pool_attr->table_type;\n+\tpool->opt_type = pool_attr->opt_type;\n+\n+\tpthread_spin_init(&pool->lock, PTHREAD_PROCESS_PRIVATE);\n+\n+\t/* Support general db */\n+\tif (pool->flags == (MLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE |\n+\t\t\t    MLX5DR_POOL_FLAGS_RESOURCE_PER_CHUNK))\n+\t\tres_db_type = MLX5DR_POOL_DB_TYPE_GENERAL_SIZE;\n+\telse if (pool->flags == (MLX5DR_POOL_FLAGS_ONE_RESOURCE |\n+\t\t\t\t MLX5DR_POOL_FLAGS_FIXED_SIZE_OBJECTS))\n+\t\tres_db_type = MLX5DR_POOL_DB_TYPE_ONE_SIZE_RESOURCE;\n+\telse\n+\t\tres_db_type = MLX5DR_POOL_DB_TYPE_BUDDY;\n+\n+\tpool->alloc_log_sz = pool_attr->alloc_log_sz;\n+\n+\tif (mlx5dr_pool_db_init(pool, res_db_type))\n+\t\tgoto free_pool;\n+\n+\treturn pool;\n+\n+free_pool:\n+\tpthread_spin_destroy(&pool->lock);\n+\tsimple_free(pool);\n+\treturn NULL;\n+}\n+\n+int mlx5dr_pool_destroy(struct mlx5dr_pool *pool)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < 
MLX5DR_POOL_RESOURCE_ARR_SZ; i++)\n+\t\tif (pool->resource[i])\n+\t\t\tmlx5dr_pool_resource_free(pool, i);\n+\n+\tmlx5dr_pool_db_unint(pool);\n+\n+\tpthread_spin_destroy(&pool->lock);\n+\tsimple_free(pool);\n+\treturn 0;\n+}\ndiff --git a/drivers/net/mlx5/hws/mlx5dr_pool.h b/drivers/net/mlx5/hws/mlx5dr_pool.h\nnew file mode 100644\nindex 0000000000..cd12c3ab9a\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_pool.h\n@@ -0,0 +1,152 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#ifndef MLX5DR_POOL_H_\n+#define MLX5DR_POOL_H_\n+\n+enum mlx5dr_pool_type {\n+\tMLX5DR_POOL_TYPE_STE,\n+\tMLX5DR_POOL_TYPE_STC,\n+};\n+\n+#define MLX5DR_POOL_STC_LOG_SZ 14\n+\n+#define MLX5DR_POOL_RESOURCE_ARR_SZ 100\n+\n+struct mlx5dr_pool_chunk {\n+\tuint32_t resource_idx;\n+\t/* Internal offset, relative to base index */\n+\tint      offset;\n+\tint      order;\n+};\n+\n+struct mlx5dr_pool_resource {\n+\tstruct mlx5dr_pool *pool;\n+\tstruct mlx5dr_devx_obj *devx_obj;\n+\tuint32_t base_id;\n+\tuint32_t range;\n+};\n+\n+enum mlx5dr_pool_flags {\n+\t/* Only a one resource in that pool */\n+\tMLX5DR_POOL_FLAGS_ONE_RESOURCE = 1 << 0,\n+\tMLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,\n+\t/* No sharing resources between chunks */\n+\tMLX5DR_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,\n+\t/* All objects are in the same size */\n+\tMLX5DR_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,\n+\t/* Manged by buddy allocator */\n+\tMLX5DR_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,\n+\t/* Allocate pool_type memory on pool creation */\n+\tMLX5DR_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,\n+\n+\t/* These values should be used by the caller */\n+\tMLX5DR_POOL_FLAGS_FOR_STC_POOL =\n+\t\tMLX5DR_POOL_FLAGS_ONE_RESOURCE |\n+\t\tMLX5DR_POOL_FLAGS_FIXED_SIZE_OBJECTS,\n+\tMLX5DR_POOL_FLAGS_FOR_MATCHER_STE_POOL =\n+\t\tMLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE |\n+\t\tMLX5DR_POOL_FLAGS_RESOURCE_PER_CHUNK,\n+\tMLX5DR_POOL_FLAGS_FOR_STE_ACTION_POOL =\n+\t\tMLX5DR_POOL_FLAGS_ONE_RESOURCE |\n+\t\tMLX5DR_POOL_FLAGS_BUDDY_MANAGED |\n+\t\tMLX5DR_POOL_FLAGS_ALLOC_MEM_ON_CREATE,\n+};\n+\n+enum mlx5dr_pool_optimize {\n+\tMLX5DR_POOL_OPTIMIZE_NONE = 0x0,\n+\tMLX5DR_POOL_OPTIMIZE_ORIG = 0x1,\n+\tMLX5DR_POOL_OPTIMIZE_MIRROR = 0x2,\n+};\n+\n+struct mlx5dr_pool_attr {\n+\tenum mlx5dr_pool_type pool_type;\n+\tenum mlx5dr_table_type table_type;\n+\tenum mlx5dr_pool_flags flags;\n+\tenum mlx5dr_pool_optimize opt_type;\n+\t/* Allocation size once memory is depleted */\n+\tsize_t alloc_log_sz;\n+};\n+\n+enum mlx5dr_db_type {\n+\t/* Uses for allocating chunk of big memory, each element has its own resource in the FW*/\n+\tMLX5DR_POOL_DB_TYPE_GENERAL_SIZE,\n+\t/* One resource only, all the elements are with same one size */\n+\tMLX5DR_POOL_DB_TYPE_ONE_SIZE_RESOURCE,\n+\t/* Many resources, the memory allocated with buddy mechanism */\n+\tMLX5DR_POOL_DB_TYPE_BUDDY,\n+};\n+\n+struct mlx5dr_buddy_manager {\n+\tstruct mlx5dr_buddy_mem *buddies[MLX5DR_POOL_RESOURCE_ARR_SZ];\n+};\n+\n+struct mlx5dr_pool_elements {\n+\tuint32_t num_of_elements;\n+\tstruct rte_bitmap *bitmap;\n+\tbool is_full;\n+};\n+\n+struct mlx5dr_element_manager {\n+\tstruct mlx5dr_pool_elements *elements[MLX5DR_POOL_RESOURCE_ARR_SZ];\n+};\n+\n+struct mlx5dr_pool_db {\n+\tenum mlx5dr_db_type type;\n+\tunion {\n+\t\tstruct mlx5dr_element_manager *element_manager;\n+\t\tstruct mlx5dr_buddy_manager *buddy_manager;\n+\t};\n+};\n+\n+typedef int (*mlx5dr_pool_db_get_chunk)(struct mlx5dr_pool *pool,\n+\t\t\t\t\tstruct mlx5dr_pool_chunk *chunk);\n+typedef void 
(*mlx5dr_pool_db_put_chunk)(struct mlx5dr_pool *pool,\n+\t\t\t\t\t struct mlx5dr_pool_chunk *chunk);\n+typedef void (*mlx5dr_pool_unint_db)(struct mlx5dr_pool *pool);\n+\n+struct mlx5dr_pool {\n+\tstruct mlx5dr_context *ctx;\n+\tenum mlx5dr_pool_type type;\n+\tenum mlx5dr_pool_flags flags;\n+\tpthread_spinlock_t lock;\n+\tsize_t alloc_log_sz;\n+\tenum mlx5dr_table_type tbl_type;\n+\tenum mlx5dr_pool_optimize opt_type;\n+\tstruct mlx5dr_pool_resource *resource[MLX5DR_POOL_RESOURCE_ARR_SZ];\n+\tstruct mlx5dr_pool_resource *mirror_resource[MLX5DR_POOL_RESOURCE_ARR_SZ];\n+\t/* DB */\n+\tstruct mlx5dr_pool_db db;\n+\t/* Functions */\n+\tmlx5dr_pool_unint_db p_db_uninit;\n+\tmlx5dr_pool_db_get_chunk p_get_chunk;\n+\tmlx5dr_pool_db_put_chunk p_put_chunk;\n+};\n+\n+struct mlx5dr_pool *\n+mlx5dr_pool_create(struct mlx5dr_context *ctx,\n+\t\t   struct mlx5dr_pool_attr *pool_attr);\n+\n+int mlx5dr_pool_destroy(struct mlx5dr_pool *pool);\n+\n+int mlx5dr_pool_chunk_alloc(struct mlx5dr_pool *pool,\n+\t\t\t    struct mlx5dr_pool_chunk *chunk);\n+\n+void mlx5dr_pool_chunk_free(struct mlx5dr_pool *pool,\n+\t\t\t    struct mlx5dr_pool_chunk *chunk);\n+\n+static inline struct mlx5dr_devx_obj *\n+mlx5dr_pool_chunk_get_base_devx_obj(struct mlx5dr_pool *pool,\n+\t\t\t\t    struct mlx5dr_pool_chunk *chunk)\n+{\n+\treturn pool->resource[chunk->resource_idx]->devx_obj;\n+}\n+\n+static inline struct mlx5dr_devx_obj *\n+mlx5dr_pool_chunk_get_base_devx_obj_mirror(struct mlx5dr_pool *pool,\n+\t\t\t\t\t   struct mlx5dr_pool_chunk *chunk)\n+{\n+\treturn pool->mirror_resource[chunk->resource_idx]->devx_obj;\n+}\n+#endif /* MLX5DR_POOL_H_ */\n",
    "prefixes": [
        "v3",
        "09/18"
    ]
}
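
As a follow-up sketch, the fields above can drive simple automation: the "mbox" URL returns this patch in mbox format suitable for git am, and "series"[0]["mbox"] returns the whole v3 series. This assumes requests and a local git checkout; the file name is illustrative.

    import subprocess
    import requests

    patch = requests.get("http://patches.dpdk.org/api/patches/118210/").json()

    # Fetch the raw mbox for this single patch ...
    with open("patch.mbox", "w") as f:
        f.write(requests.get(patch["mbox"]).text)

    # ... or note the mbox URL of the full series it belongs to.
    series_mbox_url = patch["series"][0]["mbox"]

    # Apply the single patch to a local DPDK tree.
    subprocess.run(["git", "am", "patch.mbox"], check=True)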