get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

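The descriptions above map to plain HTTP calls against this endpoint. Below is a minimal sketch of the read-only case in Python, assuming the third-party requests package is available; the base URL and the field names ("name", "state", "check", "submitter") are taken from the response shown below, and the fetch_patch helper name is illustrative only. Updates via PUT or PATCH work the same way but require an authenticated maintainer account, so they are not sketched here.

import requests  # assumption: the third-party 'requests' package is installed

API_BASE = "http://patches.dpdk.org/api"  # base URL taken from the links in the response below

def fetch_patch(patch_id):
    """Fetch one patch object from the Patchwork REST API (GET /api/patches/<id>/)."""
    resp = requests.get(f"{API_BASE}/patches/{patch_id}/")
    resp.raise_for_status()  # surface HTTP errors instead of silently parsing an error body
    return resp.json()

patch = fetch_patch(99685)
print(patch["name"])                    # "[02/11] common/mlx5: support receive memory pool"
print(patch["state"], patch["check"])   # e.g. "superseded success"
print(patch["submitter"]["email"])      # "xuemingl@nvidia.com"
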
GET /api/patches/99685/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 99685,
    "url": "http://patches.dpdk.org/api/patches/99685/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210926111904.237736-3-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210926111904.237736-3-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210926111904.237736-3-xuemingl@nvidia.com",
    "date": "2021-09-26T11:18:55",
    "name": "[02/11] common/mlx5: support receive memory pool",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3547fcec086dea62881f2aac31d395a22429659f",
    "submitter": {
        "id": 1904,
        "url": "http://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210926111904.237736-3-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 19166,
            "url": "http://patches.dpdk.org/api/series/19166/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=19166",
            "date": "2021-09-26T11:18:53",
            "name": "net/mlx5: support shared Rx queue",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/19166/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/99685/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/99685/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 541A1A0547;\n\tSun, 26 Sep 2021 13:19:40 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E694040E5A;\n\tSun, 26 Sep 2021 13:19:34 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2066.outbound.protection.outlook.com [40.107.243.66])\n by mails.dpdk.org (Postfix) with ESMTP id 3DBA0410ED\n for <dev@dpdk.org>; Sun, 26 Sep 2021 13:19:33 +0200 (CEST)",
            "from MWHPR10CA0058.namprd10.prod.outlook.com (2603:10b6:300:2c::20)\n by BN6PR12MB1283.namprd12.prod.outlook.com (2603:10b6:404:19::8) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4544.18; Sun, 26 Sep\n 2021 11:19:30 +0000",
            "from CO1NAM11FT027.eop-nam11.prod.protection.outlook.com\n (2603:10b6:300:2c:cafe::d3) by MWHPR10CA0058.outlook.office365.com\n (2603:10b6:300:2c::20) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4544.15 via Frontend\n Transport; Sun, 26 Sep 2021 11:19:30 +0000",
            "from mail.nvidia.com (216.228.112.36) by\n CO1NAM11FT027.mail.protection.outlook.com (10.13.174.224) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4544.13 via Frontend Transport; Sun, 26 Sep 2021 11:19:30 +0000",
            "from DRHQMAIL107.nvidia.com (10.27.9.16) by HQMAIL101.nvidia.com\n (172.20.187.10) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Sun, 26 Sep\n 2021 11:19:29 +0000",
            "from nvidia.com (172.20.187.5) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Sun, 26 Sep\n 2021 11:19:27 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=R8khKgHRzCHTSu6QFr8DBSluMhjVIUpO0q/qrH5v/qAnS56jgs6SksQJVGVjtsVluE52MfI7BPtBUf+jAhkqqNyX1EosQmevV3/jjs3jNefsE2vwmpNIxl1Dq9g11xXu8WX+CufH5qFpaw4Xdh1UO/oAIYbMTXVMcWJNMjdG0F40Trsa41ZU0glfwCzCvewPBbblRhXdE9NzDSFDbsbsmpMt2B2iA16nkczrapc4FjmcG+kgXSeNqLdq5zXNXBrLeH4Nd1wooe17OQPRgpljATY8IDXHNF/Z2+h+OB1rGtkjzKUgKc+ZnGo2SoYbYc55ULHXnQ6zv9wuHAhKLSK+Yw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n bh=XyxGV6DTOsWSgcKQnLGNhrSn/fPtuFxywAnS2xuQfl4=;\n b=aICihJ5b9Ngxqz9JD3A2+s/sLTtn4T64WVVsrUoZR0RbRmFM+O+xnzmlG5QwbBMstluXmUhmmRgRGjZgNWlD/sG+2Uo5Et5OaBM6pUYSQgy5VLK+wLi93yPgbxKY/EYLaRK/l6I2g6EeQFZHuJJUdDNgEjwH+6pLdHRnTI335M43oaaATLFNFhl4MvPGnFy3euhqudoAxIvgg0utDKLG68uv6LrirIEesU1fxOD4Ejl8oAw17iAbXJfUHBWFa7YdkxYUEWU0rGvZAWT2qwzyYtFgxBMn2I3TbVP6LJbXNo1Z8cVwYpzi/wI4QrSjrWOMwhItfEOCopnEuZN+KPxzPQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.36) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=XyxGV6DTOsWSgcKQnLGNhrSn/fPtuFxywAnS2xuQfl4=;\n b=SDbPJpNc7WTYPmR7AU2ji6p12L0Xx1LweyggJhdsoR3L1yBm+Gpxv9Ob4jmPfw6QkvAev0QQV9bnKXsKNiA1tExf9Cw1qVYp5lFr/N+hopzRUFj2FtK9BY+/AJP/a4Rg61uonxCeFW2BIYPNkhNT9OrAz6IIFhUo0PQ4uFXsBOyGwqJp0gMmkGL2TaXL4NBOpLRPoR6YxtYmoTKnEPUdlctD3OD4V1wvv8EqKkHmsI87yo0bI9h/63iKnhhqLN5y5shSdduvvQpY2QRmWqy6G25fWBlT/WZPRKgE9tCI4F696E+ToCU8BP7oLZwySoEESUEpCkdBpMIptJxdwrgtfw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.36)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.36 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.36; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>, Matan Azrad\n <matan@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>, \"Ray\n Kinsella\" <mdr@ashroe.eu>",
        "Date": "Sun, 26 Sep 2021 19:18:55 +0800",
        "Message-ID": "<20210926111904.237736-3-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20210926111904.237736-1-xuemingl@nvidia.com>",
        "References": "<20210926111904.237736-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n DRHQMAIL107.nvidia.com (10.27.9.16)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "67e9d1d7-0654-446e-3532-08d980df825c",
        "X-MS-TrafficTypeDiagnostic": "BN6PR12MB1283:",
        "X-Microsoft-Antispam-PRVS": "\n <BN6PR12MB128372BA4450D034DFE2D769A1A69@BN6PR12MB1283.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:2331;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n zRoAhBcXSI56fNZvg2rz/DhORslM5Q8Zw4Ml2/YIqeM9Zo/77SMNfUfje3InNQNuHFx8HgSmlFhf0zk9a1umkkgWlqmxnVEm+T4jbBo3H5L767Aytb6628+/zSyiF4/I+OtVTYMK+dHq9OHbHyxoy+YHPSro2Kk6Fj72fMHIBCMhdxQ0W/SB1ZYeQa0Ek1/PbHs/ilkCG1pckAQ6Hk4CWAjFp+I/4hztTXCiK3J6ARih7e1see+xB1fAS3GakcYb6lMRHxMD4Ce581PwLd4kwDKUzEgIGfM5bn2VHiWGt8bNpS68LZRIDDbkmGXfQQtWI7d6au8wUlFZ1R2hv02M/oX03+WTuQpx6Tk+v0TA1LuefAlt4K2zRvq+8APz9SWDir80ykLINIWkevuRtD4ekmdW+BXVvQBgabqIf8yP4d0N9zDNXcOwhlssqqBAq2aI2kyHpcvJooBXZlj4vtUagThFa933G4LWDShAWRy2PxoxVt99yrgvLKEgixN523d/BiIv3M0lsbPX+o9isoZBbIyx58EYnl27p/rB/prVb9LnKKDGKjfEFqOUY0RjHyXeWObnZO+GsR4JvrIRO7XcL4uJbSP6Sw6oGyiKH2cZDHX1s7WwuauFFw7QFjPFDCyoFKNMilQWyoeF9YK0FLJA4CwSPoCLnLDdspZiNBX+PASJpr5JtRT4pC4VBkifhhmJP62D7H2et/QcDrMyi3AuDw==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.36; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid05.nvidia.com; CAT:NONE;\n SFS:(4636009)(46966006)(36840700001)(16526019)(6916009)(30864003)(356005)(36906005)(316002)(54906003)(336012)(2906002)(4326008)(2616005)(36756003)(6666004)(426003)(1076003)(8936002)(8676002)(55016002)(70586007)(6286002)(186003)(508600001)(86362001)(36860700001)(7636003)(5660300002)(47076005)(83380400001)(70206006)(7696005)(82310400003)(26005);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "26 Sep 2021 11:19:30.1799 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 67e9d1d7-0654-446e-3532-08d980df825c",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.36];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT027.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN6PR12MB1283",
        "Subject": "[dpdk-dev] [PATCH 02/11] common/mlx5: support receive memory pool",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Adds DevX supports of PRM shared receive memory pool(RMP) object.\nRMP is used to support shared Rx queue. Multiple RQ could share same\nRMP. Memory buffers are supplied to RMP.\n\nThis patch makes RMP RQ optional, created only if mlx5_devx_rq.rmp\nis set.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n drivers/common/mlx5/mlx5_common_devx.c | 310 +++++++++++++++++++++----\n drivers/common/mlx5/mlx5_common_devx.h |  19 +-\n drivers/common/mlx5/mlx5_devx_cmds.c   |  52 +++++\n drivers/common/mlx5/mlx5_devx_cmds.h   |  16 ++\n drivers/common/mlx5/mlx5_prm.h         |  85 ++++++-\n drivers/common/mlx5/version.map        |   1 +\n drivers/net/mlx5/mlx5_devx.c           |   2 +-\n 7 files changed, 434 insertions(+), 51 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common_devx.c b/drivers/common/mlx5/mlx5_common_devx.c\nindex 22c8d356c45..cd6f13a66b6 100644\n--- a/drivers/common/mlx5/mlx5_common_devx.c\n+++ b/drivers/common/mlx5/mlx5_common_devx.c\n@@ -271,6 +271,39 @@ mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,\n \treturn -rte_errno;\n }\n \n+/**\n+ * Destroy DevX Receive Queue resources.\n+ *\n+ * @param[in] rq_res\n+ *   DevX RQ resource to destroy.\n+ */\n+static void\n+mlx5_devx_wq_res_destroy(struct mlx5_devx_wq_res *rq_res)\n+{\n+\tif (rq_res->umem_obj)\n+\t\tclaim_zero(mlx5_os_umem_dereg(rq_res->umem_obj));\n+\tif (rq_res->umem_buf)\n+\t\tmlx5_free((void *)(uintptr_t)rq_res->umem_buf);\n+\tmemset(rq_res, 0, sizeof(*rq_res));\n+}\n+\n+/**\n+ * Destroy DevX Receive Memory Pool.\n+ *\n+ * @param[in] rmp\n+ *   DevX RMP to destroy.\n+ */\n+static void\n+mlx5_devx_rmp_destroy(struct mlx5_devx_rmp *rmp)\n+{\n+\tMLX5_ASSERT(rmp->ref_cnt == 0);\n+\tif (rmp->rmp) {\n+\t\tclaim_zero(mlx5_devx_cmd_destroy(rmp->rmp));\n+\t\trmp->rmp = NULL;\n+\t}\n+\tmlx5_devx_wq_res_destroy(&rmp->wq);\n+}\n+\n /**\n  * Destroy DevX Receive Queue.\n  *\n@@ -280,55 +313,47 @@ mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,\n void\n mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq)\n {\n-\tif (rq->rq)\n+\tif (rq->rq) {\n \t\tclaim_zero(mlx5_devx_cmd_destroy(rq->rq));\n-\tif (rq->umem_obj)\n-\t\tclaim_zero(mlx5_os_umem_dereg(rq->umem_obj));\n-\tif (rq->umem_buf)\n-\t\tmlx5_free((void *)(uintptr_t)rq->umem_buf);\n+\t\trq->rq = NULL;\n+\t}\n+\tif (rq->rmp == NULL) {\n+\t\tmlx5_devx_wq_res_destroy(&rq->wq);\n+\t} else {\n+\t\tMLX5_ASSERT(rq->rmp->ref_cnt > 0);\n+\t\trq->rmp->ref_cnt--;\n+\t\tif (rq->rmp->ref_cnt == 0)\n+\t\t\tmlx5_devx_rmp_destroy(rq->rmp);\n+\t}\n+\trq->db_rec = 0;\n }\n \n /**\n- * Create Receive Queue using DevX API.\n- *\n- * Get a pointer to partially initialized attributes structure, and updates the\n- * following fields:\n- *   wq_umem_valid\n- *   wq_umem_id\n- *   wq_umem_offset\n- *   dbr_umem_valid\n- *   dbr_umem_id\n- *   dbr_addr\n- *   log_wq_pg_sz\n- * All other fields are updated by caller.\n+ * Create WQ resources using DevX API.\n  *\n  * @param[in] ctx\n  *   Context returned from mlx5 open_device() glue function.\n- * @param[in/out] rq_obj\n- *   Pointer to RQ to create.\n+ * @param[in/out] rq_rest\n+ *   Pointer to RQ resource to create.\n  * @param[in] wqe_size\n  *   Size of WQE structure.\n  * @param[in] log_wqbb_n\n  *   Log of number of WQBBs in queue.\n- * @param[in] attr\n- *   Pointer to RQ attributes structure.\n- * @param[in] socket\n- *   Socket to use for allocation.\n+ * @param[in] wq_attr\n+ *   Pointer to WQ attributes structure.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n-int\n-mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj, uint32_t wqe_size,\n-\t\t    uint16_t log_wqbb_n,\n-\t\t    struct mlx5_devx_create_rq_attr *attr, int socket)\n+static int\n+mlx5_devx_wq_res_create(void *ctx, struct mlx5_devx_wq_res *rq_res,\n+\t\t\tuint32_t wqe_size, uint16_t log_wqbb_n,\n+\t\t\tstruct mlx5_devx_wq_attr *wq_attr, int socket)\n {\n-\tstruct mlx5_devx_obj *rq = NULL;\n \tstruct mlx5dv_devx_umem *umem_obj = NULL;\n \tvoid *umem_buf = NULL;\n \tsize_t alignment = MLX5_WQE_BUF_ALIGNMENT;\n-\tuint32_t umem_size, umem_dbrec;\n-\tuint16_t rq_size = 1 << log_wqbb_n;\n+\tuint32_t umem_size;\n \tint ret;\n \n \tif (alignment == (size_t)-1) {\n@@ -337,8 +362,7 
@@ mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj, uint32_t wqe_size,\n \t\treturn -rte_errno;\n \t}\n \t/* Allocate memory buffer for WQEs and doorbell record. */\n-\tumem_size = wqe_size * rq_size;\n-\tumem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);\n+\tumem_size = wqe_size * (1 << log_wqbb_n);\n \tumem_size += MLX5_DBR_SIZE;\n \tumem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,\n \t\t\t       alignment, socket);\n@@ -355,14 +379,58 @@ mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj, uint32_t wqe_size,\n \t\trte_errno = errno;\n \t\tgoto error;\n \t}\n-\t/* Fill attributes for RQ object creation. */\n-\tattr->wq_attr.wq_umem_valid = 1;\n-\tattr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);\n-\tattr->wq_attr.wq_umem_offset = 0;\n-\tattr->wq_attr.dbr_umem_valid = 1;\n-\tattr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;\n-\tattr->wq_attr.dbr_addr = umem_dbrec;\n-\tattr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;\n+\trq_res->umem_buf = umem_buf;\n+\trq_res->umem_obj = umem_obj;\n+\t/* Fill WQ attributes. */\n+\twq_attr->wq_umem_valid = 1;\n+\twq_attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);\n+\twq_attr->wq_umem_offset = 0;\n+\twq_attr->dbr_umem_valid = 1;\n+\twq_attr->dbr_umem_id = wq_attr->wq_umem_id;\n+\twq_attr->dbr_addr = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);\n+\twq_attr->log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;\n+\treturn 0;\n+error:\n+\tret = rte_errno;\n+\tif (umem_obj)\n+\t\tclaim_zero(mlx5_os_umem_dereg(umem_obj));\n+\tif (umem_buf)\n+\t\tmlx5_free((void *)(uintptr_t)umem_buf);\n+\trte_errno = ret;\n+\treturn -rte_errno;\n+}\n+\n+/**\n+ * Create standalone Receive Queue using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   Context returned from mlx5 open_device() glue function.\n+ * @param[in/out] rq_obj\n+ *   Pointer to RQ to create.\n+ * @param[in] wqe_size\n+ *   Size of WQE structure.\n+ * @param[in] log_wqbb_n\n+ *   Log of number of WQBBs in queue.\n+ * @param[in] attr\n+ *   Pointer to RQ attributes structure.\n+ * @param[in] socket\n+ *   Socket to use for allocation.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_devx_rq_std_create(void *ctx, struct mlx5_devx_rq *rq_obj,\n+\t\t\tuint32_t wqe_size, uint16_t log_wqbb_n,\n+\t\t\tstruct mlx5_devx_create_rq_attr *attr, int socket)\n+{\n+\tstruct mlx5_devx_obj *rq = NULL;\n+\tint ret;\n+\n+\tret = mlx5_devx_wq_res_create(ctx, &rq_obj->wq, wqe_size, log_wqbb_n,\n+\t\t\t\t      &attr->wq_attr, socket);\n+\tif (ret != 0)\n+\t\treturn ret;\n \t/* Create receive queue object with DevX. 
*/\n \trq = mlx5_devx_cmd_create_rq(ctx, attr, socket);\n \tif (!rq) {\n@@ -370,18 +438,166 @@ mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj, uint32_t wqe_size,\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\trq_obj->umem_buf = umem_buf;\n-\trq_obj->umem_obj = umem_obj;\n \trq_obj->rq = rq;\n-\trq_obj->db_rec = RTE_PTR_ADD(rq_obj->umem_buf, umem_dbrec);\n \treturn 0;\n error:\n \tret = rte_errno;\n-\tif (umem_obj)\n-\t\tclaim_zero(mlx5_os_umem_dereg(umem_obj));\n-\tif (umem_buf)\n-\t\tmlx5_free((void *)(uintptr_t)umem_buf);\n+\tmlx5_devx_wq_res_destroy(&rq_obj->wq);\n \trte_errno = ret;\n \treturn -rte_errno;\n }\n \n+/**\n+ * Create Receive Memory Pool using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   Context returned from mlx5 open_device() glue function.\n+ * @param[in/out] rq_obj\n+ *   Pointer to RQ to create.\n+ * @param[in] wqe_size\n+ *   Size of WQE structure.\n+ * @param[in] log_wqbb_n\n+ *   Log of number of WQBBs in queue.\n+ * @param[in] attr\n+ *   Pointer to RQ attributes structure.\n+ * @param[in] socket\n+ *   Socket to use for allocation.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_devx_rmp_create(void *ctx, struct mlx5_devx_rmp *rmp_obj,\n+\t\t     uint32_t wqe_size, uint16_t log_wqbb_n,\n+\t\t     struct mlx5_devx_wq_attr *wq_attr, int socket)\n+{\n+\tstruct mlx5_devx_create_rmp_attr rmp_attr = { 0 };\n+\tint ret;\n+\n+\trmp_attr.wq_attr = *wq_attr;\n+\tret = mlx5_devx_wq_res_create(ctx, &rmp_obj->wq, wqe_size, log_wqbb_n,\n+\t\t\t\t      &rmp_attr.wq_attr, socket);\n+\tif (ret != 0)\n+\t\treturn ret;\n+\trmp_attr.state = MLX5_RMPC_STATE_RDY;\n+\trmp_attr.basic_cyclic_rcv_wqe =\n+\t\twq_attr->wq_type == MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ ?\n+\t\t0 : 1;\n+\t/* Create receive queue object with DevX. */\n+\trmp_obj->rmp = mlx5_devx_cmd_create_rmp(ctx, &rmp_attr, socket);\n+\tif (rmp_obj->rmp == NULL) {\n+\t\tDRV_LOG(ERR, \"Can't create DevX RMP object.\");\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\treturn 0;\n+error:\n+\tret = rte_errno;\n+\tmlx5_devx_wq_res_destroy(&rmp_obj->wq);\n+\trte_errno = ret;\n+\treturn -rte_errno;\n+}\n+\n+/**\n+ * Create Shared Receive Queue based on RMP using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   Context returned from mlx5 open_device() glue function.\n+ * @param[in/out] rq_obj\n+ *   Pointer to RQ to create.\n+ * @param[in] wqe_size\n+ *   Size of WQE structure.\n+ * @param[in] log_wqbb_n\n+ *   Log of number of WQBBs in queue.\n+ * @param[in] attr\n+ *   Pointer to RQ attributes structure.\n+ * @param[in] socket\n+ *   Socket to use for allocation.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_devx_rq_shared_create(void *ctx, struct mlx5_devx_rq *rq_obj,\n+\t\t\t   uint32_t wqe_size, uint16_t log_wqbb_n,\n+\t\t\t   struct mlx5_devx_create_rq_attr *attr, int socket)\n+{\n+\tstruct mlx5_devx_obj *rq = NULL;\n+\tint ret;\n+\n+\tret = mlx5_devx_rmp_create(ctx, rq_obj->rmp, wqe_size, log_wqbb_n,\n+\t\t\t\t   &attr->wq_attr, socket);\n+\tif (ret != 0)\n+\t\treturn ret;\n+\trq_obj->rmp->ref_cnt++;\n+\tattr->mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP;\n+\tattr->rmpn = rq_obj->rmp->rmp->id;\n+\tattr->flush_in_error_en = 0;\n+\tmemset(&attr->wq_attr, 0, sizeof(attr->wq_attr));\n+\t/* Create receive queue object with DevX. 
*/\n+\trq = mlx5_devx_cmd_create_rq(ctx, attr, socket);\n+\tif (!rq) {\n+\t\tDRV_LOG(ERR, \"Can't create DevX RMP RQ object.\");\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\trq_obj->rq = rq;\n+\treturn 0;\n+error:\n+\tret = rte_errno;\n+\tmlx5_devx_rq_destroy(rq_obj);\n+\trte_errno = ret;\n+\treturn -rte_errno;\n+}\n+\n+/**\n+ * Create Receive Queue using DevX API. Shared RQ is created only if rmp set.\n+ *\n+ * Get a pointer to partially initialized attributes structure, and updates the\n+ * following fields:\n+ *   wq_umem_valid\n+ *   wq_umem_id\n+ *   wq_umem_offset\n+ *   dbr_umem_valid\n+ *   dbr_umem_id\n+ *   dbr_addr\n+ *   log_wq_pg_sz\n+ * All other fields are updated by caller.\n+ *\n+ * @param[in] ctx\n+ *   Context returned from mlx5 open_device() glue function.\n+ * @param[in/out] rq_obj\n+ *   Pointer to RQ to create.\n+ * @param[in] wqe_size\n+ *   Size of WQE structure.\n+ * @param[in] log_wqbb_n\n+ *   Log of number of WQBBs in queue.\n+ * @param[in] attr\n+ *   Pointer to RQ attributes structure.\n+ * @param[in] socket\n+ *   Socket to use for allocation.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj,\n+\t\t    uint32_t wqe_size, uint16_t log_wqbb_n,\n+\t\t    struct mlx5_devx_create_rq_attr *attr, int socket)\n+{\n+\tuint32_t umem_size, umem_dbrec;\n+\tint ret;\n+\n+\tif (rq_obj->rmp == NULL)\n+\t\tret = mlx5_devx_rq_std_create(ctx, rq_obj, wqe_size,\n+\t\t\t\t\t      log_wqbb_n, attr, socket);\n+\telse\n+\t\tret = mlx5_devx_rq_shared_create(ctx, rq_obj, wqe_size,\n+\t\t\t\t\t\t log_wqbb_n, attr, socket);\n+\tif (ret != 0)\n+\t\treturn ret;\n+\tumem_size = wqe_size * (1 << log_wqbb_n);\n+\tumem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);\n+\trq_obj->db_rec = RTE_PTR_ADD(rq_obj->wq.umem_buf, umem_dbrec);\n+\treturn 0;\n+}\ndiff --git a/drivers/common/mlx5/mlx5_common_devx.h b/drivers/common/mlx5/mlx5_common_devx.h\nindex aad0184e5ac..328b6ce9324 100644\n--- a/drivers/common/mlx5/mlx5_common_devx.h\n+++ b/drivers/common/mlx5/mlx5_common_devx.h\n@@ -33,11 +33,26 @@ struct mlx5_devx_sq {\n \tvolatile uint32_t *db_rec; /* The SQ doorbell record. */\n };\n \n+/* DevX Receive Queue resource structure. */\n+struct mlx5_devx_wq_res {\n+\tvoid *umem_obj; /* The RQ umem object. */\n+\tvolatile void *umem_buf;\n+};\n+\n+/* DevX Receive Queue structure. */\n+struct mlx5_devx_rmp {\n+\tstruct mlx5_devx_obj *rmp; /* The RMP DevX object. */\n+\tuint32_t ref_cnt; /* Reference count. */\n+\tstruct mlx5_devx_wq_res wq;\n+};\n+\n /* DevX Receive Queue structure. */\n struct mlx5_devx_rq {\n \tstruct mlx5_devx_obj *rq; /* The RQ DevX object. */\n-\tvoid *umem_obj; /* The RQ umem object. */\n-\tvolatile void *umem_buf;\n+\tunion {\n+\t\tstruct mlx5_devx_rmp *rmp; /* Shared RQ RMP object. */\n+\t\tstruct mlx5_devx_wq_res wq; /* WQ resource of standalone RQ. */\n+\t};\n \tvolatile uint32_t *db_rec; /* The RQ doorbell record. 
*/\n };\n \ndiff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c\nindex 56407cc332f..120331e9c87 100644\n--- a/drivers/common/mlx5/mlx5_devx_cmds.c\n+++ b/drivers/common/mlx5/mlx5_devx_cmds.c\n@@ -766,6 +766,8 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,\n \t\t\tMLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);\n \tattr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,\n \t\t\t\t\t    flow_counters_dump);\n+\tattr->log_max_rmp = MLX5_GET(cmd_hca_cap, hcattr, log_max_rmp);\n+\tattr->mem_rq_rmp = MLX5_GET(cmd_hca_cap, hcattr, mem_rq_rmp);\n \tattr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,\n \t\t\t\t\t  log_max_rqt_size);\n \tattr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);\n@@ -1250,6 +1252,56 @@ mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,\n }\n \n /**\n+ * Create RMP using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   Context returned from mlx5 open_device() glue function.\n+ * @param [in] rmp_attr\n+ *   Pointer to create RMP attributes structure.\n+ * @param [in] socket\n+ *   CPU socket ID for allocations.\n+ *\n+ * @return\n+ *   The DevX object created, NULL otherwise and rte_errno is set.\n+ */\n+struct mlx5_devx_obj *\n+mlx5_devx_cmd_create_rmp(void *ctx,\n+\t\t\t struct mlx5_devx_create_rmp_attr *rmp_attr,\n+\t\t\t int socket)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(create_rmp_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};\n+\tvoid *rmp_ctx, *wq_ctx;\n+\tstruct mlx5_devx_wq_attr *wq_attr;\n+\tstruct mlx5_devx_obj *rmp = NULL;\n+\n+\trmp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rmp), 0, socket);\n+\tif (!rmp) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate RMP data\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tMLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);\n+\trmp_ctx = MLX5_ADDR_OF(create_rmp_in, in, ctx);\n+\tMLX5_SET(rmpc, rmp_ctx, state, rmp_attr->state);\n+\tMLX5_SET(rmpc, rmp_ctx, basic_cyclic_rcv_wqe,\n+\t\t rmp_attr->basic_cyclic_rcv_wqe);\n+\twq_ctx = MLX5_ADDR_OF(rmpc, rmp_ctx, wq);\n+\twq_attr = &rmp_attr->wq_attr;\n+\tdevx_cmd_fill_wq_data(wq_ctx, wq_attr);\n+\trmp->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,\n+\t\t\t\t\t      sizeof(out));\n+\tif (!rmp->obj) {\n+\t\tDRV_LOG(ERR, \"Failed to create RMP using DevX\");\n+\t\trte_errno = errno;\n+\t\tmlx5_free(rmp);\n+\t\treturn NULL;\n+\t}\n+\trmp->id = MLX5_GET(create_rmp_out, out, rmpn);\n+\treturn rmp;\n+}\n+\n+/*\n  * Create TIR using DevX API.\n  *\n  * @param[in] ctx\ndiff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h\nindex e576e30f242..fa8ba89abe6 100644\n--- a/drivers/common/mlx5/mlx5_devx_cmds.h\n+++ b/drivers/common/mlx5/mlx5_devx_cmds.h\n@@ -101,6 +101,8 @@ struct mlx5_hca_flow_attr {\n struct mlx5_hca_attr {\n \tuint32_t eswitch_manager:1;\n \tuint32_t flow_counters_dump:1;\n+\tuint32_t mem_rq_rmp:1;\n+\tuint32_t log_max_rmp:5;\n \tuint32_t log_max_rqt_size:5;\n \tuint32_t parse_graph_flex_node:1;\n \tuint8_t flow_counter_bulk_alloc_bitmap;\n@@ -245,6 +247,17 @@ struct mlx5_devx_modify_rq_attr {\n \tuint32_t lwm:16; /* Contained WQ lwm. */\n };\n \n+/* Create RMP attributes structure, used by create RMP operation. 
*/\n+struct mlx5_devx_create_rmp_attr {\n+\tuint32_t rsvd0:8;\n+\tuint32_t state:4;\n+\tuint32_t rsvd1:20;\n+\tuint32_t basic_cyclic_rcv_wqe:1;\n+\tuint32_t rsvd4:31;\n+\tuint32_t rsvd8[10];\n+\tstruct mlx5_devx_wq_attr wq_attr;\n+};\n+\n struct mlx5_rx_hash_field_select {\n \tuint32_t l3_prot_type:1;\n \tuint32_t l4_prot_type:1;\n@@ -520,6 +533,9 @@ __rte_internal\n int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,\n \t\t\t    struct mlx5_devx_modify_rq_attr *rq_attr);\n __rte_internal\n+struct mlx5_devx_obj *mlx5_devx_cmd_create_rmp(void *ctx,\n+\t\t\tstruct mlx5_devx_create_rmp_attr *rq_attr, int socket);\n+__rte_internal\n struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(void *ctx,\n \t\t\t\t\t   struct mlx5_devx_tir_attr *tir_attr);\n __rte_internal\ndiff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h\nindex 72af3710a8f..df0991ee402 100644\n--- a/drivers/common/mlx5/mlx5_prm.h\n+++ b/drivers/common/mlx5/mlx5_prm.h\n@@ -1061,6 +1061,10 @@ enum {\n \tMLX5_CMD_OP_CREATE_RQ = 0x908,\n \tMLX5_CMD_OP_MODIFY_RQ = 0x909,\n \tMLX5_CMD_OP_QUERY_RQ = 0x90b,\n+\tMLX5_CMD_OP_CREATE_RMP = 0x90c,\n+\tMLX5_CMD_OP_MODIFY_RMP = 0x90d,\n+\tMLX5_CMD_OP_DESTROY_RMP = 0x90e,\n+\tMLX5_CMD_OP_QUERY_RMP = 0x90f,\n \tMLX5_CMD_OP_CREATE_TIS = 0x912,\n \tMLX5_CMD_OP_QUERY_TIS = 0x915,\n \tMLX5_CMD_OP_CREATE_RQT = 0x916,\n@@ -1557,7 +1561,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {\n \tu8 reserved_at_378[0x3];\n \tu8 log_max_tis[0x5];\n \tu8 basic_cyclic_rcv_wqe[0x1];\n-\tu8 reserved_at_381[0x2];\n+\tu8 reserved_at_381[0x1];\n+\tu8 mem_rq_rmp[0x1];\n \tu8 log_max_rmp[0x5];\n \tu8 reserved_at_388[0x3];\n \tu8 log_max_rqt[0x5];\n@@ -2159,6 +2164,84 @@ struct mlx5_ifc_query_rq_in_bits {\n \tu8 reserved_at_60[0x20];\n };\n \n+enum {\n+\tMLX5_RMPC_STATE_RDY = 0x1,\n+\tMLX5_RMPC_STATE_ERR = 0x3,\n+};\n+\n+struct mlx5_ifc_rmpc_bits {\n+\tu8 reserved_at_0[0x8];\n+\tu8 state[0x4];\n+\tu8 reserved_at_c[0x14];\n+\tu8 basic_cyclic_rcv_wqe[0x1];\n+\tu8 reserved_at_21[0x1f];\n+\tu8 reserved_at_40[0x140];\n+\tstruct mlx5_ifc_wq_bits wq;\n+};\n+\n+struct mlx5_ifc_query_rmp_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0xc0];\n+\tstruct mlx5_ifc_rmpc_bits rmp_context;\n+};\n+\n+struct mlx5_ifc_query_rmp_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 reserved_at_10[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0x8];\n+\tu8 rmpn[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_modify_rmp_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x40];\n+};\n+\n+struct mlx5_ifc_rmp_bitmask_bits {\n+\tu8 reserved_at_0[0x20];\n+\tu8 reserved_at_20[0x1f];\n+\tu8 lwm[0x1];\n+};\n+\n+struct mlx5_ifc_modify_rmp_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 uid[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 rmp_state[0x4];\n+\tu8 reserved_at_44[0x4];\n+\tu8 rmpn[0x18];\n+\tu8 reserved_at_60[0x20];\n+\tstruct mlx5_ifc_rmp_bitmask_bits bitmask;\n+\tu8 reserved_at_c0[0x40];\n+\tstruct mlx5_ifc_rmpc_bits ctx;\n+};\n+\n+struct mlx5_ifc_create_rmp_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x8];\n+\tu8 rmpn[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_create_rmp_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 uid[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0xc0];\n+\tstruct mlx5_ifc_rmpc_bits ctx;\n+};\n+\n struct mlx5_ifc_create_tis_out_bits {\n \tu8 status[0x8];\n \tu8 
reserved_at_8[0x18];\ndiff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map\nindex e5cb6b70604..40975078cc4 100644\n--- a/drivers/common/mlx5/version.map\n+++ b/drivers/common/mlx5/version.map\n@@ -31,6 +31,7 @@ INTERNAL {\n \tmlx5_devx_cmd_create_geneve_tlv_option;\n \tmlx5_devx_cmd_create_import_kek_obj;\n \tmlx5_devx_cmd_create_qp;\n+\tmlx5_devx_cmd_create_rmp;\n \tmlx5_devx_cmd_create_rq;\n \tmlx5_devx_cmd_create_rqt;\n \tmlx5_devx_cmd_create_sq;\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex 447d6bafb93..4d479c19e6c 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -514,7 +514,7 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \tret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);\n \tif (ret)\n \t\tgoto error;\n-\trxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.umem_buf;\n+\trxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.wq.umem_buf;\n \trxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.db_rec;\n \trxq_data->cq_arm_sn = 0;\n \trxq_data->cq_ci = 0;\n",
    "prefixes": [
        "02/11"
    ]
}
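
For completeness, a short sketch of how the links embedded in this response might be consumed, again in Python with the assumed requests package. Only fields present in the JSON above ("check", "checks", "mbox", "series") are used; the output filename is an arbitrary local choice.

import requests  # assumption: 'requests' is available, as in the earlier sketch

patch = requests.get("http://patches.dpdk.org/api/patches/99685/").json()

# "check" is the aggregate CI result shown above; per-check details live at patch["checks"].
if patch["check"] != "success":
    print("CI reported problems, see", patch["checks"])

# The "mbox" link returns the raw patch email, suitable for `git am`.
mbox = requests.get(patch["mbox"]).text
with open("0002-common-mlx5-support-receive-memory-pool.patch", "w") as out:  # arbitrary local filename
    out.write(mbox)

# The series entry ties this patch to the full 11-patch set; its own "mbox" link bundles every patch.
print(patch["series"][0]["name"], patch["series"][0]["mbox"])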