get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.
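
Reading a patch needs no authentication. A minimal sketch in Python with the
requests library (the URL matches the GET example below, and the fields
printed come from the response schema shown there):

    import requests

    # Fetch a single patch from the Patchwork REST API;
    # read-only access requires no authentication.
    resp = requests.get("https://patches.dpdk.org/api/patches/112348/")
    resp.raise_for_status()
    patch = resp.json()

    # A few fields from the JSON schema shown in the example response below.
    print(patch["name"])   # "[v1,04/17] vdpa/mlx5: support pre create virtq resource"
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # mbox download URL for the patch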

GET /api/patches/112348/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 112348,
    "url": "https://patches.dpdk.org/api/patches/112348/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-8-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220606112109.208873-8-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220606112109.208873-8-lizh@nvidia.com",
    "date": "2022-06-06T11:20:44",
    "name": "[v1,04/17] vdpa/mlx5: support pre create virtq resource",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "91495d58edde39b1c882fccb475f454fd7e3b4f0",
    "submitter": {
        "id": 1967,
        "url": "https://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-8-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23341,
            "url": "https://patches.dpdk.org/api/series/23341/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=23341",
            "date": "2022-06-06T11:20:44",
            "name": null,
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/23341/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/112348/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/112348/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1D515A0543;\n\tMon,  6 Jun 2022 13:22:21 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A350142B73;\n\tMon,  6 Jun 2022 13:22:02 +0200 (CEST)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2044.outbound.protection.outlook.com [40.107.236.44])\n by mails.dpdk.org (Postfix) with ESMTP id DC7C5415D7\n for <dev@dpdk.org>; Mon,  6 Jun 2022 13:22:01 +0200 (CEST)",
            "from BN8PR07CA0014.namprd07.prod.outlook.com (2603:10b6:408:ac::27)\n by DM6PR12MB4154.namprd12.prod.outlook.com (2603:10b6:5:21d::21) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.13; Mon, 6 Jun\n 2022 11:22:00 +0000",
            "from BN8NAM11FT019.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:ac:cafe::cf) by BN8PR07CA0014.outlook.office365.com\n (2603:10b6:408:ac::27) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.18 via Frontend\n Transport; Mon, 6 Jun 2022 11:22:00 +0000",
            "from mail.nvidia.com (12.22.5.236) by\n BN8NAM11FT019.mail.protection.outlook.com (10.13.176.158) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5314.12 via Frontend Transport; Mon, 6 Jun 2022 11:21:59 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL109.nvidia.com\n (10.27.9.19) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Mon, 6 Jun 2022 11:21:57 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Mon, 6 Jun 2022\n 04:21:54 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=XlP9sch97hwPVTGRCmxDjtw5oqKkH1bpi/32Gva4Ie+C6CM6TasULMuqDV4v0W0jOxTnf3dAEY0atauEZHU6Hfc8cH8lz6mWcf2w/5ObcvFNNArRurpax2Ei5P6QJVEdkv0NqXYZiJsbx4sKDT78LL7kq2YAYKe1R/XlrkrV3BdLWqGV+3WfuAiv8DEoMARaxKrouhcP26AfHss0KuQfFN/15AyK5nr2QNYPAo76M5LyMqeV3yUEGoWrSgTolLaVLJ082r5HvLpfid9NtKG094eOc9xf7PYrPeFWdDxMUVxNBOyVwlTXBe+BNnLQ3Dq+hGU3M8WhPsj/Qem8PzPJ9w==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=pWgUAQGnUueLXIS/QHxVt3prfe0vowEE1RTBi6rBqKY=;\n b=iOIwZ3l9RvA0MIr5DVLZhSPlQvK8X3im4pQv+hit9wInz3MX/iHC/omgqoytDM/zLEUWkytPyZabBDdZ+L5AK3UZ4Xf6OAzLgpTtIk+sOot5iV+Xlv/dL9RCAindN/m0ZEdfFVVwWJQ1v3LIEasdLCXDIVO0PvSQZl+CLcBozTWT1ZgsERgWlRdTshEhVWuJ9+shbMZAHBJG6CdUt4UfGXPfDB98Wj+hW3j4Cyyj8+sdamsvi2qLn/wdv6Xgtq28007y+WsfdCg4hLisSAE3Ty1DVP35hpI5KD79ZiteCECLpB+Bmwl7PwKTpyIyq5fEc53Q1PFimv3Iswg2OeVR9Q==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.236) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=pWgUAQGnUueLXIS/QHxVt3prfe0vowEE1RTBi6rBqKY=;\n b=YzgYj7iVK/Tr65uUcKxR3dw3rvcjOXGj6pkipMtEr+HiGGFQ7ZhELcaiwgv1MOapw0ChF5n6anqSwnWFPbShzET/hmw1JfzoMnuCIUj4siCheINEMiBSIStog37ZMWoKtSSgoTHEZKztjwF1jrtLWruRK9doUZgA2cqfichH3JdhXJ+2hERCd6KCUwxaBnbE+/5uirD0ah6LmgoQxPIgqecnkuY6hbnjgPW4A7D17DkiuORxdPJXI18L4C6czlBA6OyC3fY44Tis5eb2wTbnLsrdyD36+rxzQ+A8A+G+PZi5gBVFZeR4c8qpm0hbaUOJtepmNd5SsEmn8IzkTYmcYw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.236)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.236 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.236; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>, Yajun Wu <yajunw@nvidia.com>",
        "Subject": "[PATCH v1 04/17] vdpa/mlx5: support pre create virtq resource",
        "Date": "Mon, 6 Jun 2022 14:20:44 +0300",
        "Message-ID": "<20220606112109.208873-8-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220606112109.208873-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220606112109.208873-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "233cd452-e0c4-4e96-8d90-08da47aec622",
        "X-MS-TrafficTypeDiagnostic": "DM6PR12MB4154:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <DM6PR12MB4154B2D68346266C8458DCBDBFA29@DM6PR12MB4154.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n xQb+mGLFRFyoSu31E3YEdddfuF8yvDwYpMGUcnyV3+lpUDOhNd/xRjHc5jjVeDoem+Mt/UmgIN7Pkgzi6Vroxq0mMu7TKCzGb4h0ebs7JmJ/JwAmOQYVJc2UxOPun8c1hkKYBxDcDnzBXXEflWLUF/488UusIhJZYurhWQ2u+HJz/nw7akh9CutGXDV648lspSuMOeH/lzpWAa9Gb5IeB0cEh6JIkJObeFCwEMeK1n+0zuA25ddWgNXCM3xR0o0L9gMgLGdMEVpih8FxbyAt/qXmK6EyDsJHtC5SaMwN60gw67Du4Y9HbALoKVTmdWKMeRNX8pNBKZPwc2CZgL6snDqQ4Vb2TnfczwcAHoGFKZ7wudXguTNZ+m08EvUlmBs51+H87q2WyYioTP06/G84VV23qk0PGYWuKdvbKk2Wj5NuhI+hdjxhiKLj1h5vVuzSn5MLmQ5DFAa/H2lN2fEQYopVB1bxe6D54xFHMxo0w5eloiggdd8lAJS57IzCyrNfl9XgiGl84Mi4/qB/mXGKo2bW5lesrHCxWiswcocPxr5m/QLYT7VNtUOVsVL1RGpbzjRTbb+rHtkQs06ZqDGeyOKcyUWPro/l3hFxAvk/pIIuk7hM998FVq2DrzahN271GxX1MsOfhe7TDeOQpyccZhLbpOeufglITSzuTRYDLJgcoaTKTaV/weaye4Xm/U+N2kOCOBJnJiLG488eJHcL8A==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.236; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(40470700004)(46966006)(36840700001)(356005)(55016003)(26005)(6666004)(2906002)(6636002)(47076005)(40460700003)(81166007)(6286002)(54906003)(316002)(36756003)(86362001)(36860700001)(83380400001)(426003)(7696005)(2616005)(4326008)(110136005)(8676002)(107886003)(1076003)(70586007)(70206006)(186003)(5660300002)(16526019)(508600001)(336012)(8936002)(82310400005)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Jun 2022 11:21:59.8662 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 233cd452-e0c4-4e96-8d90-08da47aec622",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.236];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT019.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM6PR12MB4154",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Yajun Wu <yajunw@nvidia.com>\n\nThe motivation of this change is to reduce vDPA device queue creation\ntime by create some queue resource in vDPA device probe stage.\n\nIn VM live migration scenario, this can reduce 0.8ms for each queue\ncreation, thus reduce LM network downtime.\n\nTo create queue resource(umem/counter) in advance, we need to know\nvirtio queue depth and max number of queue VM will use.\n\nIntroduce two new devargs: queues(max queue pair number) and queue_size\n(queue depth). Two args must be both provided, if only one argument\nprovided, the argument will be ignored and no pre-creation.\n\nThe queues and queue_size must also be identical to vhost configuration\ndriver later receive. Otherwise either the pre-create resource is wasted\nor missing or the resource need destroy and recreate(in case queue_size\nmismatch).\n\nPre-create umem/counter will keep alive until vDPA device removal.\n\nSigned-off-by: Yajun Wu <yajunw@nvidia.com>\n---\n doc/guides/vdpadevs/mlx5.rst  | 14 +++++++\n drivers/vdpa/mlx5/mlx5_vdpa.c | 75 ++++++++++++++++++++++++++++++++++-\n drivers/vdpa/mlx5/mlx5_vdpa.h |  2 +\n 3 files changed, 89 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/doc/guides/vdpadevs/mlx5.rst b/doc/guides/vdpadevs/mlx5.rst\nindex 3ded142311..0ad77bf535 100644\n--- a/doc/guides/vdpadevs/mlx5.rst\n+++ b/doc/guides/vdpadevs/mlx5.rst\n@@ -101,6 +101,20 @@ for an additional list of options shared with other mlx5 drivers.\n \n   - 0, HW default.\n \n+- ``queue_size`` parameter [int]\n+\n+  - 1 - 1024, Virio Queue depth for pre-creating queue resource to speed up\n+    first time queue creation. Set it together with queues devarg.\n+\n+  - 0, default value, no pre-create virtq resource.\n+\n+- ``queues`` parameter [int]\n+\n+  - 1 - 128, Max number of virio queue pair(including 1 rx queue and 1 tx queue)\n+    for pre-create queue resource to speed up first time queue creation. Set it\n+    together with queue_size devarg.\n+\n+  - 0, default value, no pre-create virtq resource.\n \n Error handling\n ^^^^^^^^^^^^^^\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex ee71339b78..faf833ee2f 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -244,7 +244,9 @@ mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)\n static void\n mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv)\n {\n-\tmlx5_vdpa_virtqs_cleanup(priv);\n+\t/* Clean pre-created resource in dev removal only. */\n+\tif (!priv->queues)\n+\t\tmlx5_vdpa_virtqs_cleanup(priv);\n \tmlx5_vdpa_mem_dereg(priv);\n }\n \n@@ -494,6 +496,12 @@ mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)\n \t\tpriv->hw_max_latency_us = (uint32_t)tmp;\n \t} else if (strcmp(key, \"hw_max_pending_comp\") == 0) {\n \t\tpriv->hw_max_pending_comp = (uint32_t)tmp;\n+\t} else if (strcmp(key, \"queue_size\") == 0) {\n+\t\tpriv->queue_size = (uint16_t)tmp;\n+\t} else if (strcmp(key, \"queues\") == 0) {\n+\t\tpriv->queues = (uint16_t)tmp;\n+\t} else {\n+\t\tDRV_LOG(WARNING, \"Invalid key %s.\", key);\n \t}\n \treturn 0;\n }\n@@ -524,9 +532,68 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,\n \tif (!priv->event_us &&\n \t    priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER)\n \t\tpriv->event_us = MLX5_VDPA_DEFAULT_TIMER_STEP_US;\n+\tif ((priv->queue_size && !priv->queues) ||\n+\t\t(!priv->queue_size && priv->queues)) {\n+\t\tpriv->queue_size = 0;\n+\t\tpriv->queues = 0;\n+\t\tDRV_LOG(WARNING, \"Please provide both queue_size and queues.\");\n+\t}\n \tDRV_LOG(DEBUG, \"event mode is %d.\", priv->event_mode);\n \tDRV_LOG(DEBUG, \"event_us is %u us.\", priv->event_us);\n \tDRV_LOG(DEBUG, \"no traffic max is %u.\", priv->no_traffic_max);\n+\tDRV_LOG(DEBUG, \"queues is %u, queue_size is %u.\", priv->queues,\n+\t\tpriv->queue_size);\n+}\n+\n+static int\n+mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)\n+{\n+\tuint32_t index;\n+\tuint32_t i;\n+\n+\tif (!priv->queues)\n+\t\treturn 0;\n+\tfor (index = 0; index < (priv->queues * 2); ++index) {\n+\t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];\n+\n+\t\tif (priv->caps.queue_counters_valid) {\n+\t\t\tif (!virtq->counters)\n+\t\t\t\tvirtq->counters =\n+\t\t\t\t\tmlx5_devx_cmd_create_virtio_q_counters\n+\t\t\t\t\t\t(priv->cdev->ctx);\n+\t\t\tif (!virtq->counters) {\n+\t\t\t\tDRV_LOG(ERR, \"Failed to create virtq couners for virtq\"\n+\t\t\t\t\t\" %d.\", index);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t\tfor (i = 0; i < RTE_DIM(virtq->umems); ++i) {\n+\t\t\tuint32_t size;\n+\t\t\tvoid *buf;\n+\t\t\tstruct mlx5dv_devx_umem *obj;\n+\n+\t\t\tsize = priv->caps.umems[i].a * priv->queue_size +\n+\t\t\t\t\tpriv->caps.umems[i].b;\n+\t\t\tbuf = 
rte_zmalloc(__func__, size, 4096);\n+\t\t\tif (buf == NULL) {\n+\t\t\t\tDRV_LOG(ERR, \"Cannot allocate umem %d memory for virtq\"\n+\t\t\t\t\t\t\" %u.\", i, index);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tobj = mlx5_glue->devx_umem_reg(priv->cdev->ctx, buf,\n+\t\t\t\t\tsize, IBV_ACCESS_LOCAL_WRITE);\n+\t\t\tif (obj == NULL) {\n+\t\t\t\trte_free(buf);\n+\t\t\t\tDRV_LOG(ERR, \"Failed to register umem %d for virtq %u.\",\n+\t\t\t\t\t\ti, index);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tvirtq->umems[i].size = size;\n+\t\t\tvirtq->umems[i].buf = buf;\n+\t\t\tvirtq->umems[i].obj = obj;\n+\t\t}\n+\t}\n+\treturn 0;\n }\n \n static int\n@@ -604,6 +671,8 @@ mlx5_vdpa_create_dev_resources(struct mlx5_vdpa_priv *priv)\n \t\treturn -rte_errno;\n \tif (mlx5_vdpa_event_qp_global_prepare(priv))\n \t\treturn -rte_errno;\n+\tif (mlx5_vdpa_virtq_resource_prepare(priv))\n+\t\treturn -rte_errno;\n \treturn 0;\n }\n \n@@ -638,6 +707,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,\n \t\tpriv->num_lag_ports = 1;\n \tpthread_mutex_init(&priv->vq_config_lock, NULL);\n \tpriv->cdev = cdev;\n+\tmlx5_vdpa_config_get(mkvlist, priv);\n \tif (mlx5_vdpa_create_dev_resources(priv))\n \t\tgoto error;\n \tpriv->vdev = rte_vdpa_register_device(cdev->dev, &mlx5_vdpa_ops);\n@@ -646,7 +716,6 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,\n \t\trte_errno = rte_errno ? rte_errno : EINVAL;\n \t\tgoto error;\n \t}\n-\tmlx5_vdpa_config_get(mkvlist, priv);\n \tSLIST_INIT(&priv->mr_list);\n \tpthread_mutex_lock(&priv_list_lock);\n \tTAILQ_INSERT_TAIL(&priv_list, priv, next);\n@@ -684,6 +753,8 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)\n {\n \tuint32_t i;\n \n+\tif (priv->queues)\n+\t\tmlx5_vdpa_virtqs_cleanup(priv);\n \tmlx5_vdpa_dev_cache_clean(priv);\n \tfor (i = 0; i < priv->caps.max_num_virtio_queues; i++) {\n \t\tif (!priv->virtqs[i].counters)\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex e7f3319f89..f6719a3c60 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -135,6 +135,8 @@ struct mlx5_vdpa_priv {\n \tuint8_t hw_latency_mode; /* Hardware CQ moderation mode. */\n \tuint16_t hw_max_latency_us; /* Hardware CQ moderation period in usec. */\n \tuint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */\n+\tuint16_t queue_size; /* virtq depth for pre-creating virtq resource */\n+\tuint16_t queues; /* Max virtq pair for pre-creating virtq resource */\n \tstruct rte_vdpa_device *vdev; /* vDPA device. */\n \tstruct mlx5_common_device *cdev; /* Backend mlx5 device. */\n \tint vid; /* vhost device id. */\n",
    "prefixes": [
        "v1",
        "04/17"
    ]
}
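
The PUT and PATCH methods described above modify a patch rather than read it.
A hedged sketch of a partial update via PATCH, again with Python's requests
library (this assumes an API token with maintainer rights on the patch's
project; the token below is a placeholder, state names are defined per
Patchwork instance, and state, archived, and delegate are among the fields
Patchwork exposes for update):

    import requests

    # Write access requires token authentication (placeholder token)
    # and maintainer permissions on the patch's project.
    headers = {"Authorization": "Token <your-api-token>"}

    # PATCH sends only the fields being changed; PUT would instead
    # submit the full set of writable fields.
    resp = requests.patch(
        "https://patches.dpdk.org/api/patches/112348/",
        headers=headers,
        json={"state": "accepted", "archived": False},
    )
    resp.raise_for_status()
    print(resp.json()["state"])  # reflects the updated state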