get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
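
For example, the exchange below can be reproduced with any HTTP client. A minimal sketch using Python and the third-party requests library (an assumed dependency; the endpoint URL and patch ID come from the response shown below):

import requests

# Fetch one patch as JSON from the Patchwork REST API.
resp = requests.get("https://patches.dpdk.org/api/patches/112372/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # patch subject, e.g. "[16/16] vdpa/mlx5: ..."
print(patch["state"])  # review state, e.g. "superseded"
print(patch["mbox"])   # mbox URL, suitable for piping into git am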

GET /api/patches/112372/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 112372,
    "url": "https://patches.dpdk.org/api/patches/112372/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-32-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220606112109.208873-32-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220606112109.208873-32-lizh@nvidia.com",
    "date": "2022-06-06T11:21:08",
    "name": "[16/16] vdpa/mlx5: prepare virtqueue resource creation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "18be5040fe76f336d280c088c52494a4c6c378ab",
    "submitter": {
        "id": 1967,
        "url": "https://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-32-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23339,
            "url": "https://patches.dpdk.org/api/series/23339/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=23339",
            "date": "2022-06-06T11:20:37",
            "name": "Add vDPA multi-threads optiomization",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/23339/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/112372/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/112372/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D42E7A0543;\n\tMon,  6 Jun 2022 13:25:29 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 4BCB042B7B;\n\tMon,  6 Jun 2022 13:23:11 +0200 (CEST)",
            "from NAM04-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam04on2075.outbound.protection.outlook.com [40.107.102.75])\n by mails.dpdk.org (Postfix) with ESMTP id 3581D42B94\n for <dev@dpdk.org>; Mon,  6 Jun 2022 13:23:09 +0200 (CEST)",
            "from DM3PR03CA0017.namprd03.prod.outlook.com (2603:10b6:0:50::27) by\n CY4PR12MB1367.namprd12.prod.outlook.com (2603:10b6:903:3c::19) with\n Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5314.13; Mon, 6 Jun 2022 11:23:07 +0000",
            "from DM6NAM11FT035.eop-nam11.prod.protection.outlook.com\n (2603:10b6:0:50:cafe::9f) by DM3PR03CA0017.outlook.office365.com\n (2603:10b6:0:50::27) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.12 via Frontend\n Transport; Mon, 6 Jun 2022 11:23:07 +0000",
            "from mail.nvidia.com (12.22.5.238) by\n DM6NAM11FT035.mail.protection.outlook.com (10.13.172.100) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5314.12 via Frontend Transport; Mon, 6 Jun 2022 11:23:07 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL105.nvidia.com\n (10.27.9.14) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Mon, 6 Jun 2022 11:23:06 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Mon, 6 Jun 2022\n 04:23:03 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=G0xwYYsr58nNJYi29cfHA2LZNf4Sg9Hpt3t8KIvInzkqW9AB5ecJgu11VN6kdvkN2BWZaKujgM0ZP8xT9vIyjsWGQvL7wz8Ky+Ft7j6AYyDrMcIMKnC/lF16BGPFlGJG7lGLEew8dI2eQqZQ2zz9pMAe3bRCjt/H74XkIUufFGc+UH18pru+eMILmRD1RLvy5avxTkgt/TQHJpMwEyQTjfk2oXVf3FmCzMG1GQI1A4BIl5IFxrx76hTaNwdcMos4wDfN7PvnoMPB0Z6yCEgyOY59f2hoGRmm7GBVfujizTlwCONZCA7NWfr0tMZg0XrnKIH93ddyrh8dOGwFap8wxw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=XsUnvxy0pfveSZZ6jgiaSH5PTTAq7dr+MbccXrR8wgI=;\n b=Fugu/mOH9kZ3SLTndq2eEHjnbIPkI09bkw5odtEiBpsTE8N7JQKFCQ8OY1F9lUYJKW2i1BrW+fhwDU3uaTaEkMKxMNIkR2rPELP72iP71ytxi7AywqpWLjmpGJarU2MPpr6Z7ITvadebacpQ3WBRtJKeoLEXiqzzgAdauhvkVw+owH6tdU5cgPoHF0dDqYkLzAku8enimEezyN0pKWKR46UR2Or+dIyCy+LpCGdcwDdZGfCoiiLG1A1UVPSmJ4bive9ZDUnfarHT+bKCtUlvRzVOHrtWAxZs6qt7Ki42Z/WvMiTsxZDjbMEZ9mtvw4VgzRfDQ06uLfZVb32qfvJYug==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.238) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=XsUnvxy0pfveSZZ6jgiaSH5PTTAq7dr+MbccXrR8wgI=;\n b=IaDAUuZ/pqg1l5gXgNnklc069IvsT1XWwU+gDDQsYo5hF5kzNgLwHklgEyjgXyPlG6I8EXcKy78TKVheEqxvOYPjWzIUIOkbGn1rcqDYBpMkEz2LwoczLFoTdEzs5RqHu5+RcugGBjf3KD8ikJEE4nENppJwyasFBVF1sunhda/tl5PiA26N5vmXe4Us/0e6U9XgW6nqXgqFnMiH9p56nXBlE34bL23FlH2xNjmkPjSEnXhABmrrMnpHOtsy7VELgtuEkXAHVb1Uu7E1OgVfqUwWrTL/Nzi4DdoVBGXi8jwhhD1BoZ+dFb4K8BofdPPqwoQG2mA2ttp2KVCaA1+gYg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.238)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.238 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.238; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>",
        "Subject": "[PATCH 16/16] vdpa/mlx5: prepare virtqueue resource creation",
        "Date": "Mon, 6 Jun 2022 14:21:08 +0300",
        "Message-ID": "<20220606112109.208873-32-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220606112109.208873-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220606112109.208873-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "b464f7bb-1da3-435d-7885-08da47aeee33",
        "X-MS-TrafficTypeDiagnostic": "CY4PR12MB1367:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <CY4PR12MB1367BD68E98D321A0990F629BFA29@CY4PR12MB1367.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n Ty1i3bMz1pDxT9MDTViWj2gIzZJV4YXmwqizbr+mAS+JGEovK0jc4zOwfpJIC2eZJvwrPyGRxufSO6hedDaNFKjRNfHG6rSuJPd1Sm8Pane+XdyKSy5UzQH8VCslZqeFlKLrDrbfj16LCrfGJbOuNFAATSgxXh22aO7KjlTcMuvZUgaD95WOSZu7TCm9bFB6T2SOy13IS7k7a6G62jfie2oKTL31VM4+5b31kg7qhQRpp8TwOB5bjonQGOuTVTaBHiWSMXeAyOAsEa1es9Sqp4ENBsoGASl/m9XazzegG4XvnsI/dFJqS/ZtdUhg5Jr+lvwfYCLkcu0Ke+Lc2d7jdcZefOoCNr5VZqpaSPlK9N/2N++96mp7xPgUjcwz1B2jvd9KN5LjuC8V54gDoQ9UmwItTQJe/vNba8Th2+r6vx5Rr0J5uEE/jHx3IXEBkoJONfXSEbxsTH9bA8P2WFTAcmU7yYSPVSS9HjNTTxjcXFsDyrpzIsMmpDGF1/7annB9DcqgmRx6jO892urfKJ8kVW3VyN8kgeqF+voHZSg8KVefsrVYprAhjW5Tje4PINmaatTcbYrN41csVJMmITRMyO+QIvGsjuw3ZMjQ9N55XYmSMNY4rnZV2GfnDWrylp8CInl5/84U3KtZDCuDehv5ZS9FjQlZ04g52xpfxM9myezgi7SjHn5XqtkUCC0/Fc8EINAHqwR7q3wCTXfU7sFvwg==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.238; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(40470700004)(46966006)(36840700001)(8676002)(8936002)(83380400001)(47076005)(426003)(54906003)(336012)(4326008)(110136005)(2906002)(7696005)(6636002)(356005)(70586007)(70206006)(36756003)(30864003)(1076003)(86362001)(316002)(508600001)(2616005)(81166007)(5660300002)(82310400005)(6286002)(36860700001)(107886003)(16526019)(40460700003)(186003)(26005)(55016003)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Jun 2022 11:23:07.1016 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n b464f7bb-1da3-435d-7885-08da47aeee33",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.238];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT035.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CY4PR12MB1367",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Split the virtqs virt-queue resource between\nthe configuration threads.\nAlso need pre-created virt-queue resource\nafter virtq destruction.\nThis accelerates the LM process and reduces its time by 30%.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.c         | 115 ++++++++++++++++++++------\n drivers/vdpa/mlx5/mlx5_vdpa.h         |  12 ++-\n drivers/vdpa/mlx5/mlx5_vdpa_cthread.c |  15 +++-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c   | 111 +++++++++++++++++++++----\n 4 files changed, 208 insertions(+), 45 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex f006a9cd3f..c5d82872c7 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -275,23 +275,18 @@ mlx5_vdpa_wait_dev_close_tasks_done(struct mlx5_vdpa_priv *priv)\n }\n \n static int\n-mlx5_vdpa_dev_close(int vid)\n+_internal_mlx5_vdpa_dev_close(struct mlx5_vdpa_priv *priv,\n+\t\tbool release_resource)\n {\n-\tstruct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);\n-\tstruct mlx5_vdpa_priv *priv =\n-\t\tmlx5_vdpa_find_priv_resource_by_vdev(vdev);\n \tint ret = 0;\n+\tint vid = priv->vid;\n \n-\tif (priv == NULL) {\n-\t\tDRV_LOG(ERR, \"Invalid vDPA device: %s.\", vdev->device->name);\n-\t\treturn -1;\n-\t}\n \tmlx5_vdpa_cqe_event_unset(priv);\n \tif (priv->state == MLX5_VDPA_STATE_CONFIGURED) {\n \t\tret |= mlx5_vdpa_lm_log(priv);\n \t\tpriv->state = MLX5_VDPA_STATE_IN_PROGRESS;\n \t}\n-\tif (priv->use_c_thread) {\n+\tif (priv->use_c_thread && !release_resource) {\n \t\tif (priv->last_c_thrd_idx >=\n \t\t\t(conf_thread_mng.max_thrds - 1))\n \t\t\tpriv->last_c_thrd_idx = 0;\n@@ -315,7 +310,7 @@ mlx5_vdpa_dev_close(int vid)\n \tpthread_mutex_lock(&priv->steer_update_lock);\n \tmlx5_vdpa_steer_unset(priv);\n \tpthread_mutex_unlock(&priv->steer_update_lock);\n-\tmlx5_vdpa_virtqs_release(priv);\n+\tmlx5_vdpa_virtqs_release(priv, release_resource);\n \tmlx5_vdpa_drain_cq(priv);\n \tif (priv->lm_mr.addr)\n \t\tmlx5_os_wrapped_mkey_destroy(&priv->lm_mr);\n@@ -329,6 +324,24 @@ mlx5_vdpa_dev_close(int vid)\n \treturn ret;\n }\n \n+static int\n+mlx5_vdpa_dev_close(int vid)\n+{\n+\tstruct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);\n+\tstruct mlx5_vdpa_priv *priv;\n+\n+\tif (!vdev) {\n+\t\tDRV_LOG(ERR, \"Invalid vDPA device.\");\n+\t\treturn -1;\n+\t}\n+\tpriv = mlx5_vdpa_find_priv_resource_by_vdev(vdev);\n+\tif (priv == NULL) {\n+\t\tDRV_LOG(ERR, \"Invalid vDPA device: %s.\", vdev->device->name);\n+\t\treturn -1;\n+\t}\n+\treturn _internal_mlx5_vdpa_dev_close(priv, false);\n+}\n+\n static int\n mlx5_vdpa_dev_config(int vid)\n {\n@@ -624,11 +637,33 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,\n \t\tpriv->queue_size);\n }\n \n+void\n+mlx5_vdpa_prepare_virtq_destroy(struct mlx5_vdpa_priv *priv)\n+{\n+\tuint32_t max_queues, index;\n+\tstruct mlx5_vdpa_virtq *virtq;\n+\n+\tif (!priv->queues || !priv->queue_size)\n+\t\treturn;\n+\tmax_queues = ((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?\n+\t\t(priv->queues * 2) : (priv->caps.max_num_virtio_queues);\n+\tif (mlx5_vdpa_is_modify_virtq_supported(priv))\n+\t\tmlx5_vdpa_steer_unset(priv);\n+\tfor (index = 0; index < max_queues; ++index) {\n+\t\tvirtq = &priv->virtqs[index];\n+\t\tif (virtq->virtq) {\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\tmlx5_vdpa_virtq_unset(virtq);\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t}\n+\t}\n+}\n+\n static int\n mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)\n {\n-\tuint32_t max_queues;\n-\tuint32_t index;\n+\tuint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;\n+\tuint32_t max_queues, index, thrd_idx, data[1];\n \tstruct mlx5_vdpa_virtq *virtq;\n \n \tfor (index = 0; index < priv->caps.max_num_virtio_queues;\n@@ -640,25 +675,53 @@ mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)\n \t\treturn 0;\n \tmax_queues = (priv->queues < priv->caps.max_num_virtio_queues) ?\n \t\t(priv->queues * 2) : (priv->caps.max_num_virtio_queues);\n-\tfor (index = 0; index < max_queues; ++index)\n-\t\tif 
(mlx5_vdpa_virtq_single_resource_prepare(priv,\n-\t\t\tindex))\n+\tif (priv->use_c_thread) {\n+\t\tuint32_t main_task_idx[max_queues];\n+\n+\t\tfor (index = 0; index < max_queues; ++index) {\n+\t\t\tthrd_idx = index % (conf_thread_mng.max_thrds + 1);\n+\t\t\tif (!thrd_idx) {\n+\t\t\t\tmain_task_idx[task_num] = index;\n+\t\t\t\ttask_num++;\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tthrd_idx = priv->last_c_thrd_idx + 1;\n+\t\t\tif (thrd_idx >= conf_thread_mng.max_thrds)\n+\t\t\t\tthrd_idx = 0;\n+\t\t\tpriv->last_c_thrd_idx = thrd_idx;\n+\t\t\tdata[0] = index;\n+\t\t\tif (mlx5_vdpa_task_add(priv, thrd_idx,\n+\t\t\t\tMLX5_VDPA_TASK_PREPARE_VIRTQ,\n+\t\t\t\t&remaining_cnt, &err_cnt,\n+\t\t\t\t(void **)&data, 1)) {\n+\t\t\t\tDRV_LOG(ERR, \"Fail to add \"\n+\t\t\t\t\"task prepare virtq (%d).\", index);\n+\t\t\t\tmain_task_idx[task_num] = index;\n+\t\t\t\ttask_num++;\n+\t\t\t}\n+\t\t}\n+\t\tfor (index = 0; index < task_num; ++index)\n+\t\t\tif (mlx5_vdpa_virtq_single_resource_prepare(priv,\n+\t\t\t\tmain_task_idx[index]))\n+\t\t\t\tgoto error;\n+\t\tif (mlx5_vdpa_c_thread_wait_bulk_tasks_done(&remaining_cnt,\n+\t\t\t&err_cnt, 2000)) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\"Failed to wait virt-queue prepare tasks ready.\");\n \t\t\tgoto error;\n+\t\t}\n+\t} else {\n+\t\tfor (index = 0; index < max_queues; ++index)\n+\t\t\tif (mlx5_vdpa_virtq_single_resource_prepare(priv,\n+\t\t\t\tindex))\n+\t\t\t\tgoto error;\n+\t}\n \tif (mlx5_vdpa_is_modify_virtq_supported(priv))\n \t\tif (mlx5_vdpa_steer_update(priv, true))\n \t\t\tgoto error;\n \treturn 0;\n error:\n-\tfor (index = 0; index < max_queues; ++index) {\n-\t\tvirtq = &priv->virtqs[index];\n-\t\tif (virtq->virtq) {\n-\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n-\t\t\tmlx5_vdpa_virtq_unset(virtq);\n-\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n-\t\t}\n-\t}\n-\tif (mlx5_vdpa_is_modify_virtq_supported(priv))\n-\t\tmlx5_vdpa_steer_unset(priv);\n+\tmlx5_vdpa_prepare_virtq_destroy(priv);\n \treturn -1;\n }\n \n@@ -860,7 +923,7 @@ static void\n mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)\n {\n \tif (priv->state == MLX5_VDPA_STATE_CONFIGURED)\n-\t\tmlx5_vdpa_dev_close(priv->vid);\n+\t\t_internal_mlx5_vdpa_dev_close(priv, true);\n \tif (priv->use_c_thread)\n \t\tmlx5_vdpa_wait_dev_close_tasks_done(priv);\n \tmlx5_vdpa_release_dev_resources(priv);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex f353db62ac..dc4dfba5ed 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -85,6 +85,7 @@ enum mlx5_vdpa_task_type {\n \tMLX5_VDPA_TASK_SETUP_VIRTQ,\n \tMLX5_VDPA_TASK_STOP_VIRTQ,\n \tMLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,\n+\tMLX5_VDPA_TASK_PREPARE_VIRTQ,\n };\n \n /* Generic task information and size must be multiple of 4B. 
*/\n@@ -128,6 +129,9 @@ struct mlx5_vdpa_virtq {\n \tuint32_t configured:1;\n \tuint32_t enable:1;\n \tuint32_t stopped:1;\n+\tuint32_t rx_csum:1;\n+\tuint32_t virtio_version_1_0:1;\n+\tuint32_t event_mode:3;\n \tuint32_t version;\n \tpthread_mutex_t virtq_lock;\n \tstruct mlx5_vdpa_priv *priv;\n@@ -355,8 +359,12 @@ void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);\n  *\n  * @param[in] priv\n  *   The vdpa driver private structure.\n+ * @param[in] release_resource\n+ *   The vdpa driver release resource without prepare resource.\n  */\n-void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);\n+void\n+mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv,\n+\t\tbool release_resource);\n \n /**\n  * Cleanup cached resources of all virtqs.\n@@ -595,4 +603,6 @@ int\n mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp);\n void\n mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq);\n+void\n+mlx5_vdpa_prepare_virtq_destroy(struct mlx5_vdpa_priv *priv);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\nindex bb2279440b..6e6624e5a3 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n@@ -153,6 +153,7 @@ mlx5_vdpa_c_thread_handle(void *arg)\n \t\t\t\t__atomic_fetch_add(\n \t\t\t\t\ttask.err_cnt, 1, __ATOMIC_RELAXED);\n \t\t\t}\n+\t\t\tvirtq->enable = 1;\n \t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t\tbreak;\n \t\tcase MLX5_VDPA_TASK_STOP_VIRTQ:\n@@ -193,7 +194,7 @@ mlx5_vdpa_c_thread_handle(void *arg)\n \t\t\tpthread_mutex_lock(&priv->steer_update_lock);\n \t\t\tmlx5_vdpa_steer_unset(priv);\n \t\t\tpthread_mutex_unlock(&priv->steer_update_lock);\n-\t\t\tmlx5_vdpa_virtqs_release(priv);\n+\t\t\tmlx5_vdpa_virtqs_release(priv, false);\n \t\t\tmlx5_vdpa_drain_cq(priv);\n \t\t\tif (priv->lm_mr.addr)\n \t\t\t\tmlx5_os_wrapped_mkey_destroy(\n@@ -205,6 +206,18 @@ mlx5_vdpa_c_thread_handle(void *arg)\n \t\t\t\t&priv->dev_close_progress, 0,\n \t\t\t\t__ATOMIC_RELAXED);\n \t\t\tbreak;\n+\t\tcase MLX5_VDPA_TASK_PREPARE_VIRTQ:\n+\t\t\tret = mlx5_vdpa_virtq_single_resource_prepare(\n+\t\t\t\t\tpriv, task.idx);\n+\t\t\tif (ret) {\n+\t\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"Failed to prepare virtq %d.\",\n+\t\t\t\ttask.idx);\n+\t\t\t\t__atomic_fetch_add(\n+\t\t\t\ttask.err_cnt, 1,\n+\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t}\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tDRV_LOG(ERR, \"Invalid vdpa task type %d.\",\n \t\t\ttask.type);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex 20ce382487..d4dd73f861 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -116,18 +116,29 @@ mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv)\n \t}\n }\n \n+static void\n+mlx5_vdpa_vq_destroy(struct mlx5_vdpa_virtq *virtq)\n+{\n+\t/* Clean pre-created resource in dev removal only */\n+\tclaim_zero(mlx5_devx_cmd_destroy(virtq->virtq));\n+\tvirtq->index = 0;\n+\tvirtq->virtq = NULL;\n+\tvirtq->configured = 0;\n+}\n+\n /* Release cached VQ resources. 
*/\n void\n mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n {\n \tunsigned int i, j;\n \n+\tmlx5_vdpa_steer_unset(priv);\n \tfor (i = 0; i < priv->caps.max_num_virtio_queues; i++) {\n \t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];\n \n-\t\tif (virtq->index != i)\n-\t\t\tcontinue;\n \t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\tif (virtq->virtq)\n+\t\t\tmlx5_vdpa_vq_destroy(virtq);\n \t\tfor (j = 0; j < RTE_DIM(virtq->umems); ++j) {\n \t\t\tif (virtq->umems[j].obj) {\n \t\t\t\tclaim_zero(mlx5_glue->devx_umem_dereg\n@@ -157,29 +168,37 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n \t\tif (ret)\n \t\t\tDRV_LOG(WARNING, \"Failed to stop virtq %d.\",\n \t\t\t\tvirtq->index);\n-\t\tclaim_zero(mlx5_devx_cmd_destroy(virtq->virtq));\n-\t\tvirtq->index = 0;\n-\t\tvirtq->virtq = NULL;\n-\t\tvirtq->configured = 0;\n \t}\n+\tmlx5_vdpa_vq_destroy(virtq);\n \tvirtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;\n }\n \n void\n-mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)\n+mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv,\n+\tbool release_resource)\n {\n \tstruct mlx5_vdpa_virtq *virtq;\n-\tint i;\n-\n-\tfor (i = 0; i < priv->nr_virtqs; i++) {\n+\tuint32_t i, max_virtq, valid_vq_num;\n+\n+\tvalid_vq_num = ((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?\n+\t\t(priv->queues * 2) : priv->caps.max_num_virtio_queues;\n+\tmax_virtq = (release_resource &&\n+\t\t(valid_vq_num) > priv->nr_virtqs) ?\n+\t\t(valid_vq_num) : priv->nr_virtqs;\n+\tfor (i = 0; i < max_virtq; i++) {\n \t\tvirtq = &priv->virtqs[i];\n \t\tpthread_mutex_lock(&virtq->virtq_lock);\n \t\tmlx5_vdpa_virtq_unset(virtq);\n-\t\tif (i < (priv->queues * 2))\n+\t\tvirtq->enable = 0;\n+\t\tif (!release_resource && i < valid_vq_num)\n \t\t\tmlx5_vdpa_virtq_single_resource_prepare(\n \t\t\t\t\tpriv, i);\n \t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t}\n+\tif (!release_resource && priv->queues &&\n+\t\tmlx5_vdpa_is_modify_virtq_supported(priv))\n+\t\tif (mlx5_vdpa_steer_update(priv, true))\n+\t\t\tmlx5_vdpa_steer_unset(priv);\n \tpriv->features = 0;\n \tpriv->nr_virtqs = 0;\n }\n@@ -455,6 +474,9 @@ mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,\n \t\tvirtq->priv = priv;\n \t\tif (!virtq->virtq)\n \t\t\treturn true;\n+\t\tvirtq->rx_csum = attr.rx_csum;\n+\t\tvirtq->virtio_version_1_0 = attr.virtio_version_1_0;\n+\t\tvirtq->event_mode = attr.event_mode;\n \t}\n \treturn false;\n }\n@@ -538,6 +560,9 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick)\n \t\tgoto error;\n \t}\n \tclaim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));\n+\tvirtq->rx_csum = attr.rx_csum;\n+\tvirtq->virtio_version_1_0 = attr.virtio_version_1_0;\n+\tvirtq->event_mode = attr.event_mode;\n \tvirtq->configured = 1;\n \trte_spinlock_lock(&priv->db_lock);\n \trte_write32(virtq->index, priv->virtq_db_addr);\n@@ -629,6 +654,31 @@ mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)\n \treturn 0;\n }\n \n+static bool\n+mlx5_vdpa_is_pre_created_vq_mismatch(struct mlx5_vdpa_priv *priv,\n+\t\tstruct mlx5_vdpa_virtq *virtq)\n+{\n+\tstruct rte_vhost_vring vq;\n+\tuint32_t event_mode;\n+\n+\tif (virtq->rx_csum !=\n+\t\t!!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)))\n+\t\treturn true;\n+\tif (virtq->virtio_version_1_0 !=\n+\t\t!!(priv->features & (1ULL << VIRTIO_F_VERSION_1)))\n+\t\treturn true;\n+\tif (rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq))\n+\t\treturn true;\n+\tif (vq.size != virtq->vq_size)\n+\t\treturn true;\n+\tevent_mode = vq.callfd 
!= -1 || !(priv->caps.event_mode &\n+\t\t(1 << MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?\n+\t\tMLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;\n+\tif (virtq->event_mode != event_mode)\n+\t\treturn true;\n+\treturn false;\n+}\n+\n int\n mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n {\n@@ -664,6 +714,15 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t\t\tvirtq = &priv->virtqs[i];\n \t\t\tif (!virtq->enable)\n \t\t\t\tcontinue;\n+\t\t\tif (priv->queues && virtq->virtq) {\n+\t\t\t\tif (mlx5_vdpa_is_pre_created_vq_mismatch(priv, virtq)) {\n+\t\t\t\t\tmlx5_vdpa_prepare_virtq_destroy(priv);\n+\t\t\t\t\ti = 0;\n+\t\t\t\t\tvirtq = &priv->virtqs[i];\n+\t\t\t\t\tif (!virtq->enable)\n+\t\t\t\t\t\tcontinue;\n+\t\t\t\t}\n+\t\t\t}\n \t\t\tthrd_idx = i % (conf_thread_mng.max_thrds + 1);\n \t\t\tif (!thrd_idx) {\n \t\t\t\tmain_task_idx[task_num] = i;\n@@ -693,6 +752,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t\t\tgoto error;\n \t\t\t}\n+\t\t\tvirtq->enable = 1;\n \t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t}\n \t\tif (mlx5_vdpa_c_thread_wait_bulk_tasks_done(&remaining_cnt,\n@@ -724,20 +784,32 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t} else {\n \t\tfor (i = 0; i < nr_vring; i++) {\n \t\t\tvirtq = &priv->virtqs[i];\n+\t\t\tif (!virtq->enable)\n+\t\t\t\tcontinue;\n+\t\t\tif (priv->queues && virtq->virtq) {\n+\t\t\t\tif (mlx5_vdpa_is_pre_created_vq_mismatch(priv,\n+\t\t\t\t\tvirtq)) {\n+\t\t\t\t\tmlx5_vdpa_prepare_virtq_destroy(\n+\t\t\t\t\tpriv);\n+\t\t\t\t\ti = 0;\n+\t\t\t\t\tvirtq = &priv->virtqs[i];\n+\t\t\t\t\tif (!virtq->enable)\n+\t\t\t\t\t\tcontinue;\n+\t\t\t\t}\n+\t\t\t}\n \t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n-\t\t\tif (virtq->enable) {\n-\t\t\t\tif (mlx5_vdpa_virtq_setup(priv, i, true)) {\n-\t\t\t\t\tpthread_mutex_unlock(\n+\t\t\tif (mlx5_vdpa_virtq_setup(priv, i, true)) {\n+\t\t\t\tpthread_mutex_unlock(\n \t\t\t\t\t\t&virtq->virtq_lock);\n-\t\t\t\t\tgoto error;\n-\t\t\t\t}\n+\t\t\t\tgoto error;\n \t\t\t}\n+\t\t\tvirtq->enable = 1;\n \t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t}\n \t}\n \treturn 0;\n error:\n-\tmlx5_vdpa_virtqs_release(priv);\n+\tmlx5_vdpa_virtqs_release(priv, true);\n \treturn -1;\n }\n \n@@ -795,6 +867,11 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)\n \t\t\t\t\t\"for virtq %d.\", index);\n \t\t}\n \t\tmlx5_vdpa_virtq_unset(virtq);\n+\t} else {\n+\t\tif (virtq->virtq &&\n+\t\t\tmlx5_vdpa_is_pre_created_vq_mismatch(priv, virtq))\n+\t\t\tDRV_LOG(WARNING,\n+\t\t\t\"Configuration mismatch dummy virtq %d.\", index);\n \t}\n \tif (enable) {\n \t\tret = mlx5_vdpa_virtq_setup(priv, index, true);\n",
    "prefixes": [
        "16/16"
    ]
}
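
Updating a patch (the patch and put operations above) requires authentication and maintainer rights on the project. A hedged sketch of a partial update with PATCH, again using Python and requests, assuming a Patchwork API token exported in a PW_TOKEN environment variable (the variable name and the field values are assumptions of this sketch; the writable fields are typically state, delegate, archived and commit_ref):

import os
import requests

# PATCH performs a partial update: only the supplied fields change.
token = os.environ["PW_TOKEN"]  # assumed env var holding a Patchwork API token
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/112372/",
    headers={"Authorization": f"Token {token}"},
    json={"state": "accepted", "archived": False},  # example values
)
resp.raise_for_status()
print(resp.json()["state"])  # "accepted" on success

A PUT request works the same way but replaces the writable fields as a whole rather than merging the supplied ones.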