get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request body are changed.

put:
Update a patch, replacing all writable fields.

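The endpoint can also be driven from a script. Below is a minimal sketch using
Python's "requests" library; the token value is a placeholder, and changing
writable fields such as "state" or "archived" requires maintainer rights on
the project:

    import requests

    BASE = "http://patches.dpdk.org/api"

    # GET: fetch the patch shown in the sample response below.
    patch = requests.get(f"{BASE}/patches/113056/").json()
    print(patch["name"], patch["state"])

    # PATCH: partial update -- send only the fields to change.
    resp = requests.patch(
        f"{BASE}/patches/113056/",
        headers={"Authorization": "Token <your-api-token>"},
        json={"state": "accepted", "archived": True},
    )
    resp.raise_for_status()
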
GET /api/patches/113056/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 113056,
    "url": "http://patches.dpdk.org/api/patches/113056/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-16-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220618084805.87315-16-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220618084805.87315-16-lizh@nvidia.com",
    "date": "2022-06-18T08:48:05",
    "name": "[v3,15/15] vdpa/mlx5: prepare virtqueue resource creation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5918fb1aeb6ee74212735d5612344095bbd3191c",
    "submitter": {
        "id": 1967,
        "url": "http://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-16-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23621,
            "url": "http://patches.dpdk.org/api/series/23621/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23621",
            "date": "2022-06-18T08:47:50",
            "name": "mlx5/vdpa: optimize live migration time",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/23621/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/113056/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/113056/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3BB46A0032;\n\tSat, 18 Jun 2022 10:50:09 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 6A1D442BA6;\n\tSat, 18 Jun 2022 10:49:16 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2063.outbound.protection.outlook.com [40.107.243.63])\n by mails.dpdk.org (Postfix) with ESMTP id 9CB1142B98\n for <dev@dpdk.org>; Sat, 18 Jun 2022 10:49:14 +0200 (CEST)",
            "from DS7PR05CA0038.namprd05.prod.outlook.com (2603:10b6:8:2f::23) by\n BN6PR12MB1844.namprd12.prod.outlook.com (2603:10b6:404:fc::23) with\n Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5353.13; Sat, 18 Jun 2022 08:49:09 +0000",
            "from DM6NAM11FT060.eop-nam11.prod.protection.outlook.com\n (2603:10b6:8:2f:cafe::df) by DS7PR05CA0038.outlook.office365.com\n (2603:10b6:8:2f::23) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5373.10 via Frontend\n Transport; Sat, 18 Jun 2022 08:49:09 +0000",
            "from mail.nvidia.com (12.22.5.236) by\n DM6NAM11FT060.mail.protection.outlook.com (10.13.173.63) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5353.14 via Frontend Transport; Sat, 18 Jun 2022 08:49:08 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL109.nvidia.com\n (10.27.9.19) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Sat, 18 Jun 2022 08:49:08 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Sat, 18 Jun\n 2022 01:49:04 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=ll8uo1cXMsKP5dX/hByR7Ca2CRK3MCgg35l8mL4NCb5LOJtHmGTucgHTT+a+4TnHc6ZutXNZVqJCwymDDOl+8z0fn0RYEp9dQRqHQCojT9xSvJ0NrYGKYxpAt9NAzGd0RS0lU7kDKI6AVM3IL5beLyca9HAHhQ7WRjlKOOPsQQCf/OGRFIr4dZUmf/f35cY+3wsfCrtU0eTvKA5kay0PP/ZCXIaWboz4nwD7vjAsCpFkVHKCHoOlUoxsX7KXqt/KLcjFDm/P3MhoXN++altpHmtoVbHTtIOmXYwhOAG7fjP4yVkl4JYVUtUdRGUcAPA5wE/zBbGN8DdiTCdSPU5mLw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=tLYORpftxenQSpFjZHrlCP+iraxvBoiQVDqfNcHRCQ4=;\n b=WIbKZHogw1DSQGgzbBU0Tgna3lNjgmVDmWuTkDNSHednLaniOmisIH0imvhwVDMCvYY1BTUm2GRheZN+KejHTLa/NVM7jiXq8ZBYrVk9QxOr6oLXZkRkigJjICrWarzJOw/5pcCkxtQ4bZe+Bim5IMVOVG6RisglZlu2Ba3CfEtIiJLnl54EVkkQ2lSI0mTfwIWZdwNTLU8v65nXqlvpS8DkDGrewmf8uWtOj2Sq43PgFbXvB6Yv2w2doXzzsn4yItoeFcpX5pRnrt1MFnIPcM+3myqI7+B86gsn/8+3djg/nsqXIg80Vwkx2QYuub7a2eRae4CkaSbsReNpfJnwXg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.236) smtp.rcpttodomain=redhat.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=tLYORpftxenQSpFjZHrlCP+iraxvBoiQVDqfNcHRCQ4=;\n b=oNbKR4RvRgm6t4l2R0xH7neoDbGcL2S+Jj4OT+dJi3TUwM4eE4pmUmLI5Pji/Wc0Dr5opOWvh+xJCeCjqCYcPZUsUZzTKTNjOEGgIztvzd9L/IwTHHS1C1pmfYxWNfxgetLW1FIGnsghYvGgJHGeQAv6FYudy5rhsjWT6hMfsiYrNg+2Z3nl7JP2n489cA88fJSnfqYF7vEQJ+pk1pE6SUxw5khf89FE9vF3QnagVlBAMRfgvMkO2zkz9wYnuc7PEDEThGjabtMjTLT7AGisJSVkAw3xe9YXqqpTkyIx6DTVzn9b9RegcTTqL7P+C9F3UE/84uyMJNj97+8+BnskYQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.236)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.236 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.236; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>, <maxime.coquelin@redhat.com>",
        "Subject": "[PATCH v3 15/15] vdpa/mlx5: prepare virtqueue resource creation",
        "Date": "Sat, 18 Jun 2022 11:48:05 +0300",
        "Message-ID": "<20220618084805.87315-16-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220618084805.87315-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220618084805.87315-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "bd3474c0-d045-4605-fb2e-08da510768ab",
        "X-MS-TrafficTypeDiagnostic": "BN6PR12MB1844:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <BN6PR12MB18441C0B64E3A53F558BB29ABFAE9@BN6PR12MB1844.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n XuwZ3zbYLUyXj8q+/84ybj7U7XFDYYgpTsruHCtmwe4Ga22BHzziE6ofNi3M0smSmufBYpKWcOTw1P8/ok8OQPbWmiX329EygttmklUbFIYHZJyRpk7fyMRMFCOHnXhToP98X2Nqy2+wl+Kbd4wkUi/dXP3c/cy7UJTObpQj2aOkRrzlMlKqueDJ9T5YwUWyR5EehtzL23SvIx8kkZ+xq0DQAxMnRHPcEVhclFUVBA0NB0aGOhP/OSJ6K7QTkYogiWeInEaSOUmoOYREWq3rITdbJ/AeFRbWj1FRTQgW2x0pPsuKdIXOMDisCFCd84QhNyI+PWGB63exDcxH0Oo2CFxbFK+5WuOgTK4lEzMRcFbGWdZPvzQNGWnm5YlLkIKYcidh+uzGoPOMbznA8FKGSMtCMNoYHz43SABwEC6WIUtZ35yjqjSBZOPBZkKDl0D+GqUzPIhy2MAYuBWkyAf3FOkHqleLHuxbcyRCdflOVhqZtpmXC/sG/fEfRmXQQLITxccCQlXgVzI1wkihGDKXBY7wyi4XOGsyv6YZ6o5xe4nuABLW5dUpX5wWerQ5vnJJOVTtqWy1lHL6CVkQ/Ip0mAxObB5M4viECjFxnWxIlLgeNmQvoXmB4zF1zCsSHi1NCIgMKbYuLGTFLZmVqFmRTna+MaJuxiubSwHSK1/SgdCBiVDRWYeGcrTNqnwpYWmgRCEMzXNKXm3jLAU3LR0uxg==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.236; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230016)(4636009)(40470700004)(46966006)(36840700001)(86362001)(7696005)(82310400005)(1076003)(30864003)(6286002)(4326008)(70586007)(8676002)(316002)(6636002)(110136005)(40460700003)(70206006)(26005)(356005)(186003)(54906003)(81166007)(16526019)(5660300002)(498600001)(8936002)(83380400001)(36756003)(2616005)(36860700001)(47076005)(55016003)(336012)(426003)(2906002)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Jun 2022 08:49:08.7194 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n bd3474c0-d045-4605-fb2e-08da510768ab",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.236];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT060.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN6PR12MB1844",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Split the virtqs virt-queue resource between\nthe configuration threads.\nAlso need pre-created virt-queue resource\nafter virtq destruction.\nThis accelerates the LM process and reduces its time by 30%.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n doc/guides/rel_notes/release_22_07.rst |   1 +\n drivers/vdpa/mlx5/mlx5_vdpa.c          | 115 +++++++++++++++++++------\n drivers/vdpa/mlx5/mlx5_vdpa.h          |  12 ++-\n drivers/vdpa/mlx5/mlx5_vdpa_cthread.c  |  15 +++-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c    | 111 ++++++++++++++++++++----\n 5 files changed, 209 insertions(+), 45 deletions(-)",
    "diff": "diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst\nindex 2056cd9ee7..e1a9796e5c 100644\n--- a/doc/guides/rel_notes/release_22_07.rst\n+++ b/doc/guides/rel_notes/release_22_07.rst\n@@ -178,6 +178,7 @@ New Features\n * **Updated Nvidia mlx5 vDPA driver.**\n \n   * Added new devargs ``queue_size`` and ``queues`` to allow prior creation of virtq resources.\n+  * Added new devarg ``max_conf_threads`` defines the number of multi-thread management to parallel the configurations.\n \n \n Removed Items\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex f006a9cd3f..c5d82872c7 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -275,23 +275,18 @@ mlx5_vdpa_wait_dev_close_tasks_done(struct mlx5_vdpa_priv *priv)\n }\n \n static int\n-mlx5_vdpa_dev_close(int vid)\n+_internal_mlx5_vdpa_dev_close(struct mlx5_vdpa_priv *priv,\n+\t\tbool release_resource)\n {\n-\tstruct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);\n-\tstruct mlx5_vdpa_priv *priv =\n-\t\tmlx5_vdpa_find_priv_resource_by_vdev(vdev);\n \tint ret = 0;\n+\tint vid = priv->vid;\n \n-\tif (priv == NULL) {\n-\t\tDRV_LOG(ERR, \"Invalid vDPA device: %s.\", vdev->device->name);\n-\t\treturn -1;\n-\t}\n \tmlx5_vdpa_cqe_event_unset(priv);\n \tif (priv->state == MLX5_VDPA_STATE_CONFIGURED) {\n \t\tret |= mlx5_vdpa_lm_log(priv);\n \t\tpriv->state = MLX5_VDPA_STATE_IN_PROGRESS;\n \t}\n-\tif (priv->use_c_thread) {\n+\tif (priv->use_c_thread && !release_resource) {\n \t\tif (priv->last_c_thrd_idx >=\n \t\t\t(conf_thread_mng.max_thrds - 1))\n \t\t\tpriv->last_c_thrd_idx = 0;\n@@ -315,7 +310,7 @@ mlx5_vdpa_dev_close(int vid)\n \tpthread_mutex_lock(&priv->steer_update_lock);\n \tmlx5_vdpa_steer_unset(priv);\n \tpthread_mutex_unlock(&priv->steer_update_lock);\n-\tmlx5_vdpa_virtqs_release(priv);\n+\tmlx5_vdpa_virtqs_release(priv, release_resource);\n \tmlx5_vdpa_drain_cq(priv);\n \tif (priv->lm_mr.addr)\n \t\tmlx5_os_wrapped_mkey_destroy(&priv->lm_mr);\n@@ -329,6 +324,24 @@ mlx5_vdpa_dev_close(int vid)\n \treturn ret;\n }\n \n+static int\n+mlx5_vdpa_dev_close(int vid)\n+{\n+\tstruct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);\n+\tstruct mlx5_vdpa_priv *priv;\n+\n+\tif (!vdev) {\n+\t\tDRV_LOG(ERR, \"Invalid vDPA device.\");\n+\t\treturn -1;\n+\t}\n+\tpriv = mlx5_vdpa_find_priv_resource_by_vdev(vdev);\n+\tif (priv == NULL) {\n+\t\tDRV_LOG(ERR, \"Invalid vDPA device: %s.\", vdev->device->name);\n+\t\treturn -1;\n+\t}\n+\treturn _internal_mlx5_vdpa_dev_close(priv, false);\n+}\n+\n static int\n mlx5_vdpa_dev_config(int vid)\n {\n@@ -624,11 +637,33 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,\n \t\tpriv->queue_size);\n }\n \n+void\n+mlx5_vdpa_prepare_virtq_destroy(struct mlx5_vdpa_priv *priv)\n+{\n+\tuint32_t max_queues, index;\n+\tstruct mlx5_vdpa_virtq *virtq;\n+\n+\tif (!priv->queues || !priv->queue_size)\n+\t\treturn;\n+\tmax_queues = ((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?\n+\t\t(priv->queues * 2) : (priv->caps.max_num_virtio_queues);\n+\tif (mlx5_vdpa_is_modify_virtq_supported(priv))\n+\t\tmlx5_vdpa_steer_unset(priv);\n+\tfor (index = 0; index < max_queues; ++index) {\n+\t\tvirtq = &priv->virtqs[index];\n+\t\tif (virtq->virtq) {\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\tmlx5_vdpa_virtq_unset(virtq);\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t}\n+\t}\n+}\n+\n static int\n mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)\n {\n-\tuint32_t 
max_queues;\n-\tuint32_t index;\n+\tuint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;\n+\tuint32_t max_queues, index, thrd_idx, data[1];\n \tstruct mlx5_vdpa_virtq *virtq;\n \n \tfor (index = 0; index < priv->caps.max_num_virtio_queues;\n@@ -640,25 +675,53 @@ mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)\n \t\treturn 0;\n \tmax_queues = (priv->queues < priv->caps.max_num_virtio_queues) ?\n \t\t(priv->queues * 2) : (priv->caps.max_num_virtio_queues);\n-\tfor (index = 0; index < max_queues; ++index)\n-\t\tif (mlx5_vdpa_virtq_single_resource_prepare(priv,\n-\t\t\tindex))\n+\tif (priv->use_c_thread) {\n+\t\tuint32_t main_task_idx[max_queues];\n+\n+\t\tfor (index = 0; index < max_queues; ++index) {\n+\t\t\tthrd_idx = index % (conf_thread_mng.max_thrds + 1);\n+\t\t\tif (!thrd_idx) {\n+\t\t\t\tmain_task_idx[task_num] = index;\n+\t\t\t\ttask_num++;\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tthrd_idx = priv->last_c_thrd_idx + 1;\n+\t\t\tif (thrd_idx >= conf_thread_mng.max_thrds)\n+\t\t\t\tthrd_idx = 0;\n+\t\t\tpriv->last_c_thrd_idx = thrd_idx;\n+\t\t\tdata[0] = index;\n+\t\t\tif (mlx5_vdpa_task_add(priv, thrd_idx,\n+\t\t\t\tMLX5_VDPA_TASK_PREPARE_VIRTQ,\n+\t\t\t\t&remaining_cnt, &err_cnt,\n+\t\t\t\t(void **)&data, 1)) {\n+\t\t\t\tDRV_LOG(ERR, \"Fail to add \"\n+\t\t\t\t\"task prepare virtq (%d).\", index);\n+\t\t\t\tmain_task_idx[task_num] = index;\n+\t\t\t\ttask_num++;\n+\t\t\t}\n+\t\t}\n+\t\tfor (index = 0; index < task_num; ++index)\n+\t\t\tif (mlx5_vdpa_virtq_single_resource_prepare(priv,\n+\t\t\t\tmain_task_idx[index]))\n+\t\t\t\tgoto error;\n+\t\tif (mlx5_vdpa_c_thread_wait_bulk_tasks_done(&remaining_cnt,\n+\t\t\t&err_cnt, 2000)) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\"Failed to wait virt-queue prepare tasks ready.\");\n \t\t\tgoto error;\n+\t\t}\n+\t} else {\n+\t\tfor (index = 0; index < max_queues; ++index)\n+\t\t\tif (mlx5_vdpa_virtq_single_resource_prepare(priv,\n+\t\t\t\tindex))\n+\t\t\t\tgoto error;\n+\t}\n \tif (mlx5_vdpa_is_modify_virtq_supported(priv))\n \t\tif (mlx5_vdpa_steer_update(priv, true))\n \t\t\tgoto error;\n \treturn 0;\n error:\n-\tfor (index = 0; index < max_queues; ++index) {\n-\t\tvirtq = &priv->virtqs[index];\n-\t\tif (virtq->virtq) {\n-\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n-\t\t\tmlx5_vdpa_virtq_unset(virtq);\n-\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n-\t\t}\n-\t}\n-\tif (mlx5_vdpa_is_modify_virtq_supported(priv))\n-\t\tmlx5_vdpa_steer_unset(priv);\n+\tmlx5_vdpa_prepare_virtq_destroy(priv);\n \treturn -1;\n }\n \n@@ -860,7 +923,7 @@ static void\n mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)\n {\n \tif (priv->state == MLX5_VDPA_STATE_CONFIGURED)\n-\t\tmlx5_vdpa_dev_close(priv->vid);\n+\t\t_internal_mlx5_vdpa_dev_close(priv, true);\n \tif (priv->use_c_thread)\n \t\tmlx5_vdpa_wait_dev_close_tasks_done(priv);\n \tmlx5_vdpa_release_dev_resources(priv);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex f353db62ac..dc4dfba5ed 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -85,6 +85,7 @@ enum mlx5_vdpa_task_type {\n \tMLX5_VDPA_TASK_SETUP_VIRTQ,\n \tMLX5_VDPA_TASK_STOP_VIRTQ,\n \tMLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,\n+\tMLX5_VDPA_TASK_PREPARE_VIRTQ,\n };\n \n /* Generic task information and size must be multiple of 4B. 
*/\n@@ -128,6 +129,9 @@ struct mlx5_vdpa_virtq {\n \tuint32_t configured:1;\n \tuint32_t enable:1;\n \tuint32_t stopped:1;\n+\tuint32_t rx_csum:1;\n+\tuint32_t virtio_version_1_0:1;\n+\tuint32_t event_mode:3;\n \tuint32_t version;\n \tpthread_mutex_t virtq_lock;\n \tstruct mlx5_vdpa_priv *priv;\n@@ -355,8 +359,12 @@ void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);\n  *\n  * @param[in] priv\n  *   The vdpa driver private structure.\n+ * @param[in] release_resource\n+ *   The vdpa driver release resource without prepare resource.\n  */\n-void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);\n+void\n+mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv,\n+\t\tbool release_resource);\n \n /**\n  * Cleanup cached resources of all virtqs.\n@@ -595,4 +603,6 @@ int\n mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp);\n void\n mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq);\n+void\n+mlx5_vdpa_prepare_virtq_destroy(struct mlx5_vdpa_priv *priv);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\nindex bb2279440b..6e6624e5a3 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n@@ -153,6 +153,7 @@ mlx5_vdpa_c_thread_handle(void *arg)\n \t\t\t\t__atomic_fetch_add(\n \t\t\t\t\ttask.err_cnt, 1, __ATOMIC_RELAXED);\n \t\t\t}\n+\t\t\tvirtq->enable = 1;\n \t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t\tbreak;\n \t\tcase MLX5_VDPA_TASK_STOP_VIRTQ:\n@@ -193,7 +194,7 @@ mlx5_vdpa_c_thread_handle(void *arg)\n \t\t\tpthread_mutex_lock(&priv->steer_update_lock);\n \t\t\tmlx5_vdpa_steer_unset(priv);\n \t\t\tpthread_mutex_unlock(&priv->steer_update_lock);\n-\t\t\tmlx5_vdpa_virtqs_release(priv);\n+\t\t\tmlx5_vdpa_virtqs_release(priv, false);\n \t\t\tmlx5_vdpa_drain_cq(priv);\n \t\t\tif (priv->lm_mr.addr)\n \t\t\t\tmlx5_os_wrapped_mkey_destroy(\n@@ -205,6 +206,18 @@ mlx5_vdpa_c_thread_handle(void *arg)\n \t\t\t\t&priv->dev_close_progress, 0,\n \t\t\t\t__ATOMIC_RELAXED);\n \t\t\tbreak;\n+\t\tcase MLX5_VDPA_TASK_PREPARE_VIRTQ:\n+\t\t\tret = mlx5_vdpa_virtq_single_resource_prepare(\n+\t\t\t\t\tpriv, task.idx);\n+\t\t\tif (ret) {\n+\t\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"Failed to prepare virtq %d.\",\n+\t\t\t\ttask.idx);\n+\t\t\t\t__atomic_fetch_add(\n+\t\t\t\ttask.err_cnt, 1,\n+\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\t}\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tDRV_LOG(ERR, \"Invalid vdpa task type %d.\",\n \t\t\ttask.type);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex 58466b3c0b..06a5c26947 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -116,18 +116,29 @@ mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv)\n \t}\n }\n \n+static void\n+mlx5_vdpa_vq_destroy(struct mlx5_vdpa_virtq *virtq)\n+{\n+\t/* Clean pre-created resource in dev removal only */\n+\tclaim_zero(mlx5_devx_cmd_destroy(virtq->virtq));\n+\tvirtq->index = 0;\n+\tvirtq->virtq = NULL;\n+\tvirtq->configured = 0;\n+}\n+\n /* Release cached VQ resources. 
*/\n void\n mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n {\n \tunsigned int i, j;\n \n+\tmlx5_vdpa_steer_unset(priv);\n \tfor (i = 0; i < priv->caps.max_num_virtio_queues; i++) {\n \t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];\n \n-\t\tif (virtq->index != i)\n-\t\t\tcontinue;\n \t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\tif (virtq->virtq)\n+\t\t\tmlx5_vdpa_vq_destroy(virtq);\n \t\tfor (j = 0; j < RTE_DIM(virtq->umems); ++j) {\n \t\t\tif (virtq->umems[j].obj) {\n \t\t\t\tclaim_zero(mlx5_glue->devx_umem_dereg\n@@ -157,29 +168,37 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n \t\tif (ret)\n \t\t\tDRV_LOG(WARNING, \"Failed to stop virtq %d.\",\n \t\t\t\tvirtq->index);\n-\t\tclaim_zero(mlx5_devx_cmd_destroy(virtq->virtq));\n-\t\tvirtq->index = 0;\n-\t\tvirtq->virtq = NULL;\n-\t\tvirtq->configured = 0;\n \t}\n+\tmlx5_vdpa_vq_destroy(virtq);\n \tvirtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;\n }\n \n void\n-mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)\n+mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv,\n+\tbool release_resource)\n {\n \tstruct mlx5_vdpa_virtq *virtq;\n-\tint i;\n-\n-\tfor (i = 0; i < priv->nr_virtqs; i++) {\n+\tuint32_t i, max_virtq, valid_vq_num;\n+\n+\tvalid_vq_num = ((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?\n+\t\t(priv->queues * 2) : priv->caps.max_num_virtio_queues;\n+\tmax_virtq = (release_resource &&\n+\t\t(valid_vq_num) > priv->nr_virtqs) ?\n+\t\t(valid_vq_num) : priv->nr_virtqs;\n+\tfor (i = 0; i < max_virtq; i++) {\n \t\tvirtq = &priv->virtqs[i];\n \t\tpthread_mutex_lock(&virtq->virtq_lock);\n \t\tmlx5_vdpa_virtq_unset(virtq);\n-\t\tif (i < (priv->queues * 2))\n+\t\tvirtq->enable = 0;\n+\t\tif (!release_resource && i < valid_vq_num)\n \t\t\tmlx5_vdpa_virtq_single_resource_prepare(\n \t\t\t\t\tpriv, i);\n \t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t}\n+\tif (!release_resource && priv->queues &&\n+\t\tmlx5_vdpa_is_modify_virtq_supported(priv))\n+\t\tif (mlx5_vdpa_steer_update(priv, true))\n+\t\t\tmlx5_vdpa_steer_unset(priv);\n \tpriv->features = 0;\n \tpriv->nr_virtqs = 0;\n }\n@@ -455,6 +474,9 @@ mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,\n \t\tvirtq->priv = priv;\n \t\tif (!virtq->virtq)\n \t\t\treturn true;\n+\t\tvirtq->rx_csum = attr.rx_csum;\n+\t\tvirtq->virtio_version_1_0 = attr.virtio_version_1_0;\n+\t\tvirtq->event_mode = attr.event_mode;\n \t}\n \treturn false;\n }\n@@ -538,6 +560,9 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick)\n \t\tgoto error;\n \t}\n \tclaim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));\n+\tvirtq->rx_csum = attr.rx_csum;\n+\tvirtq->virtio_version_1_0 = attr.virtio_version_1_0;\n+\tvirtq->event_mode = attr.event_mode;\n \tvirtq->configured = 1;\n \trte_spinlock_lock(&priv->db_lock);\n \trte_write32(virtq->index, priv->virtq_db_addr);\n@@ -629,6 +654,31 @@ mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)\n \treturn 0;\n }\n \n+static bool\n+mlx5_vdpa_is_pre_created_vq_mismatch(struct mlx5_vdpa_priv *priv,\n+\t\tstruct mlx5_vdpa_virtq *virtq)\n+{\n+\tstruct rte_vhost_vring vq;\n+\tuint32_t event_mode;\n+\n+\tif (virtq->rx_csum !=\n+\t\t!!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)))\n+\t\treturn true;\n+\tif (virtq->virtio_version_1_0 !=\n+\t\t!!(priv->features & (1ULL << VIRTIO_F_VERSION_1)))\n+\t\treturn true;\n+\tif (rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq))\n+\t\treturn true;\n+\tif (vq.size != virtq->vq_size)\n+\t\treturn true;\n+\tevent_mode = vq.callfd 
!= -1 || !(priv->caps.event_mode &\n+\t\t(1 << MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?\n+\t\tMLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;\n+\tif (virtq->event_mode != event_mode)\n+\t\treturn true;\n+\treturn false;\n+}\n+\n int\n mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n {\n@@ -664,6 +714,15 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t\t\tvirtq = &priv->virtqs[i];\n \t\t\tif (!virtq->enable)\n \t\t\t\tcontinue;\n+\t\t\tif (priv->queues && virtq->virtq) {\n+\t\t\t\tif (mlx5_vdpa_is_pre_created_vq_mismatch(priv, virtq)) {\n+\t\t\t\t\tmlx5_vdpa_prepare_virtq_destroy(priv);\n+\t\t\t\t\ti = 0;\n+\t\t\t\t\tvirtq = &priv->virtqs[i];\n+\t\t\t\t\tif (!virtq->enable)\n+\t\t\t\t\t\tcontinue;\n+\t\t\t\t}\n+\t\t\t}\n \t\t\tthrd_idx = i % (conf_thread_mng.max_thrds + 1);\n \t\t\tif (!thrd_idx) {\n \t\t\t\tmain_task_idx[task_num] = i;\n@@ -693,6 +752,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t\t\tgoto error;\n \t\t\t}\n+\t\t\tvirtq->enable = 1;\n \t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t}\n \t\tif (mlx5_vdpa_c_thread_wait_bulk_tasks_done(&remaining_cnt,\n@@ -724,20 +784,32 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t} else {\n \t\tfor (i = 0; i < nr_vring; i++) {\n \t\t\tvirtq = &priv->virtqs[i];\n+\t\t\tif (!virtq->enable)\n+\t\t\t\tcontinue;\n+\t\t\tif (priv->queues && virtq->virtq) {\n+\t\t\t\tif (mlx5_vdpa_is_pre_created_vq_mismatch(priv,\n+\t\t\t\t\tvirtq)) {\n+\t\t\t\t\tmlx5_vdpa_prepare_virtq_destroy(\n+\t\t\t\t\tpriv);\n+\t\t\t\t\ti = 0;\n+\t\t\t\t\tvirtq = &priv->virtqs[i];\n+\t\t\t\t\tif (!virtq->enable)\n+\t\t\t\t\t\tcontinue;\n+\t\t\t\t}\n+\t\t\t}\n \t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n-\t\t\tif (virtq->enable) {\n-\t\t\t\tif (mlx5_vdpa_virtq_setup(priv, i, true)) {\n-\t\t\t\t\tpthread_mutex_unlock(\n+\t\t\tif (mlx5_vdpa_virtq_setup(priv, i, true)) {\n+\t\t\t\tpthread_mutex_unlock(\n \t\t\t\t\t\t&virtq->virtq_lock);\n-\t\t\t\t\tgoto error;\n-\t\t\t\t}\n+\t\t\t\tgoto error;\n \t\t\t}\n+\t\t\tvirtq->enable = 1;\n \t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t}\n \t}\n \treturn 0;\n error:\n-\tmlx5_vdpa_virtqs_release(priv);\n+\tmlx5_vdpa_virtqs_release(priv, true);\n \treturn -1;\n }\n \n@@ -795,6 +867,11 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)\n \t\t\t\t\t\"for virtq %d.\", index);\n \t\t}\n \t\tmlx5_vdpa_virtq_unset(virtq);\n+\t} else {\n+\t\tif (virtq->virtq &&\n+\t\t\tmlx5_vdpa_is_pre_created_vq_mismatch(priv, virtq))\n+\t\t\tDRV_LOG(WARNING,\n+\t\t\t\"Configuration mismatch dummy virtq %d.\", index);\n \t}\n \tif (enable) {\n \t\tret = mlx5_vdpa_virtq_setup(priv, index, true);\n",
    "prefixes": [
        "v3",
        "15/15"
    ]
}
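
A typical follow-up is to retrieve the raw patch email through the "mbox"
link in the response above; a short sketch, assuming the JSON layout shown:

    import requests

    patch = requests.get("http://patches.dpdk.org/api/patches/113056/").json()
    with open("patch.mbox", "w") as f:
        f.write(requests.get(patch["mbox"]).text)
    # The saved file can then be applied with: git am patch.mbox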