get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update; all writable fields are replaced).
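
For example, a minimal Python sketch of the "get" operation above (the third-party requests package is an assumption; the exchange that follows shows the record it retrieves):

import requests

# Fetch the patch record as JSON; "?format=api" renders the browsable
# page, so ask for JSON explicitly via the Accept header.
resp = requests.get(
    "http://patches.dpdk.org/api/patches/113054/",
    headers={"Accept": "application/json"},
    timeout=30,
)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])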

GET /api/patches/113054/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 113054,
    "url": "http://patches.dpdk.org/api/patches/113054/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-14-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220618084805.87315-14-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220618084805.87315-14-lizh@nvidia.com",
    "date": "2022-06-18T08:48:03",
    "name": "[v3,13/15] vdpa/mlx5: add device close task",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c9680d5822b5126ec517bc40beba454a3ec60148",
    "submitter": {
        "id": 1967,
        "url": "http://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-14-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23621,
            "url": "http://patches.dpdk.org/api/series/23621/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23621",
            "date": "2022-06-18T08:47:50",
            "name": "mlx5/vdpa: optimize live migration time",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/23621/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/113054/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/113054/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D9674A0032;\n\tSat, 18 Jun 2022 10:49:55 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 01B3442B9D;\n\tSat, 18 Jun 2022 10:49:07 +0200 (CEST)",
            "from NAM10-BN7-obe.outbound.protection.outlook.com\n (mail-bn7nam10on2056.outbound.protection.outlook.com [40.107.92.56])\n by mails.dpdk.org (Postfix) with ESMTP id CF5BC42B98\n for <dev@dpdk.org>; Sat, 18 Jun 2022 10:49:05 +0200 (CEST)",
            "from BN9PR03CA0624.namprd03.prod.outlook.com (2603:10b6:408:106::29)\n by BN6PR1201MB2496.namprd12.prod.outlook.com (2603:10b6:404:a7::21)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5353.14; Sat, 18 Jun\n 2022 08:49:03 +0000",
            "from BN8NAM11FT056.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:106:cafe::e4) by BN9PR03CA0624.outlook.office365.com\n (2603:10b6:408:106::29) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5353.17 via Frontend\n Transport; Sat, 18 Jun 2022 08:49:03 +0000",
            "from mail.nvidia.com (12.22.5.235) by\n BN8NAM11FT056.mail.protection.outlook.com (10.13.177.26) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5353.14 via Frontend Transport; Sat, 18 Jun 2022 08:49:02 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Sat, 18 Jun 2022 08:49:01 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Sat, 18 Jun\n 2022 01:48:58 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=P826FOt9NwizHP72Nl4Tzr7zwB0TZ0XL0mWdLV3umBtuBNY6+V4DLqobgWQQF9nSrF6fXokSH9bDKBWX4vyWjKoCSFA4OKU+8Od/wPEeijIZ/iN3INWiMUDNjyCEytEGzDK0s2i3ysjtBTwnDP34hBcDM0RswHUwIAiQ+1jck6Sq7XbhD3u6WftXhVGUf5hFN0k96eoY8Gb/K4jjh/vOAuhybn6m26CIj9s4qr5n0wL63lretEi347EVyRtF/OIf/XCMiSrSdZaIjoNXHAoFCIjX8ij5O6uW52OjE6buzJ/fqoZ25S5uKLxXeDp2ruYlxHsds+tZjThcuFFGUsRByg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=Nz+wRkVDge4DU6PjzTuXMBY+lP6M1+PsyhD3LK3zrMk=;\n b=V0z3rFBN/pY5EekLPlvxHVEt6yT8Hg+PK1Dhta9dvXJmn6hPKDevSCEd9CfeRSd4kCROK+oO6hpkjuKBRB9M7zJuAAEUy4Fxb1Vcd1vN8JG+1QCfEcF6wfwgcTysDKH6r6XQ1zoflI5tQcWRIZaVFAD6EIcbuVkJ7qDiJK/uY0Rv//wTVAYkCkuFIWUoBZmPeuKgr8KajGfBajtoY1u2uod9kI+CSCZVvJtaAxW4Z0TbKTfyhqsRqwP4ZteilAIrJD0oCWpRk6EExfeVFY4/KdWRzzYuaatrYhozNPElkkPllvaebMcTedPe2WO7LJv3Y2sNX+4lnIN6/Am+1SIygQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.235) smtp.rcpttodomain=redhat.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=Nz+wRkVDge4DU6PjzTuXMBY+lP6M1+PsyhD3LK3zrMk=;\n b=QhB46OSW4O7uzaX9jS5oNyB9k1uTQkFWUlH97+tmln8vBXfmO8wtrJxyEuMpBADxevwe2oddqkQf1mRk1iiIQg1g+q4knc2wSx+QOYj6qVykxuzq58m8DGA57pyx5jNA1c3ExPdGR358zuBXcVZrMrMlo8dyYq0lEfKDY1Fz+hlFD3cGC5oWyGbQksuYAOo/FQzVx54aSAuMYCjgRS0YyevqUZQxEGXFTUJUpB7YMe26hCsQMyJh3ymgQzViBnIMqBLg3aGeE08TkidpJJvYBFijzl3I/m+qJwqWyTcOA17sbRIiFhmnrV3kGixpvr4td207M8phwFBkHdkd0eF4ZA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.235)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.235 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.235; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>, <maxime.coquelin@redhat.com>",
        "Subject": "[PATCH v3 13/15] vdpa/mlx5: add device close task",
        "Date": "Sat, 18 Jun 2022 11:48:03 +0300",
        "Message-ID": "<20220618084805.87315-14-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220618084805.87315-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220618084805.87315-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "1411d55c-666b-4ca0-596e-08da5107651b",
        "X-MS-TrafficTypeDiagnostic": "BN6PR1201MB2496:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <BN6PR1201MB2496116C9F37CBEE6712AE9BBFAE9@BN6PR1201MB2496.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n nDsk5WplATj1gC+HVTR/ll+B2EwMlQQE4eUHyRh+QRxe5GF6BayB1qGAtan241NHAcHRfGtJQadqFvBX/FJUzx+FjkRRCdnAzNyxBNdy4d0+xLJ0tqT98/T5IqvMOT3MThdrYCcCDI8avXYhYSAiIY6ReY0ZA0qOcTDmQXjrSm6c5QaQ1oCkaz/T5B10FWMPvW7tRFCMtSXoGW6B3O+06iF0EIbAdWDXMVCbFz9UOrrcOv+aXOwJFG31LjnEVBt25J9LE0MXFhu65ysGQfcAPtevVx9Zd5e2678+Z+w0cWSoIeMMW3izKczrJgBhDNnLFzOE56qqinGXxnY48gYesOXuvgdNQwwdc1mwQrL5AbTL0mBmxgkwzmgJwVhLagdXJxw1ASH7PQTlObQ9pHS2Hld1e4Jssj9uUtkINybBVR9pIMxqHVzYiP+k+gV1tUd18p/top3XnmyttzMMsDOrhAAhM/es8GoPYM1JcCvr5ylgwir+w/teUy3OYmP9670U9mg1z5De4dRtyuepPseLWCzvjMEYPkHd/CmTnh2YKKPjNpGfwkAhCuAO8rmSfGx6DAwL+304SvmZrkX/dRbRiLtHRT/CYWboDo+VHO0KhgAhsuyeyWQXjvAWUIbqBUpeUwoxGVMD+1Vjd2RYk0eC/gYrlTgYq6PpaHgulxJlwKmISY8lVAUOHP/7LJY//GjRqhLatIw9sKyMr22pVzCh/g==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.235; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230016)(4636009)(46966006)(40470700004)(36840700001)(82310400005)(36860700001)(36756003)(40460700003)(86362001)(316002)(356005)(81166007)(498600001)(5660300002)(2906002)(70586007)(6636002)(8676002)(8936002)(110136005)(4326008)(7696005)(186003)(47076005)(16526019)(55016003)(426003)(336012)(83380400001)(54906003)(1076003)(2616005)(6286002)(70206006)(26005)(6666004)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Jun 2022 08:49:02.7539 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 1411d55c-666b-4ca0-596e-08da5107651b",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.235];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT056.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN6PR1201MB2496",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Split the virtqs device close tasks after\nstopping virt-queue between the configuration threads.\nThis accelerates the LM process and\nreduces its time by 50%.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.c         | 56 +++++++++++++++++++++++++--\n drivers/vdpa/mlx5/mlx5_vdpa.h         |  8 ++++\n drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 20 +++++++++-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c   | 14 +++++++\n 4 files changed, 94 insertions(+), 4 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex e3b32fa087..d000854c08 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -245,7 +245,7 @@ mlx5_vdpa_mtu_set(struct mlx5_vdpa_priv *priv)\n \treturn kern_mtu == vhost_mtu ? 0 : -1;\n }\n \n-static void\n+void\n mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv)\n {\n \t/* Clean pre-created resource in dev removal only. */\n@@ -254,6 +254,26 @@ mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv)\n \tmlx5_vdpa_mem_dereg(priv);\n }\n \n+static bool\n+mlx5_vdpa_wait_dev_close_tasks_done(struct mlx5_vdpa_priv *priv)\n+{\n+\tuint32_t timeout = 0;\n+\n+\t/* Check and wait all close tasks done. */\n+\twhile (__atomic_load_n(&priv->dev_close_progress,\n+\t\t__ATOMIC_RELAXED) != 0 && timeout < 1000) {\n+\t\trte_delay_us_sleep(10000);\n+\t\ttimeout++;\n+\t}\n+\tif (priv->dev_close_progress) {\n+\t\tDRV_LOG(ERR,\n+\t\t\"Failed to wait close device tasks done vid %d.\",\n+\t\tpriv->vid);\n+\t\treturn true;\n+\t}\n+\treturn false;\n+}\n+\n static int\n mlx5_vdpa_dev_close(int vid)\n {\n@@ -271,6 +291,27 @@ mlx5_vdpa_dev_close(int vid)\n \t\tret |= mlx5_vdpa_lm_log(priv);\n \t\tpriv->state = MLX5_VDPA_STATE_IN_PROGRESS;\n \t}\n+\tif (priv->use_c_thread) {\n+\t\tif (priv->last_c_thrd_idx >=\n+\t\t\t(conf_thread_mng.max_thrds - 1))\n+\t\t\tpriv->last_c_thrd_idx = 0;\n+\t\telse\n+\t\t\tpriv->last_c_thrd_idx++;\n+\t\t__atomic_store_n(&priv->dev_close_progress,\n+\t\t\t1, __ATOMIC_RELAXED);\n+\t\tif (mlx5_vdpa_task_add(priv,\n+\t\t\tpriv->last_c_thrd_idx,\n+\t\t\tMLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,\n+\t\t\tNULL, NULL, NULL, 1)) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\"Fail to add dev close task. \");\n+\t\t\tgoto single_thrd;\n+\t\t}\n+\t\tpriv->state = MLX5_VDPA_STATE_PROBED;\n+\t\tDRV_LOG(INFO, \"vDPA device %d was closed.\", vid);\n+\t\treturn ret;\n+\t}\n+single_thrd:\n \tpthread_mutex_lock(&priv->steer_update_lock);\n \tmlx5_vdpa_steer_unset(priv);\n \tpthread_mutex_unlock(&priv->steer_update_lock);\n@@ -278,10 +319,12 @@ mlx5_vdpa_dev_close(int vid)\n \tmlx5_vdpa_drain_cq(priv);\n \tif (priv->lm_mr.addr)\n \t\tmlx5_os_wrapped_mkey_destroy(&priv->lm_mr);\n-\tpriv->state = MLX5_VDPA_STATE_PROBED;\n \tif (!priv->connected)\n \t\tmlx5_vdpa_dev_cache_clean(priv);\n \tpriv->vid = 0;\n+\t__atomic_store_n(&priv->dev_close_progress, 0,\n+\t\t__ATOMIC_RELAXED);\n+\tpriv->state = MLX5_VDPA_STATE_PROBED;\n \tDRV_LOG(INFO, \"vDPA device %d was closed.\", vid);\n \treturn ret;\n }\n@@ -302,6 +345,8 @@ mlx5_vdpa_dev_config(int vid)\n \t\tDRV_LOG(ERR, \"Failed to reconfigure vid %d.\", vid);\n \t\treturn -1;\n \t}\n+\tif (mlx5_vdpa_wait_dev_close_tasks_done(priv))\n+\t\treturn -1;\n \tpriv->vid = vid;\n \tpriv->connected = true;\n \tif (mlx5_vdpa_mtu_set(priv))\n@@ -444,8 +489,11 @@ mlx5_vdpa_dev_cleanup(int vid)\n \t\tDRV_LOG(ERR, \"Invalid vDPA device: %s.\", vdev->device->name);\n \t\treturn -1;\n \t}\n-\tif (priv->state == MLX5_VDPA_STATE_PROBED)\n+\tif (priv->state == MLX5_VDPA_STATE_PROBED) {\n+\t\tif (priv->use_c_thread)\n+\t\t\tmlx5_vdpa_wait_dev_close_tasks_done(priv);\n \t\tmlx5_vdpa_dev_cache_clean(priv);\n+\t}\n \tpriv->connected = false;\n \treturn 0;\n }\n@@ -839,6 +887,8 @@ mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)\n {\n \tif (priv->state == MLX5_VDPA_STATE_CONFIGURED)\n \t\tmlx5_vdpa_dev_close(priv->vid);\n+\tif (priv->use_c_thread)\n+\t\tmlx5_vdpa_wait_dev_close_tasks_done(priv);\n \tmlx5_vdpa_release_dev_resources(priv);\n \tif (priv->vdev)\n 
\t\trte_vdpa_unregister_device(priv->vdev);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex e08931719f..b6392b9d66 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -84,6 +84,7 @@ enum mlx5_vdpa_task_type {\n \tMLX5_VDPA_TASK_REG_MR = 1,\n \tMLX5_VDPA_TASK_SETUP_VIRTQ,\n \tMLX5_VDPA_TASK_STOP_VIRTQ,\n+\tMLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,\n };\n \n /* Generic task information and size must be multiple of 4B. */\n@@ -206,6 +207,7 @@ struct mlx5_vdpa_priv {\n \tuint64_t features; /* Negotiated features. */\n \tuint16_t log_max_rqt_size;\n \tuint16_t last_c_thrd_idx;\n+\tuint16_t dev_close_progress;\n \tuint16_t num_mrs; /* Number of memory regions. */\n \tstruct mlx5_vdpa_steer steer;\n \tstruct mlx5dv_var *var;\n@@ -578,4 +580,10 @@ mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,\n \t\tuint32_t *err_cnt, uint32_t sleep_time);\n int\n mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);\n+void\n+mlx5_vdpa_vq_destroy(struct mlx5_vdpa_virtq *virtq);\n+void\n+mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv);\n+void\n+mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\nindex 98369f0887..bb2279440b 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n@@ -63,7 +63,8 @@ mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,\n \t\ttask[i].type = task_type;\n \t\ttask[i].remaining_cnt = remaining_cnt;\n \t\ttask[i].err_cnt = err_cnt;\n-\t\ttask[i].idx = data[i];\n+\t\tif (data)\n+\t\t\ttask[i].idx = data[i];\n \t}\n \tif (!mlx5_vdpa_c_thrd_ring_enqueue_bulk(rng, (void **)&task, num, NULL))\n \t\treturn -1;\n@@ -187,6 +188,23 @@ mlx5_vdpa_c_thread_handle(void *arg)\n \t\t\t    MLX5_VDPA_USED_RING_LEN(virtq->vq_size));\n \t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t\tbreak;\n+\t\tcase MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT:\n+\t\t\tmlx5_vdpa_virtq_unreg_intr_handle_all(priv);\n+\t\t\tpthread_mutex_lock(&priv->steer_update_lock);\n+\t\t\tmlx5_vdpa_steer_unset(priv);\n+\t\t\tpthread_mutex_unlock(&priv->steer_update_lock);\n+\t\t\tmlx5_vdpa_virtqs_release(priv);\n+\t\t\tmlx5_vdpa_drain_cq(priv);\n+\t\t\tif (priv->lm_mr.addr)\n+\t\t\t\tmlx5_os_wrapped_mkey_destroy(\n+\t\t\t\t\t&priv->lm_mr);\n+\t\t\tif (!priv->connected)\n+\t\t\t\tmlx5_vdpa_dev_cache_clean(priv);\n+\t\t\tpriv->vid = 0;\n+\t\t\t__atomic_store_n(\n+\t\t\t\t&priv->dev_close_progress, 0,\n+\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tDRV_LOG(ERR, \"Invalid vdpa task type %d.\",\n \t\t\ttask.type);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex 50d59a8394..79d48a6569 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -102,6 +102,20 @@ mlx5_vdpa_virtq_unregister_intr_handle(struct mlx5_vdpa_virtq *virtq)\n \tvirtq->intr_handle = NULL;\n }\n \n+void\n+mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv)\n+{\n+\tuint32_t i;\n+\tstruct mlx5_vdpa_virtq *virtq;\n+\n+\tfor (i = 0; i < priv->nr_virtqs; i++) {\n+\t\tvirtq = &priv->virtqs[i];\n+\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\tmlx5_vdpa_virtq_unregister_intr_handle(virtq);\n+\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t}\n+}\n+\n /* Release cached VQ resources. */\n void\n mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n",
    "prefixes": [
        "v3",
        "13/15"
    ]
}
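
The "patch"/"put" operations require an authenticated user with maintainer rights on the project. A hedged Python sketch of a partial update follows; the API token is a hypothetical placeholder, and the writable fields and "Token" auth scheme follow Patchwork's REST conventions, so check your instance's schema:

import requests

API_TOKEN = "your-api-token"  # hypothetical placeholder; Patchwork issues per-user tokens

# PATCH is a partial update: only the fields named in the body change.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/113054/",
    headers={"Authorization": "Token " + API_TOKEN},
    json={"state": "accepted"},
    timeout=30,
)
resp.raise_for_status()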