get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all of its writable fields.
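
For example, a patch can be read anonymously, while PATCH and PUT require an authenticated account with maintainer rights on the project. Below is a minimal sketch using Python's requests library; the token and the target state are placeholders, and the exact set of writable fields (state, delegate, archived, ...) depends on the server's configuration:

import requests

API = "https://patches.dpdk.org/api"
TOKEN = "..."  # placeholder: a Patchwork API token with maintainer rights

# GET: show a patch (read access needs no authentication).
patch = requests.get(f"{API}/patches/112359/").json()
print(patch["name"], patch["state"])

# PATCH: partial update -- only the fields sent in the request change.
resp = requests.patch(
    f"{API}/patches/112359/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},  # assumed state slug; valid values are server-defined
)
resp.raise_for_status()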

GET /api/patches/112359/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 112359,
    "url": "http://patches.dpdk.org/api/patches/112359/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-18-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220606112109.208873-18-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220606112109.208873-18-lizh@nvidia.com",
    "date": "2022-06-06T11:20:54",
    "name": "[v1,09/17] vdpa/mlx5: optimize datapath-control synchronization",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "0767396c1a1063657fcb04435f56851e52c95b55",
    "submitter": {
        "id": 1967,
        "url": "http://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-18-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23347,
            "url": "http://patches.dpdk.org/api/series/23347/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23347",
            "date": "2022-06-06T11:20:54",
            "name": null,
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/23347/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/112359/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/112359/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 02163A0543;\n\tMon,  6 Jun 2022 13:23:54 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id DD6DB42BBF;\n\tMon,  6 Jun 2022 13:22:31 +0200 (CEST)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2058.outbound.protection.outlook.com [40.107.236.58])\n by mails.dpdk.org (Postfix) with ESMTP id 2D8F842BB4\n for <dev@dpdk.org>; Mon,  6 Jun 2022 13:22:30 +0200 (CEST)",
            "from BN1PR13CA0014.namprd13.prod.outlook.com (2603:10b6:408:e2::19)\n by BN6PR12MB1588.namprd12.prod.outlook.com (2603:10b6:405:7::23) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.12; Mon, 6 Jun\n 2022 11:22:28 +0000",
            "from BN8NAM11FT004.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:e2:cafe::7e) by BN1PR13CA0014.outlook.office365.com\n (2603:10b6:408:e2::19) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5332.10 via Frontend\n Transport; Mon, 6 Jun 2022 11:22:28 +0000",
            "from mail.nvidia.com (12.22.5.234) by\n BN8NAM11FT004.mail.protection.outlook.com (10.13.176.164) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5314.12 via Frontend Transport; Mon, 6 Jun 2022 11:22:27 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL101.nvidia.com\n (10.27.9.10) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Mon, 6 Jun 2022 11:22:26 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Mon, 6 Jun 2022\n 04:22:23 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=XuWeUlfy+5JRhGJbid5qWcspORYzc3MFCfXrtBYKp2IqQkKMbLgpZfhO5eLjozdlErDlQ5FTfeZpuivP7MYhv356eF9G80gZaQII6P7CjfPcaX8OZUM2khIR/krloGPMabUJiH9muqjX4USj2becijN2geJlztJuSI1ODFfxA+WDXr1hBemx/393rx9NWnqV/sjA9BkD3aPmGDYRc0Hs0Rnz15RAU0EuuUsGaUYQFtRIOHmvGyMUtfEv+sxXQu+cmpYVXSzxuc1BVu/fyBwF9bjlNsvB0JiFA5BVwtNuGhjYM9FrTqpEZxNNO++1b/sNBtcBzTJuF9sUwSKd94JE+g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=isW+ckUr9Ps+iVzLqK0DbzwxpE7EafRBgztliaixwJE=;\n b=cABS1v6TJLAh+XHLLcnEfmF/EHA6ri2CYw2eZeESL+YA0ylHhivIApjIWBTA81vTGD9fN/IGMktcVQoxvg3bb1/1zYbOuzgI6Mdt3AOvGtELeSO1BO4Fm5IIU7hXsSqshfiJWjXcZi4MrFRmWTqw0RLpUHISg85qUdOxrNOIY3PGtl5rboRhLmXdLDSvviDI5d2MC0mHafRCrZ/E8gfWC3HARJQUE0YAXobOUc+WpIr5woUa1nL5G9Wj6TbnvtY4qEY9AuGKTwU99wSUQW7C0CXllYW2cTByZElYb6+awZOkCoNkAMIvxc4vMiMjCneqAz28aGH6Rf/VQBvuzEoYyw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.234) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=isW+ckUr9Ps+iVzLqK0DbzwxpE7EafRBgztliaixwJE=;\n b=N+is8wuD1A4oiIiYJ92Ewp+lDppk66KGUzpaC0CVVFbW/bgxTVqbihm8uTYsT5OUY+vg2aLnzmFjjaokeThJDErBvYwI2ZSwte9Xyu6Zfx5wILQ9QGFVN+F7wuW/CYll2AWaF7VP8sYJ+8QzMuA/6+lorqPMum6nGNmhadDkLWNkYDjWHN9nDIXcXB5nYqK8TjUOzO1AitQPL5DG6ICH93NIRVDDSR2A70NA3C+NwiGevdM9VAavoJsuRopR7Yk6ho5EbowduJPo0+xbOatONQ/qDAWc/wOVE3JbBcToBPWPGlmCqK/Y3pMj/Z24TtJgRBoUuM8EeHjI0ipDnkWBVQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.234)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.234 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.234; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>",
        "Subject": "[PATCH v1 09/17] vdpa/mlx5: optimize datapath-control synchronization",
        "Date": "Mon, 6 Jun 2022 14:20:54 +0300",
        "Message-ID": "<20220606112109.208873-18-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220606112109.208873-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220606112109.208873-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "e4bc48e2-9615-4083-d4f1-08da47aed6c0",
        "X-MS-TrafficTypeDiagnostic": "BN6PR12MB1588:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <BN6PR12MB1588A19A21E2B0C45BF78EEDBFA29@BN6PR12MB1588.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n Fm5JCLPV+dfgL4/XK9St2hFt5L60FU5KiFsHuz1e7oxN57D0/PQL/sehomqphx4r3ZX/QZ7YledLzHKn0vcchSfvsxhwgE19+CBDqFBECHNXMJ528NjbOIcP9oVi2ZxjJ8Ni620hXaQ91PXtt1hxxMAL8HaKrjd8GQx4lBF9TC8FLefPgObpFmsTnQlPAyA5powffJZdztcursvrN/0jwYjRbLwhdVvN1foYwqgK/M/0nRpRUQRFPwhvtPOC50unFNyhabGcq25mst9seqB06YRo28ZCf7qZiJfwpT62dUCAIInPKQ0Qyi1SZpI3lzBhblT2ApnqO9CIqOenF2GxJFpNx7pJDQwU3sbpomzF+f9sAvmtXiQPi/esnU0uZ8MrdRzUGBrl7F/TdlckLFbWjrY9nJY5ThiC1rkxdXEYuazILlqge250f2wc5+OXD3CjXDwEiC8dCUvR2W+RZaYY6qsX0Ff2iR9GEqGfMMFckMY4jD9JTpkRVH6kePhfFju/pvmZGsLQUkUhLhjhMeNp4DosYIurJoMz8Ai7HnMkKn0DICc7PQAJ0fWmpyBEfQni8cZlMT0n05dPQ+azbgAu2dSKk8vIMp9XvOwq4CoUKPXLg7A7jBknuBPxwYCmo792lzqi8UVjKnkYt2jFDcq7p3mk7A23ULdvx38uWH9RGL1UCL/4Go0XqtVRRcm8+5uLOG3jKGnJImJNKpF5fU1dSg==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.234; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(40470700004)(36840700001)(46966006)(2616005)(316002)(1076003)(8936002)(6636002)(36860700001)(107886003)(336012)(47076005)(26005)(426003)(55016003)(6286002)(40460700003)(186003)(86362001)(110136005)(83380400001)(7696005)(16526019)(36756003)(8676002)(70206006)(81166007)(54906003)(4326008)(5660300002)(508600001)(30864003)(356005)(2906002)(70586007)(82310400005)(6666004)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Jun 2022 11:22:27.7002 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n e4bc48e2-9615-4083-d4f1-08da47aed6c0",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.234];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT004.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN6PR12MB1588",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The driver used a single global lock for any synchronization\nneeded for the datapath and control path.\nIt is better to group the critical sections with\nthe other ones that should be synchronized.\n\nReplace the global lock with the following locks:\n\n1.virtq locks(per virtq) synchronize datapath polling and\n  parallel configurations on the same virtq.\n2.A doorbell lock synchronizes doorbell update,\n  which is shared for all the virtqs in the device.\n3.A steering lock for the shared steering objects updates.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.c       | 24 ++++---\n drivers/vdpa/mlx5/mlx5_vdpa.h       | 13 ++--\n drivers/vdpa/mlx5/mlx5_vdpa_event.c | 97 ++++++++++++++++++-----------\n drivers/vdpa/mlx5/mlx5_vdpa_lm.c    | 34 +++++++---\n drivers/vdpa/mlx5/mlx5_vdpa_steer.c |  7 ++-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 88 +++++++++++++++++++-------\n 6 files changed, 184 insertions(+), 79 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex ee99952e11..e5a11f72fd 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -135,6 +135,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)\n \tstruct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);\n \tstruct mlx5_vdpa_priv *priv =\n \t\tmlx5_vdpa_find_priv_resource_by_vdev(vdev);\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tint ret;\n \n \tif (priv == NULL) {\n@@ -145,9 +146,10 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)\n \t\tDRV_LOG(ERR, \"Too big vring id: %d.\", vring);\n \t\treturn -E2BIG;\n \t}\n-\tpthread_mutex_lock(&priv->vq_config_lock);\n+\tvirtq = &priv->virtqs[vring];\n+\tpthread_mutex_lock(&virtq->virtq_lock);\n \tret = mlx5_vdpa_virtq_enable(priv, vring, state);\n-\tpthread_mutex_unlock(&priv->vq_config_lock);\n+\tpthread_mutex_unlock(&virtq->virtq_lock);\n \treturn ret;\n }\n \n@@ -267,7 +269,9 @@ mlx5_vdpa_dev_close(int vid)\n \t\tret |= mlx5_vdpa_lm_log(priv);\n \t\tpriv->state = MLX5_VDPA_STATE_IN_PROGRESS;\n \t}\n+\tpthread_mutex_lock(&priv->steer_update_lock);\n \tmlx5_vdpa_steer_unset(priv);\n+\tpthread_mutex_unlock(&priv->steer_update_lock);\n \tmlx5_vdpa_virtqs_release(priv);\n \tmlx5_vdpa_drain_cq(priv);\n \tif (priv->lm_mr.addr)\n@@ -276,8 +280,6 @@ mlx5_vdpa_dev_close(int vid)\n \tif (!priv->connected)\n \t\tmlx5_vdpa_dev_cache_clean(priv);\n \tpriv->vid = 0;\n-\t/* The mutex may stay locked after event thread cancel - initiate it. */\n-\tpthread_mutex_init(&priv->vq_config_lock, NULL);\n \tDRV_LOG(INFO, \"vDPA device %d was closed.\", vid);\n \treturn ret;\n }\n@@ -549,15 +551,21 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,\n static int\n mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)\n {\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tuint32_t index;\n \tuint32_t i;\n \n+\tfor (index = 0; index < priv->caps.max_num_virtio_queues * 2;\n+\t\tindex++) {\n+\t\tvirtq = &priv->virtqs[index];\n+\t\tpthread_mutex_init(&virtq->virtq_lock, NULL);\n+\t}\n \tif (!priv->queues)\n \t\treturn 0;\n \tfor (index = 0; index < (priv->queues * 2); ++index) {\n-\t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];\n+\t\tvirtq = &priv->virtqs[index];\n \t\tint ret = mlx5_vdpa_event_qp_prepare(priv, priv->queue_size,\n-\t\t\t\t\t-1, &virtq->eqp);\n+\t\t\t\t\t-1, virtq);\n \n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Failed to create event QPs for virtq %d.\",\n@@ -713,7 +721,8 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,\n \tpriv->num_lag_ports = attr->num_lag_ports;\n \tif (attr->num_lag_ports == 0)\n \t\tpriv->num_lag_ports = 1;\n-\tpthread_mutex_init(&priv->vq_config_lock, NULL);\n+\trte_spinlock_init(&priv->db_lock);\n+\tpthread_mutex_init(&priv->steer_update_lock, NULL);\n \tpriv->cdev = cdev;\n \tmlx5_vdpa_config_get(mkvlist, priv);\n \tif (mlx5_vdpa_create_dev_resources(priv))\n@@ -797,7 +806,6 @@ mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)\n \tmlx5_vdpa_release_dev_resources(priv);\n \tif (priv->vdev)\n \t\trte_vdpa_unregister_device(priv->vdev);\n-\tpthread_mutex_destroy(&priv->vq_config_lock);\n \trte_free(priv);\n }\n \ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex e5553079fe..3fd5eefc5e 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -82,6 +82,7 @@ struct mlx5_vdpa_virtq {\n \tbool stopped;\n \tuint32_t configured:1;\n \tuint32_t version;\n+\tpthread_mutex_t virtq_lock;\n \tstruct mlx5_vdpa_priv *priv;\n 
\tstruct mlx5_devx_obj *virtq;\n \tstruct mlx5_devx_obj *counters;\n@@ -126,7 +127,8 @@ struct mlx5_vdpa_priv {\n \tTAILQ_ENTRY(mlx5_vdpa_priv) next;\n \tbool connected;\n \tenum mlx5_dev_state state;\n-\tpthread_mutex_t vq_config_lock;\n+\trte_spinlock_t db_lock;\n+\tpthread_mutex_t steer_update_lock;\n \tuint64_t no_traffic_counter;\n \tpthread_t timer_tid;\n \tint event_mode;\n@@ -222,14 +224,15 @@ int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);\n  *   Number of descriptors.\n  * @param[in] callfd\n  *   The guest notification file descriptor.\n- * @param[in/out] eqp\n- *   Pointer to the event QP structure.\n+ * @param[in/out] virtq\n+ *   Pointer to the virt-queue structure.\n  *\n  * @return\n  *   0 on success, -1 otherwise and rte_errno is set.\n  */\n-int mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n-\t\t\t      int callfd, struct mlx5_vdpa_event_qp *eqp);\n+int\n+mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n+\tint callfd, struct mlx5_vdpa_virtq *virtq);\n \n /**\n  * Destroy an event QP and all its related resources.\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\nindex b43dca9255..2b0f5936d1 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n@@ -85,12 +85,13 @@ mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)\n \n static int\n mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,\n-\t\t    int callfd, struct mlx5_vdpa_cq *cq)\n+\t\tint callfd, struct mlx5_vdpa_virtq *virtq)\n {\n \tstruct mlx5_devx_cq_attr attr = {\n \t\t.use_first_only = 1,\n \t\t.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),\n \t};\n+\tstruct mlx5_vdpa_cq *cq = &virtq->eqp.cq;\n \tuint16_t event_nums[1] = {0};\n \tint ret;\n \n@@ -102,10 +103,11 @@ mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,\n \tcq->log_desc_n = log_desc_n;\n \trte_spinlock_init(&cq->sl);\n \t/* Subscribe CQ event to the event channel controlled by the driver. 
*/\n-\tret = mlx5_os_devx_subscribe_devx_event(priv->eventc,\n-\t\t\t\t\t\tcq->cq_obj.cq->obj,\n-\t\t\t\t\t\tsizeof(event_nums), event_nums,\n-\t\t\t\t\t\t(uint64_t)(uintptr_t)cq);\n+\tret = mlx5_glue->devx_subscribe_devx_event(priv->eventc,\n+\t\t\t\t\t\t\tcq->cq_obj.cq->obj,\n+\t\t\t\t\t\t   sizeof(event_nums),\n+\t\t\t\t\t\t   event_nums,\n+\t\t\t\t\t\t   (uint64_t)(uintptr_t)virtq);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to subscribe CQE event.\");\n \t\trte_errno = errno;\n@@ -167,13 +169,17 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)\n static void\n mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)\n {\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tstruct mlx5_vdpa_cq *cq;\n \tint i;\n \n \tfor (i = 0; i < priv->nr_virtqs; i++) {\n+\t\tvirtq = &priv->virtqs[i];\n+\t\tpthread_mutex_lock(&virtq->virtq_lock);\n \t\tcq = &priv->virtqs[i].eqp.cq;\n \t\tif (cq->cq_obj.cq && !cq->armed)\n \t\t\tmlx5_vdpa_cq_arm(priv, cq);\n+\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t}\n }\n \n@@ -220,13 +226,18 @@ mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)\n static uint32_t\n mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)\n {\n-\tint i;\n+\tstruct mlx5_vdpa_virtq *virtq;\n+\tstruct mlx5_vdpa_cq *cq;\n \tuint32_t max = 0;\n+\tuint32_t comp;\n+\tint i;\n \n \tfor (i = 0; i < priv->nr_virtqs; i++) {\n-\t\tstruct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;\n-\t\tuint32_t comp = mlx5_vdpa_queue_complete(cq);\n-\n+\t\tvirtq = &priv->virtqs[i];\n+\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\tcq = &virtq->eqp.cq;\n+\t\tcomp = mlx5_vdpa_queue_complete(cq);\n+\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\tif (comp > max)\n \t\t\tmax = comp;\n \t}\n@@ -253,7 +264,7 @@ mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)\n }\n \n /* Wait on all CQs channel for completion event. 
*/\n-static struct mlx5_vdpa_cq *\n+static struct mlx5_vdpa_virtq *\n mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)\n {\n #ifdef HAVE_IBV_DEVX_EVENT\n@@ -265,7 +276,8 @@ mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)\n \t\t\t\t\t    sizeof(out.buf));\n \n \tif (ret >= 0)\n-\t\treturn (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;\n+\t\treturn (struct mlx5_vdpa_virtq *)\n+\t\t\t\t(uintptr_t)out.event_resp.cookie;\n \tDRV_LOG(INFO, \"Got error in devx_get_event, ret = %d, errno = %d.\",\n \t\tret, errno);\n #endif\n@@ -276,7 +288,7 @@ static void *\n mlx5_vdpa_event_handle(void *arg)\n {\n \tstruct mlx5_vdpa_priv *priv = arg;\n-\tstruct mlx5_vdpa_cq *cq;\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tuint32_t max;\n \n \tswitch (priv->event_mode) {\n@@ -284,7 +296,6 @@ mlx5_vdpa_event_handle(void *arg)\n \tcase MLX5_VDPA_EVENT_MODE_FIXED_TIMER:\n \t\tpriv->timer_delay_us = priv->event_us;\n \t\twhile (1) {\n-\t\t\tpthread_mutex_lock(&priv->vq_config_lock);\n \t\t\tmax = mlx5_vdpa_queues_complete(priv);\n \t\t\tif (max == 0 && priv->no_traffic_counter++ >=\n \t\t\t    priv->no_traffic_max) {\n@@ -292,32 +303,37 @@ mlx5_vdpa_event_handle(void *arg)\n \t\t\t\t\tpriv->vdev->device->name);\n \t\t\t\tmlx5_vdpa_arm_all_cqs(priv);\n \t\t\t\tdo {\n-\t\t\t\t\tpthread_mutex_unlock\n-\t\t\t\t\t\t\t(&priv->vq_config_lock);\n-\t\t\t\t\tcq = mlx5_vdpa_event_wait(priv);\n-\t\t\t\t\tpthread_mutex_lock\n-\t\t\t\t\t\t\t(&priv->vq_config_lock);\n-\t\t\t\t\tif (cq == NULL ||\n-\t\t\t\t\t       mlx5_vdpa_queue_complete(cq) > 0)\n+\t\t\t\t\tvirtq = mlx5_vdpa_event_wait(priv);\n+\t\t\t\t\tif (virtq == NULL)\n \t\t\t\t\t\tbreak;\n+\t\t\t\t\tpthread_mutex_lock(\n+\t\t\t\t\t\t&virtq->virtq_lock);\n+\t\t\t\t\tif (mlx5_vdpa_queue_complete(\n+\t\t\t\t\t\t&virtq->eqp.cq) > 0) {\n+\t\t\t\t\t\tpthread_mutex_unlock(\n+\t\t\t\t\t\t\t&virtq->virtq_lock);\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\t}\n+\t\t\t\t\tpthread_mutex_unlock(\n+\t\t\t\t\t\t&virtq->virtq_lock);\n \t\t\t\t} while (1);\n \t\t\t\tpriv->timer_delay_us = priv->event_us;\n \t\t\t\tpriv->no_traffic_counter = 0;\n \t\t\t} else if (max != 0) {\n \t\t\t\tpriv->no_traffic_counter = 0;\n \t\t\t}\n-\t\t\tpthread_mutex_unlock(&priv->vq_config_lock);\n \t\t\tmlx5_vdpa_timer_sleep(priv, max);\n \t\t}\n \t\treturn NULL;\n \tcase MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:\n \t\tdo {\n-\t\t\tcq = mlx5_vdpa_event_wait(priv);\n-\t\t\tif (cq != NULL) {\n-\t\t\t\tpthread_mutex_lock(&priv->vq_config_lock);\n-\t\t\t\tif (mlx5_vdpa_queue_complete(cq) > 0)\n-\t\t\t\t\tmlx5_vdpa_cq_arm(priv, cq);\n-\t\t\t\tpthread_mutex_unlock(&priv->vq_config_lock);\n+\t\t\tvirtq = mlx5_vdpa_event_wait(priv);\n+\t\t\tif (virtq != NULL) {\n+\t\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\t\tif (mlx5_vdpa_queue_complete(\n+\t\t\t\t\t&virtq->eqp.cq) > 0)\n+\t\t\t\t\tmlx5_vdpa_cq_arm(priv, &virtq->eqp.cq);\n+\t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t\t}\n \t\t} while (1);\n \t\treturn NULL;\n@@ -339,7 +355,6 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)\n \tstruct mlx5_vdpa_virtq *virtq;\n \tuint64_t sec;\n \n-\tpthread_mutex_lock(&priv->vq_config_lock);\n \twhile (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,\n \t\t\t\t\t sizeof(out.buf)) >=\n \t\t\t\t       (ssize_t)sizeof(out.event_resp.cookie)) {\n@@ -351,10 +366,11 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)\n \t\t\tcontinue;\n \t\t}\n \t\tvirtq = &priv->virtqs[vq_index];\n+\t\tpthread_mutex_lock(&virtq->virtq_lock);\n \t\tif (!virtq->enable || virtq->version != 
version)\n-\t\t\tcontinue;\n+\t\t\tgoto unlock;\n \t\tif (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)\n-\t\t\tcontinue;\n+\t\t\tgoto unlock;\n \t\tvirtq->stopped = true;\n \t\t/* Query error info. */\n \t\tif (mlx5_vdpa_virtq_query(priv, vq_index))\n@@ -384,8 +400,9 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)\n \t\tfor (i = 1; i < RTE_DIM(virtq->err_time); i++)\n \t\t\tvirtq->err_time[i - 1] = virtq->err_time[i];\n \t\tvirtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();\n+unlock:\n+\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t}\n-\tpthread_mutex_unlock(&priv->vq_config_lock);\n #endif\n }\n \n@@ -533,11 +550,18 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)\n void\n mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)\n {\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tvoid *status;\n+\tint i;\n \n \tif (priv->timer_tid) {\n \t\tpthread_cancel(priv->timer_tid);\n \t\tpthread_join(priv->timer_tid, &status);\n+\t\t/* The mutex may stay locked after event thread cancel, initiate it. */\n+\t\tfor (i = 0; i < priv->nr_virtqs; i++) {\n+\t\t\tvirtq = &priv->virtqs[i];\n+\t\t\tpthread_mutex_init(&virtq->virtq_lock, NULL);\n+\t\t}\n \t}\n \tpriv->timer_tid = 0;\n }\n@@ -614,8 +638,9 @@ mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)\n \n int\n mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n-\t\t\t  int callfd, struct mlx5_vdpa_event_qp *eqp)\n+\tint callfd, struct mlx5_vdpa_virtq *virtq)\n {\n+\tstruct mlx5_vdpa_event_qp *eqp = &virtq->eqp;\n \tstruct mlx5_devx_qp_attr attr = {0};\n \tuint16_t log_desc_n = rte_log2_u32(desc_n);\n \tuint32_t ret;\n@@ -632,7 +657,8 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t}\n \tif (eqp->fw_qp)\n \t\tmlx5_vdpa_event_qp_destroy(eqp);\n-\tif (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))\n+\tif (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, virtq) ||\n+\t\t!eqp->cq.cq_obj.cq)\n \t\treturn -1;\n \tattr.pd = priv->cdev->pdn;\n \tattr.ts_format =\n@@ -650,8 +676,8 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \tattr.ts_format =\n \t\tmlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);\n \tret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp),\n-\t\t\t\t\tattr.num_of_receive_wqes *\n-\t\t\t\t\tMLX5_WSEG_SIZE, &attr, SOCKET_ID_ANY);\n+\t\t\t\t  attr.num_of_receive_wqes * MLX5_WSEG_SIZE,\n+\t\t\t\t  &attr, SOCKET_ID_ANY);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to create SW QP(%u).\", rte_errno);\n \t\tgoto error;\n@@ -668,3 +694,4 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \tmlx5_vdpa_event_qp_destroy(eqp);\n \treturn -1;\n }\n+\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c\nindex a8faf0c116..efebf364d0 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c\n@@ -25,11 +25,18 @@ mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable)\n \t\tif (!virtq->configured) {\n \t\t\tDRV_LOG(DEBUG, \"virtq %d is invalid for dirty bitmap \"\n \t\t\t\t\"enabling.\", i);\n-\t\t} else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,\n+\t\t} else {\n+\t\t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];\n+\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\tif (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,\n \t\t\t   &attr)) {\n-\t\t\tDRV_LOG(ERR, \"Failed to modify virtq %d for dirty \"\n+\t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t\t\tDRV_LOG(ERR, \"Failed to modify virtq 
%d for dirty \"\n \t\t\t\t\"bitmap enabling.\", i);\n-\t\t\treturn -1;\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t}\n \t}\n \treturn 0;\n@@ -61,10 +68,19 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,\n \t\tvirtq = &priv->virtqs[i];\n \t\tif (!virtq->configured) {\n \t\t\tDRV_LOG(DEBUG, \"virtq %d is invalid for LM.\", i);\n-\t\t} else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,\n-\t\t\t\t\t\t      &attr)) {\n-\t\t\tDRV_LOG(ERR, \"Failed to modify virtq %d for LM.\", i);\n-\t\t\tgoto err;\n+\t\t} else {\n+\t\t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];\n+\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\tif (mlx5_devx_cmd_modify_virtq(\n+\t\t\t\t\tpriv->virtqs[i].virtq,\n+\t\t\t\t\t&attr)) {\n+\t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"Failed to modify virtq %d for LM.\", i);\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t}\n \t}\n \treturn 0;\n@@ -79,6 +95,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,\n int\n mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)\n {\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tuint64_t features;\n \tint ret = rte_vhost_get_negotiated_features(priv->vid, &features);\n \tint i;\n@@ -90,10 +107,13 @@ mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)\n \tif (!RTE_VHOST_NEED_LOG(features))\n \t\treturn 0;\n \tfor (i = 0; i < priv->nr_virtqs; ++i) {\n+\t\tvirtq = &priv->virtqs[i];\n \t\tif (!priv->virtqs[i].virtq) {\n \t\t\tDRV_LOG(DEBUG, \"virtq %d is invalid for LM log.\", i);\n \t\t} else {\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n \t\t\tret = mlx5_vdpa_virtq_stop(priv, i);\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t\tif (ret) {\n \t\t\t\tDRV_LOG(ERR, \"Failed to stop virtq %d for LM \"\n \t\t\t\t\t\"log.\", i);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\nindex d4b4375c88..4cbf09784e 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\n@@ -237,19 +237,24 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)\n int\n mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)\n {\n-\tint ret = mlx5_vdpa_rqt_prepare(priv);\n+\tint ret;\n \n+\tpthread_mutex_lock(&priv->steer_update_lock);\n+\tret = mlx5_vdpa_rqt_prepare(priv);\n \tif (ret == 0) {\n \t\tmlx5_vdpa_steer_unset(priv);\n \t} else if (ret < 0) {\n+\t\tpthread_mutex_unlock(&priv->steer_update_lock);\n \t\treturn ret;\n \t} else if (!priv->steer.rss[0].flow) {\n \t\tret = mlx5_vdpa_rss_flows_create(priv);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Cannot create RSS flows.\");\n+\t\t\tpthread_mutex_unlock(&priv->steer_update_lock);\n \t\t\treturn -1;\n \t\t}\n \t}\n+\tpthread_mutex_unlock(&priv->steer_update_lock);\n \treturn 0;\n }\n \ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex 55cbc9fad2..138b7bdbc5 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -24,13 +24,17 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)\n \tint nbytes;\n \tint retry;\n \n+\tpthread_mutex_lock(&virtq->virtq_lock);\n \tif (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {\n+\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\tDRV_LOG(ERR,  \"device %d queue %d down, skip kick handling\",\n \t\t\tpriv->vid, virtq->index);\n \t\treturn;\n \t}\n-\tif (rte_intr_fd_get(virtq->intr_handle) < 0)\n+\tif (rte_intr_fd_get(virtq->intr_handle) < 
0) {\n+\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\treturn;\n+\t}\n \tfor (retry = 0; retry < 3; ++retry) {\n \t\tnbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,\n \t\t\t      8);\n@@ -44,9 +48,14 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)\n \t\t}\n \t\tbreak;\n \t}\n-\tif (nbytes < 0)\n+\tif (nbytes < 0) {\n+\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\treturn;\n+\t}\n+\trte_spinlock_lock(&priv->db_lock);\n \trte_write32(virtq->index, priv->virtq_db_addr);\n+\trte_spinlock_unlock(&priv->db_lock);\n+\tpthread_mutex_unlock(&virtq->virtq_lock);\n \tif (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {\n \t\tDRV_LOG(ERR,  \"device %d queue %d down, skip kick handling\",\n \t\t\tpriv->vid, virtq->index);\n@@ -66,6 +75,33 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)\n \tDRV_LOG(DEBUG, \"Ring virtq %u doorbell.\", virtq->index);\n }\n \n+/* Virtq must be locked before calling this function. */\n+static void\n+mlx5_vdpa_virtq_unregister_intr_handle(struct mlx5_vdpa_virtq *virtq)\n+{\n+\tint ret = -EAGAIN;\n+\n+\tif (!virtq->intr_handle)\n+\t\treturn;\n+\tif (rte_intr_fd_get(virtq->intr_handle) >= 0) {\n+\t\twhile (ret == -EAGAIN) {\n+\t\t\tret = rte_intr_callback_unregister(virtq->intr_handle,\n+\t\t\t\t\tmlx5_vdpa_virtq_kick_handler, virtq);\n+\t\t\tif (ret == -EAGAIN) {\n+\t\t\t\tDRV_LOG(DEBUG, \"Try again to unregister fd %d of virtq %hu interrupt\",\n+\t\t\t\t\trte_intr_fd_get(virtq->intr_handle),\n+\t\t\t\t\tvirtq->index);\n+\t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t\t\tusleep(MLX5_VDPA_INTR_RETRIES_USEC);\n+\t\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\t}\n+\t\t}\n+\t\t(void)rte_intr_fd_set(virtq->intr_handle, -1);\n+\t}\n+\trte_intr_instance_free(virtq->intr_handle);\n+\tvirtq->intr_handle = NULL;\n+}\n+\n /* Release cached VQ resources. 
*/\n void\n mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n@@ -75,6 +111,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n \tfor (i = 0; i < priv->caps.max_num_virtio_queues; i++) {\n \t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];\n \n+\t\tpthread_mutex_lock(&virtq->virtq_lock);\n \t\tvirtq->configured = 0;\n \t\tfor (j = 0; j < RTE_DIM(virtq->umems); ++j) {\n \t\t\tif (virtq->umems[j].obj) {\n@@ -90,28 +127,17 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n \t\t}\n \t\tif (virtq->eqp.fw_qp)\n \t\t\tmlx5_vdpa_event_qp_destroy(&virtq->eqp);\n+\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t}\n }\n \n+\n static int\n mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n {\n \tint ret = -EAGAIN;\n \n-\tif (rte_intr_fd_get(virtq->intr_handle) >= 0) {\n-\t\twhile (ret == -EAGAIN) {\n-\t\t\tret = rte_intr_callback_unregister(virtq->intr_handle,\n-\t\t\t\t\tmlx5_vdpa_virtq_kick_handler, virtq);\n-\t\t\tif (ret == -EAGAIN) {\n-\t\t\t\tDRV_LOG(DEBUG, \"Try again to unregister fd %d of virtq %hu interrupt\",\n-\t\t\t\t\trte_intr_fd_get(virtq->intr_handle),\n-\t\t\t\t\tvirtq->index);\n-\t\t\t\tusleep(MLX5_VDPA_INTR_RETRIES_USEC);\n-\t\t\t}\n-\t\t}\n-\t\trte_intr_fd_set(virtq->intr_handle, -1);\n-\t}\n-\trte_intr_instance_free(virtq->intr_handle);\n+\tmlx5_vdpa_virtq_unregister_intr_handle(virtq);\n \tif (virtq->configured) {\n \t\tret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);\n \t\tif (ret)\n@@ -128,10 +154,15 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n void\n mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)\n {\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tint i;\n \n-\tfor (i = 0; i < priv->nr_virtqs; i++)\n-\t\tmlx5_vdpa_virtq_unset(&priv->virtqs[i]);\n+\tfor (i = 0; i < priv->nr_virtqs; i++) {\n+\t\tvirtq = &priv->virtqs[i];\n+\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\tmlx5_vdpa_virtq_unset(virtq);\n+\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t}\n \tpriv->features = 0;\n \tpriv->nr_virtqs = 0;\n }\n@@ -250,7 +281,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \tMLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;\n \tif (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {\n \t\tret = mlx5_vdpa_event_qp_prepare(priv,\n-\t\t\t\tvq->size, vq->callfd, &virtq->eqp);\n+\t\t\t\tvq->size, vq->callfd, virtq);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR,\n \t\t\t\t\"Failed to create event QPs for virtq %d.\",\n@@ -420,7 +451,9 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \t}\n \tclaim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));\n \tvirtq->configured = 1;\n+\trte_spinlock_lock(&priv->db_lock);\n \trte_write32(virtq->index, priv->virtq_db_addr);\n+\trte_spinlock_unlock(&priv->db_lock);\n \t/* Setup doorbell mapping. 
*/\n \tvirtq->intr_handle =\n \t\trte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);\n@@ -441,7 +474,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \t\tif (rte_intr_callback_register(virtq->intr_handle,\n \t\t\t\t\t       mlx5_vdpa_virtq_kick_handler,\n \t\t\t\t\t       virtq)) {\n-\t\t\trte_intr_fd_set(virtq->intr_handle, -1);\n+\t\t\t(void)rte_intr_fd_set(virtq->intr_handle, -1);\n \t\t\tDRV_LOG(ERR, \"Failed to register virtq %d interrupt.\",\n \t\t\t\tindex);\n \t\t\tgoto error;\n@@ -537,6 +570,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \tuint32_t i;\n \tuint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);\n \tint ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);\n+\tstruct mlx5_vdpa_virtq *virtq;\n \n \tif (ret || mlx5_vdpa_features_validate(priv)) {\n \t\tDRV_LOG(ERR, \"Failed to configure negotiated features.\");\n@@ -556,9 +590,17 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t\treturn -1;\n \t}\n \tpriv->nr_virtqs = nr_vring;\n-\tfor (i = 0; i < nr_vring; i++)\n-\t\tif (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))\n-\t\t\tgoto error;\n+\tfor (i = 0; i < nr_vring; i++) {\n+\t\tvirtq = &priv->virtqs[i];\n+\t\tif (virtq->enable) {\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\tif (mlx5_vdpa_virtq_setup(priv, i)) {\n+\t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t}\n+\t}\n \treturn 0;\n error:\n \tmlx5_vdpa_virtqs_release(priv);\n",
    "prefixes": [
        "v1",
        "09/17"
    ]
}
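
The mbox URL in the payload returns the patch as a raw email, which is the usual way to apply it to a local tree. A short sketch, assuming git is on PATH and the current directory is a checkout at a revision the patch applies to:

import subprocess
import requests

MBOX = "http://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-18-lizh@nvidia.com/mbox/"

# Download the raw patch email.
resp = requests.get(MBOX)
resp.raise_for_status()
with open("patch.mbox", "wb") as f:
    f.write(resp.content)

# Apply the patch email with git am; --3way falls back to a three-way merge on conflicts.
subprocess.run(["git", "am", "--3way", "patch.mbox"], check=True)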