get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/112364/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 112364,
    "url": "https://patches.dpdk.org/api/patches/112364/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-24-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220606112109.208873-24-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220606112109.208873-24-lizh@nvidia.com",
    "date": "2022-06-06T11:21:00",
    "name": "[12/16] vdpa/mlx5: add virtq creation task for MT management",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b31bbfd6eab53501efd32a56a728f34118635736",
    "submitter": {
        "id": 1967,
        "url": "https://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-24-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23339,
            "url": "https://patches.dpdk.org/api/series/23339/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=23339",
            "date": "2022-06-06T11:20:37",
            "name": "Add vDPA multi-threads optiomization",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/23339/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/112364/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/112364/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 766F2A0543;\n\tMon,  6 Jun 2022 13:24:33 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id AD02B41611;\n\tMon,  6 Jun 2022 13:22:48 +0200 (CEST)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2041.outbound.protection.outlook.com [40.107.93.41])\n by mails.dpdk.org (Postfix) with ESMTP id C62C141156\n for <dev@dpdk.org>; Mon,  6 Jun 2022 13:22:47 +0200 (CEST)",
            "from BN9PR03CA0557.namprd03.prod.outlook.com (2603:10b6:408:138::22)\n by DM4PR12MB5748.namprd12.prod.outlook.com (2603:10b6:8:5f::19) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.19; Mon, 6 Jun\n 2022 11:22:44 +0000",
            "from BN8NAM11FT047.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:138:cafe::f6) by BN9PR03CA0557.outlook.office365.com\n (2603:10b6:408:138::22) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.16 via Frontend\n Transport; Mon, 6 Jun 2022 11:22:44 +0000",
            "from mail.nvidia.com (12.22.5.235) by\n BN8NAM11FT047.mail.protection.outlook.com (10.13.177.220) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5314.12 via Frontend Transport; Mon, 6 Jun 2022 11:22:44 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Mon, 6 Jun 2022 11:22:43 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Mon, 6 Jun 2022\n 04:22:40 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=eQNY53XSSCSgHQ7MgAniGLmyXDzyHhHuMcz2GGg63E/iIp9jawBb6c0WbGZrah8Q8COrX/T4BKZsxQLFUYWAr+uIoW9O5ejucCOWP4CSlXbigr785pZEXsGlP8jSNUIlLkHrl3OTDHzfoWZevIwYlSV4557c7YI7ODheWXgKiM59vSnGqgAQ9iynkGxF2jtPF1+uOoqCOieMs17E7DnRySqKW05fJ4qx3fboyVa5GGSEx4IF4f6+osbq+19xNuIh29BKCZrwfRLZ75x5s/65VeMNeB2YIBwxB39pY7fVxstLRbnoJODON2BDdyLncQR7ZaM3w80YFE5SqB1xfIRe9g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=D0Zy9/Tg4z8XhCWAE1gPUw8MnOBjkfQd3cdsZE+Y7cw=;\n b=KX2CMefSHOYC8dTwoc37dgyJRQ/OdppeY66Kp50sLEm0V+xZux3WzQmaVb0xRONQUJNn7aHb/q8c4D6qtOnfbAD23ZMnvOPh/s7jfri2L/ZFlDSPVByfVf1nYHxwv54lrbDhkpjwBP2zi7Ya+b2jmpPdukE5HUK3UidN6SD346ci1r0N3VqGyP7C3PksbTIpSLTSIzyjEYS7toIIa6QTCrFJ1WsfctB4W2AlD1MeWSm09WJqoQhprLVpH6QDhXegq0xzr5+QQaI8IKqLUPVCVtvcmRY1yEW9Fs/xNtcuoJaqb3CNoTl5+ypHhI/fE4RtiYWrZdgh1/Wj1SnpDk1ieg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.235) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=D0Zy9/Tg4z8XhCWAE1gPUw8MnOBjkfQd3cdsZE+Y7cw=;\n b=F8rxw4AGe1WHd384hPw90oI3/HjnpVGW1jHk4qZD3VTvWcf3wvE1GKIymp+ywuycTJk33fqzQ6I/uKhFBqZeisWZWobesmZ44venq6gJw7HRxNk+OaT7BujMyDHjhsE9eP278q+cTB5O66/HpZxcUuk/5mRkGxu6c9hOgtTSHAum2kOJ6fUZWorBp5P2vlhzrd1j4AkpNTTQu0rsjHNEHqDl5omG0bMtCFfb+z1KCD222L6L7Ml57l22alZPHRnMg4i1OKdVRsatBR7byza5DMFcbqKd22GsblrF5ZDqt8fWORRhEHdkSs6f0wAD+JqfX3NgbI+OXQLksH+XLG4wwQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.235)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.235 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.235; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>",
        "Subject": "[PATCH 12/16] vdpa/mlx5: add virtq creation task for MT management",
        "Date": "Mon, 6 Jun 2022 14:21:00 +0300",
        "Message-ID": "<20220606112109.208873-24-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220606112109.208873-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220606112109.208873-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "2cc7e53a-c07d-405b-e2ce-08da47aee0bb",
        "X-MS-TrafficTypeDiagnostic": "DM4PR12MB5748:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <DM4PR12MB574834B1D446075EA40FE57ABFA29@DM4PR12MB5748.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n hNp96nbou6JgWPOk5vsqdIoKQ9rtl31rgNjukhPY/cvXiS61HZC+lM7LyiaCiGQzruua4nXMsx97a+l+FbYYMKJ/QKP2ykSReqw8vhEjN6/MX8PlLHVoM27x4Ua0Llhopdl5sF3qHoxQmRI+O567e5NAUODY3Fsq91d4VZc8V9S0m4G93XEIdOsEdzXqvnD2XDc0hhEZCqpCrAUMO6GxtVFNnO78zDOan3svGdDL21wd6lvW9V2V8IX1UQNub25zVVtby/337JFFFgMjY1ewh8DG7HtLcOWZvQzG368vqwaZvIsoEWg12Kv3HqPdDh9XwrDd2JdJ9t13ShmmqdUN7fUjyIiZjvckgqnPGkYNZNvmPo5vjci8acrfNqCcB2LUwsh61wVp/LDd9txAzAZYt9RpQxR4/J+ydWdMnHUEAsob8DFS9zX7cgRH2tV5NP67mIPSRBgPS4oCzX0pFCUQFNqHGUQkoTSFeAgu5fVlyzqNdv4fu2Ym870h5ehl1Xe3gA0FeLcP3HOAvl4Iyqfc3Wz+L6ByjqVJS4NrjAxrH40n75UadmxFqPTdUaurNVuhmUWhZg0121O1GMp6Pyq9ojDkHJIikg0EfWTJBDHGurJ3E/U3VFIA7NQXpbwOI2RKYV7AMXdH6bTxLQc9PqpqkiBD7LQukR3gMSdeoh5pILvkP+DrFScggJOm7sRf0ijF54A2WDZaMXVKi5B+HaWRdw==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.235; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(40470700004)(46966006)(36840700001)(426003)(6286002)(1076003)(36860700001)(8936002)(107886003)(6636002)(16526019)(186003)(2906002)(508600001)(47076005)(83380400001)(54906003)(40460700003)(336012)(2616005)(70586007)(70206006)(55016003)(36756003)(6666004)(81166007)(356005)(4326008)(8676002)(7696005)(30864003)(26005)(86362001)(5660300002)(82310400005)(110136005)(316002)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Jun 2022 11:22:44.4137 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 2cc7e53a-c07d-405b-e2ce-08da47aee0bb",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.235];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT047.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM4PR12MB5748",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The virtq object and all its sub-resources use a lot of\nFW commands and can be accelerated by the MT management.\nSplit the virtqs creation between the configuration threads.\nThis accelerates the LM process and reduces its time by 20%.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.h         |   9 +-\n drivers/vdpa/mlx5/mlx5_vdpa_cthread.c |  14 +++\n drivers/vdpa/mlx5/mlx5_vdpa_event.c   |   2 +-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c   | 149 +++++++++++++++++++-------\n 4 files changed, 134 insertions(+), 40 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex 3316ce42be..35221f5ddc 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -80,6 +80,7 @@ enum {\n /* Vdpa task types. */\n enum mlx5_vdpa_task_type {\n \tMLX5_VDPA_TASK_REG_MR = 1,\n+\tMLX5_VDPA_TASK_SETUP_VIRTQ,\n };\n \n /* Generic task information and size must be multiple of 4B. */\n@@ -117,12 +118,12 @@ struct mlx5_vdpa_vmem_info {\n \n struct mlx5_vdpa_virtq {\n \tSLIST_ENTRY(mlx5_vdpa_virtq) next;\n-\tuint8_t enable;\n \tuint16_t index;\n \tuint16_t vq_size;\n \tuint8_t notifier_state;\n-\tbool stopped;\n \tuint32_t configured:1;\n+\tuint32_t enable:1;\n+\tuint32_t stopped:1;\n \tuint32_t version;\n \tpthread_mutex_t virtq_lock;\n \tstruct mlx5_vdpa_priv *priv;\n@@ -565,11 +566,13 @@ bool\n mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,\n \t\tuint32_t thrd_idx,\n \t\tenum mlx5_vdpa_task_type task_type,\n-\t\tuint32_t *bulk_refcnt, uint32_t *bulk_err_cnt,\n+\t\tuint32_t *remaining_cnt, uint32_t *err_cnt,\n \t\tvoid **task_data, uint32_t num);\n int\n mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);\n bool\n mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,\n \t\tuint32_t *err_cnt, uint32_t sleep_time);\n+int\n+mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\nindex 10391931ae..1389d369ae 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n@@ -100,6 +100,7 @@ mlx5_vdpa_c_thread_handle(void *arg)\n {\n \tstruct mlx5_vdpa_conf_thread_mng *multhrd = arg;\n \tpthread_t thread_id = pthread_self();\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tstruct mlx5_vdpa_priv *priv;\n \tstruct mlx5_vdpa_task task;\n \tstruct rte_ring *rng;\n@@ -139,6 +140,19 @@ mlx5_vdpa_c_thread_handle(void *arg)\n 
\t\t\t\t__ATOMIC_RELAXED);\n \t\t\t}\n \t\t\tbreak;\n+\t\tcase MLX5_VDPA_TASK_SETUP_VIRTQ:\n+\t\t\tvirtq = &priv->virtqs[task.idx];\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\tret = mlx5_vdpa_virtq_setup(priv,\n+\t\t\t\ttask.idx, false);\n+\t\t\tif (ret) {\n+\t\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\t\"Failed to setup virtq %d.\", task.idx);\n+\t\t\t\t__atomic_fetch_add(\n+\t\t\t\t\ttask.err_cnt, 1, __ATOMIC_RELAXED);\n+\t\t\t}\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tDRV_LOG(ERR, \"Invalid vdpa task type %d.\",\n \t\t\ttask.type);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\nindex b45fbac146..f782b6b832 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n@@ -371,7 +371,7 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)\n \t\t\tgoto unlock;\n \t\tif (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)\n \t\t\tgoto unlock;\n-\t\tvirtq->stopped = true;\n+\t\tvirtq->stopped = 1;\n \t\t/* Query error info. 
*/\n \t\tif (mlx5_vdpa_virtq_query(priv, vq_index))\n \t\t\tgoto log;\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex 0b317655db..db05220e76 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -111,8 +111,9 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n \tfor (i = 0; i < priv->caps.max_num_virtio_queues; i++) {\n \t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];\n \n+\t\tif (virtq->index != i)\n+\t\t\tcontinue;\n \t\tpthread_mutex_lock(&virtq->virtq_lock);\n-\t\tvirtq->configured = 0;\n \t\tfor (j = 0; j < RTE_DIM(virtq->umems); ++j) {\n \t\t\tif (virtq->umems[j].obj) {\n \t\t\t\tclaim_zero(mlx5_glue->devx_umem_dereg\n@@ -131,7 +132,6 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n \t}\n }\n \n-\n static int\n mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n {\n@@ -191,7 +191,7 @@ mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)\n \tret = mlx5_vdpa_virtq_modify(virtq, 0);\n \tif (ret)\n \t\treturn -1;\n-\tvirtq->stopped = true;\n+\tvirtq->stopped = 1;\n \tDRV_LOG(DEBUG, \"vid %u virtq %u was stopped.\", priv->vid, index);\n \treturn mlx5_vdpa_virtq_query(priv, index);\n }\n@@ -411,7 +411,38 @@ mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv)\n }\n \n static int\n-mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n+mlx5_vdpa_virtq_doorbell_setup(struct mlx5_vdpa_virtq *virtq,\n+\t\tstruct rte_vhost_vring *vq, int index)\n+{\n+\tvirtq->intr_handle =\n+\t\trte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);\n+\tif (virtq->intr_handle == NULL) {\n+\t\tDRV_LOG(ERR, \"Fail to allocate intr_handle\");\n+\t\treturn -1;\n+\t}\n+\tif (rte_intr_fd_set(virtq->intr_handle, vq->kickfd))\n+\t\treturn -1;\n+\tif (rte_intr_fd_get(virtq->intr_handle) == -1) {\n+\t\tDRV_LOG(WARNING, \"Virtq %d kickfd is invalid.\", index);\n+\t} else {\n+\t\tif 
(rte_intr_type_set(virtq->intr_handle,\n+\t\t\tRTE_INTR_HANDLE_EXT))\n+\t\t\treturn -1;\n+\t\tif (rte_intr_callback_register(virtq->intr_handle,\n+\t\t\tmlx5_vdpa_virtq_kick_handler, virtq)) {\n+\t\t\t(void)rte_intr_fd_set(virtq->intr_handle, -1);\n+\t\t\tDRV_LOG(ERR, \"Failed to register virtq %d interrupt.\",\n+\t\t\t\tindex);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tDRV_LOG(DEBUG, \"Register fd %d interrupt for virtq %d.\",\n+\t\t\trte_intr_fd_get(virtq->intr_handle), index);\n+\t}\n+\treturn 0;\n+}\n+\n+int\n+mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick)\n {\n \tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];\n \tstruct rte_vhost_vring vq;\n@@ -455,33 +486,11 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \trte_write32(virtq->index, priv->virtq_db_addr);\n \trte_spinlock_unlock(&priv->db_lock);\n \t/* Setup doorbell mapping. */\n-\tvirtq->intr_handle =\n-\t\trte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);\n-\tif (virtq->intr_handle == NULL) {\n-\t\tDRV_LOG(ERR, \"Fail to allocate intr_handle\");\n-\t\tgoto error;\n-\t}\n-\n-\tif (rte_intr_fd_set(virtq->intr_handle, vq.kickfd))\n-\t\tgoto error;\n-\n-\tif (rte_intr_fd_get(virtq->intr_handle) == -1) {\n-\t\tDRV_LOG(WARNING, \"Virtq %d kickfd is invalid.\", index);\n-\t} else {\n-\t\tif (rte_intr_type_set(virtq->intr_handle, RTE_INTR_HANDLE_EXT))\n-\t\t\tgoto error;\n-\n-\t\tif (rte_intr_callback_register(virtq->intr_handle,\n-\t\t\t\t\t       mlx5_vdpa_virtq_kick_handler,\n-\t\t\t\t\t       virtq)) {\n-\t\t\t(void)rte_intr_fd_set(virtq->intr_handle, -1);\n+\tif (reg_kick) {\n+\t\tif (mlx5_vdpa_virtq_doorbell_setup(virtq, &vq, index)) {\n \t\t\tDRV_LOG(ERR, \"Failed to register virtq %d interrupt.\",\n \t\t\t\tindex);\n \t\t\tgoto error;\n-\t\t} else {\n-\t\t\tDRV_LOG(DEBUG, \"Register fd %d interrupt for virtq %d.\",\n-\t\t\t\trte_intr_fd_get(virtq->intr_handle),\n-\t\t\t\tindex);\n \t\t}\n \t}\n \t/* Subscribe virtq error event. 
*/\n@@ -497,7 +506,6 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \t\trte_errno = errno;\n \t\tgoto error;\n \t}\n-\tvirtq->stopped = false;\n \t/* Initial notification to ask Qemu handling completed buffers. */\n \tif (virtq->eqp.cq.callfd != -1)\n \t\teventfd_write(virtq->eqp.cq.callfd, (eventfd_t)1);\n@@ -567,10 +575,12 @@ mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)\n int\n mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n {\n-\tuint32_t i;\n-\tuint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);\n \tint ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);\n+\tuint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);\n+\tuint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;\n+\tuint32_t i, thrd_idx, data[1];\n \tstruct mlx5_vdpa_virtq *virtq;\n+\tstruct rte_vhost_vring vq;\n \n \tif (ret || mlx5_vdpa_features_validate(priv)) {\n \t\tDRV_LOG(ERR, \"Failed to configure negotiated features.\");\n@@ -590,16 +600,83 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)\n \t\treturn -1;\n \t}\n \tpriv->nr_virtqs = nr_vring;\n-\tfor (i = 0; i < nr_vring; i++) {\n-\t\tvirtq = &priv->virtqs[i];\n-\t\tif (virtq->enable) {\n+\tif (priv->use_c_thread) {\n+\t\tuint32_t main_task_idx[nr_vring];\n+\n+\t\tfor (i = 0; i < nr_vring; i++) {\n+\t\t\tvirtq = &priv->virtqs[i];\n+\t\t\tif (!virtq->enable)\n+\t\t\t\tcontinue;\n+\t\t\tthrd_idx = i % (conf_thread_mng.max_thrds + 1);\n+\t\t\tif (!thrd_idx) {\n+\t\t\t\tmain_task_idx[task_num] = i;\n+\t\t\t\ttask_num++;\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tthrd_idx = priv->last_c_thrd_idx + 1;\n+\t\t\tif (thrd_idx >= conf_thread_mng.max_thrds)\n+\t\t\t\tthrd_idx = 0;\n+\t\t\tpriv->last_c_thrd_idx = thrd_idx;\n+\t\t\tdata[0] = i;\n+\t\t\tif (mlx5_vdpa_task_add(priv, thrd_idx,\n+\t\t\t\tMLX5_VDPA_TASK_SETUP_VIRTQ,\n+\t\t\t\t&remaining_cnt, &err_cnt,\n+\t\t\t\t(void **)&data, 1)) {\n+\t\t\t\tDRV_LOG(ERR, \"Fail to add \"\n+\t\t\t\t\t\t\"task setup virtq (%d).\", 
i);\n+\t\t\t\tmain_task_idx[task_num] = i;\n+\t\t\t\ttask_num++;\n+\t\t\t}\n+\t\t}\n+\t\tfor (i = 0; i < task_num; i++) {\n+\t\t\tvirtq = &priv->virtqs[main_task_idx[i]];\n \t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n-\t\t\tif (mlx5_vdpa_virtq_setup(priv, i)) {\n+\t\t\tif (mlx5_vdpa_virtq_setup(priv,\n+\t\t\t\tmain_task_idx[i], false)) {\n \t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t\t\tgoto error;\n \t\t\t}\n \t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t}\n+\t\tif (mlx5_vdpa_c_thread_wait_bulk_tasks_done(&remaining_cnt,\n+\t\t\t&err_cnt, 2000)) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\"Failed to wait virt-queue setup tasks ready.\");\n+\t\t\tgoto error;\n+\t\t}\n+\t\tfor (i = 0; i < nr_vring; i++) {\n+\t\t\t/* Setup doorbell mapping in order for Qume. */\n+\t\t\tvirtq = &priv->virtqs[i];\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\tif (!virtq->enable || !virtq->configured) {\n+\t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tif (rte_vhost_get_vhost_vring(priv->vid, i, &vq)) {\n+\t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t\tif (mlx5_vdpa_virtq_doorbell_setup(virtq, &vq, i)) {\n+\t\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"Failed to register virtq %d interrupt.\", i);\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t}\n+\t} else {\n+\t\tfor (i = 0; i < nr_vring; i++) {\n+\t\t\tvirtq = &priv->virtqs[i];\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\tif (virtq->enable) {\n+\t\t\t\tif (mlx5_vdpa_virtq_setup(priv, i, true)) {\n+\t\t\t\t\tpthread_mutex_unlock(\n+\t\t\t\t\t\t&virtq->virtq_lock);\n+\t\t\t\t\tgoto error;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n+\t\t}\n \t}\n \treturn 0;\n error:\n@@ -663,7 +740,7 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)\n \t\tmlx5_vdpa_virtq_unset(virtq);\n \t}\n \tif (enable) 
{\n-\t\tret = mlx5_vdpa_virtq_setup(priv, index);\n+\t\tret = mlx5_vdpa_virtq_setup(priv, index, true);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Failed to setup virtq %d.\", index);\n \t\t\treturn ret;\n",
    "prefixes": [
        "12/16"
    ]
}