get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are modified).

put:
Fully update a patch (the supplied representation replaces the existing resource).

GET /api/patches/112361/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 112361,
    "url": "https://patches.dpdk.org/api/patches/112361/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-19-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220606112109.208873-19-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220606112109.208873-19-lizh@nvidia.com",
    "date": "2022-06-06T11:20:55",
    "name": "[v1,10/17] vdpa/mlx5: add multi-thread management for configuration",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3d1cf0b1ad4bbe2aed3bb38ba0726e77cfdc2dc2",
    "submitter": {
        "id": 1967,
        "url": "https://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-19-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23348,
            "url": "https://patches.dpdk.org/api/series/23348/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=23348",
            "date": "2022-06-06T11:20:55",
            "name": null,
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/23348/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/112361/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/112361/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3F559A0543;\n\tMon,  6 Jun 2022 13:24:10 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0992E42BC2;\n\tMon,  6 Jun 2022 13:22:38 +0200 (CEST)",
            "from NAM12-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam12on2072.outbound.protection.outlook.com [40.107.237.72])\n by mails.dpdk.org (Postfix) with ESMTP id B8E8A4281B\n for <dev@dpdk.org>; Mon,  6 Jun 2022 13:22:35 +0200 (CEST)",
            "from BN9PR03CA0921.namprd03.prod.outlook.com (2603:10b6:408:107::26)\n by BYAPR12MB2838.namprd12.prod.outlook.com (2603:10b6:a03:6f::23)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.13; Mon, 6 Jun\n 2022 11:22:31 +0000",
            "from BN8NAM11FT056.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:107:cafe::37) by BN9PR03CA0921.outlook.office365.com\n (2603:10b6:408:107::26) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.16 via Frontend\n Transport; Mon, 6 Jun 2022 11:22:30 +0000",
            "from mail.nvidia.com (12.22.5.236) by\n BN8NAM11FT056.mail.protection.outlook.com (10.13.177.26) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5314.12 via Frontend Transport; Mon, 6 Jun 2022 11:22:30 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL109.nvidia.com\n (10.27.9.19) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Mon, 6 Jun 2022 11:22:29 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Mon, 6 Jun 2022\n 04:22:26 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=lwg5NzTo7+Xwe3WRD5yFXqmKSrXoSVUipOgPopCoxqBJGigRvhf/QqrYWwkgD1f3FKiA1KpUFE3/OaY10BZiqq32z0mViIjbmmOlVU0lPwGL9yJXmsZZ/OKX3EM31a7UeRPKGFwuWojDiX8yzeb3fE7ospP56wgj1d9LRXXAUlqh0jhey7csMjjBUqD5swCtvHjJGsXJIddakj38lAf8jTlqvXD6mvscVpvtzKnmu9POLI2f4gj5UMj/xK5bFQAo/7inShH+XI/VLa3bcsmZCWA20ljQN2qsaaXJtc/QKuwpwAuQHZ2duGFGQStr6fkENtmTuNN9XV1BoaRpYuVDCg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=XqNbWRPHMsXACs/uazvCCyzyqWQJ9s88f+cJ+vO9Xjc=;\n b=GHfbIK+URgSC9mCGwHB7TjLCyBfLgi8AI64HQ+FanET6z3j1S4IAJXIA0crQIfNUWmLTuSgR94b9tEa8cds1QAyZ6reT/A/4iUbNQZbHgzzZFrFrw+MxcHCE7KNZkJ0Dgstr1yo8WHPzRrFGHm88wtDn08sT5vRHe6x9DBgPoCKLlmO95adtkyj9cvH4CBAb7+3H1E3IzhpeSOkMwr3ydoCeB9SXuIJlq9oB79E49ES/WVtoSsCIrfXZfn24lyON6ohTlXYCnq6KkCIlVRFI+2Sop6MiwXHngcmP+umthwXcUxqr3Pgwf5eUrBDusfWP+y+f3kFruzBhiJZEyO38yQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.236) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=XqNbWRPHMsXACs/uazvCCyzyqWQJ9s88f+cJ+vO9Xjc=;\n b=B1/BLmCrhYddyxXjBdasybDE0KvJ+Kw9oOzl+UNcQyNRCnoswFxSPTouYeIqUPny32cl1JkYApguqsj8k991WhI9MKFlVBetUM/2OV15yJi17zbquL6QH3pdq26Jk6TIJoCa/kHHPHQK9KcSsrEnbFw1EklmqyqMZtDztg+qtxihV+iTOgZB0rsDaEVhWaFba6HapxaIh38yrU50ljibEVYXIOEfVTza9zkHVWY69U5ZDsOj2PYMJUsVjPzFjXCQSg5ujpsNftP7dyz08bU/QJ/gj006HLhSy5OSrvRFbdP0KV3xLfwTEDj9KdMfOhkBuEc2s5ledT0bPyOTvsGqqw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.236)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.236 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.236; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>",
        "Subject": "[PATCH v1 10/17] vdpa/mlx5: add multi-thread management for\n configuration",
        "Date": "Mon, 6 Jun 2022 14:20:55 +0300",
        "Message-ID": "<20220606112109.208873-19-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220606112109.208873-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220606112109.208873-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "b9e73181-c793-4ec2-859d-08da47aed861",
        "X-MS-TrafficTypeDiagnostic": "BYAPR12MB2838:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <BYAPR12MB2838393F23A2E7FD4EBC70BABFA29@BYAPR12MB2838.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 6Q8UtAIzRfhD+6tjLIYm/GO9ZzTEIlJKOfgMLVdAVhl4P8gtPCikH+/UNDltOSnDIZ1BWp6VXUzbB790uEh2zyYGu0UKuEF6LUhWYUCYJz+majFP+PEWZ/KDPyHvwPAAYM17nC+VHmb8YErO9RcX/1wb2i4yVbr72Wr7vpEaowOknETSBRKyThVi5PvQsc1HtUVew+OBl6/pePCz+X6AoHtlRCeZCcOAt98eoJqzQqRxVXatunvF172Oxoej0nju8TfbxjIGDvykb5MRIgao3kynMvf4As4CwlA+1YhgOeCs1PRrvGWypzIg296DfGpFkwk74y2P6kXKUIUHjlvjfxqRQbyVCUd0Cg451uWz1Aw2jgXbfezED136j8QnqP8YG8+yn/0t9vZ1XDdSGQwGCHl84h3py6GoI1mdQ5ak5QQimOkcdZBGQdOz0mYQjfBdo0pjNrm0uBJ75qo1rPoc8kWs2bFqRTGdEl/6uIiiPdqw7xhVepjX61c5yqHGOKCMRumT1N3WO7auyN5e/R/yyR/S130TE7cbC18P9xtrQCWHvRWwV1ln/lQAnveNkW7iTwBEHmSDuQ0xpTFEPNyvFYjJ5Dh6iBw5DY9BRBJPwzXPDouvyVH0M2HOmxBOFbfiO5td44jFYYzsm7fnOWD6qOiLaW16W/CZAiiNx2Lffz1F3lRHiqT9pN8t/eiXknaVxP/T7DdJj8AMTGu1PSzVEw==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.236; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(46966006)(40470700004)(36840700001)(83380400001)(336012)(2616005)(6636002)(47076005)(70586007)(16526019)(186003)(70206006)(54906003)(426003)(1076003)(316002)(107886003)(110136005)(7696005)(6666004)(6286002)(2906002)(30864003)(40460700003)(26005)(36756003)(508600001)(55016003)(8936002)(4326008)(5660300002)(8676002)(82310400005)(36860700001)(86362001)(81166007)(356005)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Jun 2022 11:22:30.4154 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n b9e73181-c793-4ec2-859d-08da47aed861",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.236];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT056.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BYAPR12MB2838",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The LM process includes a lot of objects creations and\ndestructions in the source and the destination servers.\nAs much as LM time increases, the packet drop of the VM increases.\nTo improve LM time need to parallel the configurations for mlx5 FW.\nAdd internal multi-thread management in the driver for it.\n\nA new devarg defines the number of threads and their CPU.\nThe management is shared between all the devices of the driver.\nSince the event_core also affects the datapath events thread,\nreduce the priority of the datapath event thread to\nallow fast configuration of the devices doing the LM.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\n---\n doc/guides/vdpadevs/mlx5.rst          |  11 +++\n drivers/vdpa/mlx5/meson.build         |   1 +\n drivers/vdpa/mlx5/mlx5_vdpa.c         |  41 ++++++++\n drivers/vdpa/mlx5/mlx5_vdpa.h         |  36 +++++++\n drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 129 ++++++++++++++++++++++++++\n drivers/vdpa/mlx5/mlx5_vdpa_event.c   |   2 +-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c   |   8 +-\n 7 files changed, 223 insertions(+), 5 deletions(-)\n create mode 100644 drivers/vdpa/mlx5/mlx5_vdpa_cthread.c",
    "diff": "diff --git a/doc/guides/vdpadevs/mlx5.rst b/doc/guides/vdpadevs/mlx5.rst\nindex 0ad77bf535..b75a01688d 100644\n--- a/doc/guides/vdpadevs/mlx5.rst\n+++ b/doc/guides/vdpadevs/mlx5.rst\n@@ -78,6 +78,17 @@ for an additional list of options shared with other mlx5 drivers.\n   CPU core number to set polling thread affinity to, default to control plane\n   cpu.\n \n+- ``max_conf_threads`` parameter [int]\n+\n+  Allow the driver to use internal threads to obtain fast configuration.\n+  All the threads will be open on the same core of the event completion queue scheduling thread.\n+\n+  - 0, default, don't use internal threads for configuration.\n+\n+  - 1 - 256, number of internal threads in addition to the caller thread (8 is suggested).\n+    This value, if not 0, should be the same for all the devices;\n+    the first prob will take it with the event_core for all the multi-thread configurations in the driver.\n+\n - ``hw_latency_mode`` parameter [int]\n \n   The completion queue moderation mode:\ndiff --git a/drivers/vdpa/mlx5/meson.build b/drivers/vdpa/mlx5/meson.build\nindex 0fa82ad257..9d8dbb1a82 100644\n--- a/drivers/vdpa/mlx5/meson.build\n+++ b/drivers/vdpa/mlx5/meson.build\n@@ -15,6 +15,7 @@ sources = files(\n         'mlx5_vdpa_virtq.c',\n         'mlx5_vdpa_steer.c',\n         'mlx5_vdpa_lm.c',\n+        'mlx5_vdpa_cthread.c',\n )\n cflags_options = [\n         '-std=c11',\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex e5a11f72fd..a9d023ed08 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -50,6 +50,8 @@ TAILQ_HEAD(mlx5_vdpa_privs, mlx5_vdpa_priv) priv_list =\n \t\t\t\t\t      TAILQ_HEAD_INITIALIZER(priv_list);\n static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;\n \n+struct mlx5_vdpa_conf_thread_mng conf_thread_mng;\n+\n static void mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv);\n \n static struct mlx5_vdpa_priv *\n@@ -493,6 +495,29 @@ 
mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)\n \t\t\tDRV_LOG(WARNING, \"Invalid event_core %s.\", val);\n \t\telse\n \t\t\tpriv->event_core = tmp;\n+\t} else if (strcmp(key, \"max_conf_threads\") == 0) {\n+\t\tif (tmp) {\n+\t\t\tpriv->use_c_thread = true;\n+\t\t\tif (!conf_thread_mng.initializer_priv) {\n+\t\t\t\tconf_thread_mng.initializer_priv = priv;\n+\t\t\t\tif (tmp > MLX5_VDPA_MAX_C_THRD) {\n+\t\t\t\t\tDRV_LOG(WARNING,\n+\t\t\t\t\"Invalid max_conf_threads %s \"\n+\t\t\t\t\"and set max_conf_threads to %d\",\n+\t\t\t\tval, MLX5_VDPA_MAX_C_THRD);\n+\t\t\t\t\ttmp = MLX5_VDPA_MAX_C_THRD;\n+\t\t\t\t}\n+\t\t\t\tconf_thread_mng.max_thrds = tmp;\n+\t\t\t} else if (tmp != conf_thread_mng.max_thrds) {\n+\t\t\t\tDRV_LOG(WARNING,\n+\t\"max_conf_threads is PMD argument and not per device, \"\n+\t\"only the first device configuration set it, current value is %d \"\n+\t\"and will not be changed to %d.\",\n+\t\t\t\tconf_thread_mng.max_thrds, (int)tmp);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tpriv->use_c_thread = false;\n+\t\t}\n \t} else if (strcmp(key, \"hw_latency_mode\") == 0) {\n \t\tpriv->hw_latency_mode = (uint32_t)tmp;\n \t} else if (strcmp(key, \"hw_max_latency_us\") == 0) {\n@@ -521,6 +546,9 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,\n \t\t\"hw_max_latency_us\",\n \t\t\"hw_max_pending_comp\",\n \t\t\"no_traffic_time\",\n+\t\t\"queue_size\",\n+\t\t\"queues\",\n+\t\t\"max_conf_threads\",\n \t\tNULL,\n \t};\n \n@@ -725,6 +753,13 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,\n \tpthread_mutex_init(&priv->steer_update_lock, NULL);\n \tpriv->cdev = cdev;\n \tmlx5_vdpa_config_get(mkvlist, priv);\n+\tif (priv->use_c_thread) {\n+\t\tif (conf_thread_mng.initializer_priv == priv)\n+\t\t\tif (mlx5_vdpa_mult_threads_create(priv->event_core))\n+\t\t\t\tgoto error;\n+\t\t__atomic_fetch_add(&conf_thread_mng.refcnt, 1,\n+\t\t\t__ATOMIC_RELAXED);\n+\t}\n \tif (mlx5_vdpa_create_dev_resources(priv))\n \t\tgoto error;\n \tpriv->vdev = 
rte_vdpa_register_device(cdev->dev, &mlx5_vdpa_ops);\n@@ -739,6 +774,8 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,\n \tpthread_mutex_unlock(&priv_list_lock);\n \treturn 0;\n error:\n+\tif (conf_thread_mng.initializer_priv == priv)\n+\t\tmlx5_vdpa_mult_threads_destroy(false);\n \tif (priv)\n \t\tmlx5_vdpa_dev_release(priv);\n \treturn -rte_errno;\n@@ -806,6 +843,10 @@ mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)\n \tmlx5_vdpa_release_dev_resources(priv);\n \tif (priv->vdev)\n \t\trte_vdpa_unregister_device(priv->vdev);\n+\tif (priv->use_c_thread)\n+\t\tif (__atomic_fetch_sub(&conf_thread_mng.refcnt,\n+\t\t\t1, __ATOMIC_RELAXED) == 1)\n+\t\t\tmlx5_vdpa_mult_threads_destroy(true);\n \trte_free(priv);\n }\n \ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex 3fd5eefc5e..4e7c2557b7 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -73,6 +73,22 @@ enum {\n \tMLX5_VDPA_NOTIFIER_STATE_ERR\n };\n \n+#define MLX5_VDPA_MAX_C_THRD 256\n+\n+/* Generic mlx5_vdpa_c_thread information. 
*/\n+struct mlx5_vdpa_c_thread {\n+\tpthread_t tid;\n+};\n+\n+struct mlx5_vdpa_conf_thread_mng {\n+\tvoid *initializer_priv;\n+\tuint32_t refcnt;\n+\tuint32_t max_thrds;\n+\tpthread_mutex_t cthrd_lock;\n+\tstruct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];\n+};\n+extern struct mlx5_vdpa_conf_thread_mng conf_thread_mng;\n+\n struct mlx5_vdpa_virtq {\n \tSLIST_ENTRY(mlx5_vdpa_virtq) next;\n \tuint8_t enable;\n@@ -126,6 +142,7 @@ enum mlx5_dev_state {\n struct mlx5_vdpa_priv {\n \tTAILQ_ENTRY(mlx5_vdpa_priv) next;\n \tbool connected;\n+\tbool use_c_thread;\n \tenum mlx5_dev_state state;\n \trte_spinlock_t db_lock;\n \tpthread_mutex_t steer_update_lock;\n@@ -496,4 +513,23 @@ mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv);\n \n bool\n mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv);\n+\n+/**\n+ * Create configuration multi-threads resource\n+ *\n+ * @param[in] cpu_core\n+ *   CPU core number to set configuration threads affinity to.\n+ *\n+ * @return\n+ *   0 on success, a negative value otherwise.\n+ */\n+int\n+mlx5_vdpa_mult_threads_create(int cpu_core);\n+\n+/**\n+ * Destroy configuration multi-threads resource\n+ *\n+ */\n+void\n+mlx5_vdpa_mult_threads_destroy(bool need_unlock);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\nnew file mode 100644\nindex 0000000000..ba7d8b63b3\n--- /dev/null\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n@@ -0,0 +1,129 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+#include <string.h>\n+#include <unistd.h>\n+#include <sys/eventfd.h>\n+\n+#include <rte_malloc.h>\n+#include <rte_errno.h>\n+#include <rte_io.h>\n+#include <rte_alarm.h>\n+#include <rte_tailq.h>\n+#include <rte_ring_elem.h>\n+\n+#include <mlx5_common.h>\n+\n+#include \"mlx5_vdpa_utils.h\"\n+#include \"mlx5_vdpa.h\"\n+\n+static void *\n+mlx5_vdpa_c_thread_handle(void *arg)\n+{\n+\t/* To be added later. 
*/\n+\treturn arg;\n+}\n+\n+static void\n+mlx5_vdpa_c_thread_destroy(uint32_t thrd_idx, bool need_unlock)\n+{\n+\tif (conf_thread_mng.cthrd[thrd_idx].tid) {\n+\t\tpthread_cancel(conf_thread_mng.cthrd[thrd_idx].tid);\n+\t\tpthread_join(conf_thread_mng.cthrd[thrd_idx].tid, NULL);\n+\t\tconf_thread_mng.cthrd[thrd_idx].tid = 0;\n+\t\tif (need_unlock)\n+\t\t\tpthread_mutex_init(&conf_thread_mng.cthrd_lock, NULL);\n+\t}\n+}\n+\n+static int\n+mlx5_vdpa_c_thread_create(int cpu_core)\n+{\n+\tconst struct sched_param sp = {\n+\t\t.sched_priority = sched_get_priority_max(SCHED_RR),\n+\t};\n+\trte_cpuset_t cpuset;\n+\tpthread_attr_t attr;\n+\tuint32_t thrd_idx;\n+\tchar name[32];\n+\tint ret;\n+\n+\tpthread_mutex_lock(&conf_thread_mng.cthrd_lock);\n+\tpthread_attr_init(&attr);\n+\tret = pthread_attr_setschedpolicy(&attr, SCHED_RR);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to set thread sched policy = RR.\");\n+\t\tgoto c_thread_err;\n+\t}\n+\tret = pthread_attr_setschedparam(&attr, &sp);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to set thread priority.\");\n+\t\tgoto c_thread_err;\n+\t}\n+\tfor (thrd_idx = 0; thrd_idx < conf_thread_mng.max_thrds;\n+\t\tthrd_idx++) {\n+\t\tret = pthread_create(&conf_thread_mng.cthrd[thrd_idx].tid,\n+\t\t\t\t&attr, mlx5_vdpa_c_thread_handle,\n+\t\t\t\t(void *)&conf_thread_mng);\n+\t\tif (ret) {\n+\t\t\tDRV_LOG(ERR, \"Failed to create vdpa multi-threads %d.\",\n+\t\t\t\t\tthrd_idx);\n+\t\t\tgoto c_thread_err;\n+\t\t}\n+\t\tCPU_ZERO(&cpuset);\n+\t\tif (cpu_core != -1)\n+\t\t\tCPU_SET(cpu_core, &cpuset);\n+\t\telse\n+\t\t\tcpuset = rte_lcore_cpuset(rte_get_main_lcore());\n+\t\tret = pthread_setaffinity_np(\n+\t\t\t\tconf_thread_mng.cthrd[thrd_idx].tid,\n+\t\t\t\tsizeof(cpuset), &cpuset);\n+\t\tif (ret) {\n+\t\t\tDRV_LOG(ERR, \"Failed to set thread affinity for \"\n+\t\t\t\"vdpa multi-threads %d.\", thrd_idx);\n+\t\t\tgoto c_thread_err;\n+\t\t}\n+\t\tsnprintf(name, sizeof(name), \"vDPA-mthread-%d\", thrd_idx);\n+\t\tret = 
pthread_setname_np(\n+\t\t\t\tconf_thread_mng.cthrd[thrd_idx].tid, name);\n+\t\tif (ret)\n+\t\t\tDRV_LOG(ERR, \"Failed to set vdpa multi-threads name %s.\",\n+\t\t\t\t\tname);\n+\t\telse\n+\t\t\tDRV_LOG(DEBUG, \"Thread name: %s.\", name);\n+\t}\n+\tpthread_mutex_unlock(&conf_thread_mng.cthrd_lock);\n+\treturn 0;\n+c_thread_err:\n+\tfor (thrd_idx = 0; thrd_idx < conf_thread_mng.max_thrds;\n+\t\tthrd_idx++)\n+\t\tmlx5_vdpa_c_thread_destroy(thrd_idx, false);\n+\tpthread_mutex_unlock(&conf_thread_mng.cthrd_lock);\n+\treturn -1;\n+}\n+\n+int\n+mlx5_vdpa_mult_threads_create(int cpu_core)\n+{\n+\tpthread_mutex_init(&conf_thread_mng.cthrd_lock, NULL);\n+\tif (mlx5_vdpa_c_thread_create(cpu_core)) {\n+\t\tDRV_LOG(ERR, \"Cannot create vDPA configuration threads.\");\n+\t\tmlx5_vdpa_mult_threads_destroy(false);\n+\t\treturn -1;\n+\t}\n+\treturn 0;\n+}\n+\n+void\n+mlx5_vdpa_mult_threads_destroy(bool need_unlock)\n+{\n+\tuint32_t thrd_idx;\n+\n+\tif (!conf_thread_mng.initializer_priv)\n+\t\treturn;\n+\tfor (thrd_idx = 0; thrd_idx < conf_thread_mng.max_thrds;\n+\t\tthrd_idx++)\n+\t\tmlx5_vdpa_c_thread_destroy(thrd_idx, need_unlock);\n+\tpthread_mutex_destroy(&conf_thread_mng.cthrd_lock);\n+\tmemset(&conf_thread_mng, 0, sizeof(struct mlx5_vdpa_conf_thread_mng));\n+}\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\nindex 2b0f5936d1..b45fbac146 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n@@ -507,7 +507,7 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)\n \tpthread_attr_t attr;\n \tchar name[16];\n \tconst struct sched_param sp = {\n-\t\t.sched_priority = sched_get_priority_max(SCHED_RR),\n+\t\t.sched_priority = sched_get_priority_max(SCHED_RR) - 1,\n \t};\n \n \tif (!priv->eventc)\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex 138b7bdbc5..599809b09b 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ 
b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -43,7 +43,7 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)\n \t\t\t    errno == EWOULDBLOCK ||\n \t\t\t    errno == EAGAIN)\n \t\t\t\tcontinue;\n-\t\t\tDRV_LOG(ERR,  \"Failed to read kickfd of virtq %d: %s\",\n+\t\t\tDRV_LOG(ERR,  \"Failed to read kickfd of virtq %d: %s.\",\n \t\t\t\tvirtq->index, strerror(errno));\n \t\t}\n \t\tbreak;\n@@ -57,7 +57,7 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)\n \trte_spinlock_unlock(&priv->db_lock);\n \tpthread_mutex_unlock(&virtq->virtq_lock);\n \tif (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {\n-\t\tDRV_LOG(ERR,  \"device %d queue %d down, skip kick handling\",\n+\t\tDRV_LOG(ERR,  \"device %d queue %d down, skip kick handling.\",\n \t\t\tpriv->vid, virtq->index);\n \t\treturn;\n \t}\n@@ -218,7 +218,7 @@ mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)\n \t\treturn -1;\n \t}\n \tif (attr.state == MLX5_VIRTQ_STATE_ERROR)\n-\t\tDRV_LOG(WARNING, \"vid %d vring %d hw error=%hhu\",\n+\t\tDRV_LOG(WARNING, \"vid %d vring %d hw error=%hhu.\",\n \t\t\tpriv->vid, index, attr.error_type);\n \treturn 0;\n }\n@@ -380,7 +380,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \tif (ret) {\n \t\tlast_avail_idx = 0;\n \t\tlast_used_idx = 0;\n-\t\tDRV_LOG(WARNING, \"Couldn't get vring base, idx are set to 0\");\n+\t\tDRV_LOG(WARNING, \"Couldn't get vring base, idx are set to 0.\");\n \t} else {\n \t\tDRV_LOG(INFO, \"vid %d: Init last_avail_idx=%d, last_used_idx=%d for \"\n \t\t\t\t\"virtq %d.\", priv->vid, last_avail_idx,\n",
    "prefixes": [
        "v1",
        "10/17"
    ]
}