get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (a full update; replaces all writable fields).
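
For illustration, the GET request shown below can be reproduced with a small libcurl client. This is a minimal sketch, assuming libcurl is installed and that anonymous read access suffices (write operations such as PUT and PATCH normally require an authenticated API token); the callback and option set are illustrative, not part of the Patchwork API itself.

#include <stdio.h>
#include <curl/curl.h>

/* Stream the response body straight to stdout. */
static size_t
on_body(char *data, size_t size, size_t nmemb, void *userdata)
{
	(void)userdata;
	return fwrite(data, size, nmemb, stdout);
}

int
main(void)
{
	CURL *curl;
	CURLcode rc;

	curl_global_init(CURL_GLOBAL_DEFAULT);
	curl = curl_easy_init();
	if (curl == NULL)
		return 1;
	/* GET the patch resource; the API serves JSON by default. */
	curl_easy_setopt(curl, CURLOPT_URL,
		"http://patches.dpdk.org/api/patches/113048/");
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, on_body);
	rc = curl_easy_perform(curl);
	curl_easy_cleanup(curl);
	curl_global_cleanup();
	return rc == CURLE_OK ? 0 : 1;
}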

GET /api/patches/113048/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 113048,
    "url": "http://patches.dpdk.org/api/patches/113048/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-10-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220618084805.87315-10-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220618084805.87315-10-lizh@nvidia.com",
    "date": "2022-06-18T08:47:59",
    "name": "[v3,09/15] vdpa/mlx5: add task ring for MT management",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "af012d0f857b48b6bf067fb996191a2a6cae0b90",
    "submitter": {
        "id": 1967,
        "url": "http://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-10-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23621,
            "url": "http://patches.dpdk.org/api/series/23621/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23621",
            "date": "2022-06-18T08:47:50",
            "name": "mlx5/vdpa: optimize live migration time",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/23621/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/113048/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/113048/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9D391A0032;\n\tSat, 18 Jun 2022 10:49:17 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C0E7942905;\n\tSat, 18 Jun 2022 10:48:56 +0200 (CEST)",
            "from NAM04-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam04on2063.outbound.protection.outlook.com [40.107.102.63])\n by mails.dpdk.org (Postfix) with ESMTP id 6978A42802\n for <dev@dpdk.org>; Sat, 18 Jun 2022 10:48:53 +0200 (CEST)",
            "from BN6PR13CA0037.namprd13.prod.outlook.com (2603:10b6:404:13e::23)\n by SJ1PR12MB6050.namprd12.prod.outlook.com (2603:10b6:a03:48b::17)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5353.15; Sat, 18 Jun\n 2022 08:48:51 +0000",
            "from BN8NAM11FT005.eop-nam11.prod.protection.outlook.com\n (2603:10b6:404:13e:cafe::8e) by BN6PR13CA0037.outlook.office365.com\n (2603:10b6:404:13e::23) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5332.9 via Frontend\n Transport; Sat, 18 Jun 2022 08:48:51 +0000",
            "from mail.nvidia.com (12.22.5.235) by\n BN8NAM11FT005.mail.protection.outlook.com (10.13.176.69) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5353.14 via Frontend Transport; Sat, 18 Jun 2022 08:48:50 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Sat, 18 Jun 2022 08:48:50 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Sat, 18 Jun\n 2022 01:48:47 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=mogJAxdSAKXC8vcgj+nRIdOs01p5M6EzocCYpWC8RJM7ls1NbuBndYgqHkF+5hXHvSV3s51UlRLInfy0j/9EzLssORXWy6gEAC3SGfV+C6sbyeFdLgk7PCIEAAFxih0lSVCwSufHxX15KeASEKMdB0Ni4DAnKmyMqwuShJPC5cYa5HK65jBvtdQGsvWOdEhabfrWUNgT5zxZZjz167d816vCU5VWJFiNo5XqQieKJIKonocgn3b9gj+lw5EmYVw+f77yeSDYT37Fk0llAJv7DTXYVFTuZ7gVnBUmNwR7TjE6y9lxE4z4//lU/TBh9bMkydBOWOr5OLK5E0uGJj5HNw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=YPu1wYmMGMNONwSklESS2qkn2Pbt4H+YVTTowcpQjjY=;\n b=FFFmHlluGqPBPtGpO2BYomAsODdo/pkUhpnIbq8KZcBgxLDK9g+gXNf7UaTEIlplY2YrAPXaxorfqYv/GPP3nnTIgMLxY3MHnIP7o18GWE7NXty2SFV0qFgDsxRvBLUf0CGwnLagKBzlnFjCq80Uu3FetVvNBcpzwA5ZaPUBcRCp86JDAg95S9A4OLXV4EPb0ZetKIfUEjLQO2SevDpfS1+Q3ETewrEwS3qk7aF71Uj2E3JfLqmnjte8Mms6cxUb/lcp8/MUzbCRNLKojoK0JCSlRq6YOw3didRA3MlzKHo3h25yHKyB52Ir/PWLu1dy8q+7WkivDwvinoG/D4VK3w==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.235) smtp.rcpttodomain=redhat.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=YPu1wYmMGMNONwSklESS2qkn2Pbt4H+YVTTowcpQjjY=;\n b=Ikaipvcjyu2KDOtXj9x9YM75OzZrtzuqzYfXbo5DGe1xLFk8+og2uGJVkPwvdC+VDsmDgklzkTXtp6Z1Cxz9DcgeaLR/d+Xm+QDpYm0v/OMxpJSsoj0jHYYyVXAmJttJQeWYdNFWhDI3dhWjcIoQr9DZ/Xp7MdUhZKBmSA7YTx0Op9FbFfwx1KKDIoed9PCdmoaMDf8HLRNW0pTQ57Opi9F36OndMLUQ60cTBbpMUQGTCA6UL4NXEug9KmUkzxdWDwQ74xhlIg2udx6U2mbT2homK7GN4gKZJqvGFCMfpnNlUctIfxCddgC5eav/7YaEaAH2BMyVxq7IbWOLqVpWXg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.235)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.235 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.235; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>, <maxime.coquelin@redhat.com>",
        "Subject": "[PATCH v3 09/15] vdpa/mlx5: add task ring for MT management",
        "Date": "Sat, 18 Jun 2022 11:47:59 +0300",
        "Message-ID": "<20220618084805.87315-10-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220618084805.87315-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220618084805.87315-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"UTF-8\"",
        "Content-Transfer-Encoding": "8bit",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "4453ab3f-fc23-45f9-df0f-08da51075dfb",
        "X-MS-TrafficTypeDiagnostic": "SJ1PR12MB6050:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <SJ1PR12MB6050AB1CD7EB79F2E3419953BFAE9@SJ1PR12MB6050.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n tLRB6w5gCuHBwGuyyQHL+CzdPltIyJQK+hvuujFy4wFsjjjcyE6u9XFolj54Wb3ATc1LGzdqjS64dv+h6UjH6AQm0M7f7+SQh8PahmJaKeG+hpPx7naEG+S15vjU5XTpFI2GgdpyGPJLp7oC4rYbXjh7qYgm+LqrVzoJaEblg2UvMh3CLyIHr4mdJaITokH9SRW9vnPYZs1E1LBhWy/EpPkbuGKR+ZJphh01NwVlhpxwvtGgt7SMKmIqMynXJ+XFQsjiiSkESQNa62gJ+mvlomvK19eoSAu1xjYXZWFQgwvR9o+Jel8pkRL6GkF9CV+vGpz1g8ZWb7URnUBDSwI0iG6fb/nl92Wg/Pc7CszYfN8nP9juRUu3Kk+zTpeiDfG0hECankVFoCCzzSVsPize5dzmBj5Rgl0UAffUiu0JSnGIr/HaRhOsQ99+lpY+yqKhE/8fgYaW9pUwcBRx3e7hiFsAauUI7UuaPRH4InikXI8+gJALSsOmfFuViUc1kYvRbznz9hthbPgAkOWqrG5vOQ9rOZK7Bks07WNyEe+xjrSLZvrL3MPjqiF1gN2qqQMfTU0yQ2FPirc6Flwtp9GOV6yJXndwvuVe0XB8Pasgqiyl5YY0I8C34UGjINpMkxk5chw8CLFx/tc1w10ODZJDiVju6HgNhTazBtZ/kb2pSpk7CuSvfuOP9BhsDC5bjnThdDWqQEx7TAonkitU1swQaw==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.235; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230016)(4636009)(46966006)(40470700004)(36840700001)(82310400005)(186003)(70206006)(2906002)(426003)(5660300002)(7696005)(83380400001)(110136005)(1076003)(16526019)(336012)(47076005)(36756003)(55016003)(70586007)(8676002)(40460700003)(6286002)(2616005)(54906003)(316002)(6666004)(4326008)(8936002)(356005)(36860700001)(6636002)(26005)(81166007)(86362001)(498600001)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Jun 2022 08:48:50.7880 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 4453ab3f-fc23-45f9-df0f-08da51075dfb",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.235];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT005.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SJ1PR12MB6050",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The configuration threads tasks need a container to\nsupport multiple tasks assigned to a thread in parallel.\nUse rte_ring container per thread to manage\nthe thread tasks without locks.\nThe caller thread from the user context opens a task to\na thread and enqueue it to the thread ring.\nThe thread polls its ring and dequeue tasks.\nThat’s why the ring should be in multi-producer\nand single consumer mode.\nAnatomic counter manages the tasks completion notification.\nThe threads report errors to the caller by\na dedicated error counter per task.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.h         |  17 ++++\n drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 115 +++++++++++++++++++++++++-\n 2 files changed, 130 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex 4e7c2557b7..2bbb868ec6 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -74,10 +74,22 @@ enum {\n };\n \n #define MLX5_VDPA_MAX_C_THRD 256\n+#define MLX5_VDPA_MAX_TASKS_PER_THRD 4096\n+#define MLX5_VDPA_TASKS_PER_DEV 64\n+\n+/* Generic task information and size must be multiple of 4B. */\n+struct mlx5_vdpa_task {\n+\tstruct mlx5_vdpa_priv *priv;\n+\tuint32_t *remaining_cnt;\n+\tuint32_t *err_cnt;\n+\tuint32_t idx;\n+} __rte_packed __rte_aligned(4);\n \n /* Generic mlx5_vdpa_c_thread information. */\n struct mlx5_vdpa_c_thread {\n \tpthread_t tid;\n+\tstruct rte_ring *rng;\n+\tpthread_cond_t c_cond;\n };\n \n struct mlx5_vdpa_conf_thread_mng {\n@@ -532,4 +544,9 @@ mlx5_vdpa_mult_threads_create(int cpu_core);\n  */\n void\n mlx5_vdpa_mult_threads_destroy(bool need_unlock);\n+\n+bool\n+mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,\n+\t\tuint32_t thrd_idx,\n+\t\tuint32_t num);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\nindex ba7d8b63b3..1fdc92d3ad 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c\n@@ -11,17 +11,103 @@\n #include <rte_alarm.h>\n #include <rte_tailq.h>\n #include <rte_ring_elem.h>\n+#include <rte_ring_peek.h>\n \n #include <mlx5_common.h>\n \n #include \"mlx5_vdpa_utils.h\"\n #include \"mlx5_vdpa.h\"\n \n+static inline uint32_t\n+mlx5_vdpa_c_thrd_ring_dequeue_bulk(struct rte_ring *r,\n+\tvoid **obj, uint32_t n, uint32_t *avail)\n+{\n+\tuint32_t m;\n+\n+\tm = rte_ring_dequeue_bulk_elem_start(r, obj,\n+\t\tsizeof(struct mlx5_vdpa_task), n, avail);\n+\tn = (m == n) ? n : 0;\n+\trte_ring_dequeue_elem_finish(r, n);\n+\treturn n;\n+}\n+\n+static inline uint32_t\n+mlx5_vdpa_c_thrd_ring_enqueue_bulk(struct rte_ring *r,\n+\tvoid * const *obj, uint32_t n, uint32_t *free)\n+{\n+\tuint32_t m;\n+\n+\tm = rte_ring_enqueue_bulk_elem_start(r, n, free);\n+\tn = (m == n) ? n : 0;\n+\trte_ring_enqueue_elem_finish(r, obj,\n+\t\tsizeof(struct mlx5_vdpa_task), n);\n+\treturn n;\n+}\n+\n+bool\n+mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,\n+\t\tuint32_t thrd_idx,\n+\t\tuint32_t num)\n+{\n+\tstruct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;\n+\tstruct mlx5_vdpa_task task[MLX5_VDPA_TASKS_PER_DEV];\n+\tuint32_t i;\n+\n+\tMLX5_ASSERT(num <= MLX5_VDPA_TASKS_PER_DEV);\n+\tfor (i = 0 ; i < num; i++) {\n+\t\ttask[i].priv = priv;\n+\t\t/* To be added later. */\n+\t}\n+\tif (!mlx5_vdpa_c_thrd_ring_enqueue_bulk(rng, (void **)&task, num, NULL))\n+\t\treturn -1;\n+\tfor (i = 0 ; i < num; i++)\n+\t\tif (task[i].remaining_cnt)\n+\t\t\t__atomic_fetch_add(task[i].remaining_cnt, 1,\n+\t\t\t\t__ATOMIC_RELAXED);\n+\t/* wake up conf thread. */\n+\tpthread_mutex_lock(&conf_thread_mng.cthrd_lock);\n+\tpthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);\n+\tpthread_mutex_unlock(&conf_thread_mng.cthrd_lock);\n+\treturn 0;\n+}\n+\n static void *\n mlx5_vdpa_c_thread_handle(void *arg)\n {\n-\t/* To be added later. 
*/\n-\treturn arg;\n+\tstruct mlx5_vdpa_conf_thread_mng *multhrd = arg;\n+\tpthread_t thread_id = pthread_self();\n+\tstruct mlx5_vdpa_priv *priv;\n+\tstruct mlx5_vdpa_task task;\n+\tstruct rte_ring *rng;\n+\tuint32_t thrd_idx;\n+\tuint32_t task_num;\n+\n+\tfor (thrd_idx = 0; thrd_idx < multhrd->max_thrds;\n+\t\tthrd_idx++)\n+\t\tif (multhrd->cthrd[thrd_idx].tid == thread_id)\n+\t\t\tbreak;\n+\tif (thrd_idx >= multhrd->max_thrds)\n+\t\treturn NULL;\n+\trng = multhrd->cthrd[thrd_idx].rng;\n+\twhile (1) {\n+\t\ttask_num = mlx5_vdpa_c_thrd_ring_dequeue_bulk(rng,\n+\t\t\t(void **)&task, 1, NULL);\n+\t\tif (!task_num) {\n+\t\t\t/* No task and condition wait. */\n+\t\t\tpthread_mutex_lock(&multhrd->cthrd_lock);\n+\t\t\tpthread_cond_wait(\n+\t\t\t\t&multhrd->cthrd[thrd_idx].c_cond,\n+\t\t\t\t&multhrd->cthrd_lock);\n+\t\t\tpthread_mutex_unlock(&multhrd->cthrd_lock);\n+\t\t}\n+\t\tpriv = task.priv;\n+\t\tif (priv == NULL)\n+\t\t\tcontinue;\n+\t\t__atomic_fetch_sub(task.remaining_cnt,\n+\t\t\t1, __ATOMIC_RELAXED);\n+\t\t/* To be added later. */\n+\t}\n+\treturn NULL;\n }\n \n static void\n@@ -34,6 +120,10 @@ mlx5_vdpa_c_thread_destroy(uint32_t thrd_idx, bool need_unlock)\n \t\tif (need_unlock)\n \t\t\tpthread_mutex_init(&conf_thread_mng.cthrd_lock, NULL);\n \t}\n+\tif (conf_thread_mng.cthrd[thrd_idx].rng) {\n+\t\trte_ring_free(conf_thread_mng.cthrd[thrd_idx].rng);\n+\t\tconf_thread_mng.cthrd[thrd_idx].rng = NULL;\n+\t}\n }\n \n static int\n@@ -45,6 +135,7 @@ mlx5_vdpa_c_thread_create(int cpu_core)\n \trte_cpuset_t cpuset;\n \tpthread_attr_t attr;\n \tuint32_t thrd_idx;\n+\tuint32_t ring_num;\n \tchar name[32];\n \tint ret;\n \n@@ -60,8 +151,26 @@ mlx5_vdpa_c_thread_create(int cpu_core)\n \t\tDRV_LOG(ERR, \"Failed to set thread priority.\");\n \t\tgoto c_thread_err;\n \t}\n+\tring_num = MLX5_VDPA_MAX_TASKS_PER_THRD / conf_thread_mng.max_thrds;\n+\tif (!ring_num) {\n+\t\tDRV_LOG(ERR, \"Invalid ring number for thread.\");\n+\t\tgoto c_thread_err;\n+\t}\n \tfor (thrd_idx = 0; thrd_idx < conf_thread_mng.max_thrds;\n \t\tthrd_idx++) {\n+\t\tsnprintf(name, sizeof(name), \"vDPA-mthread-ring-%d\",\n+\t\t\tthrd_idx);\n+\t\tconf_thread_mng.cthrd[thrd_idx].rng = rte_ring_create_elem(name,\n+\t\t\tsizeof(struct mlx5_vdpa_task), ring_num,\n+\t\t\trte_socket_id(),\n+\t\t\tRING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ |\n+\t\t\tRING_F_EXACT_SZ);\n+\t\tif (!conf_thread_mng.cthrd[thrd_idx].rng) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\"Failed to create vdpa multi-threads %d ring.\",\n+\t\t\tthrd_idx);\n+\t\t\tgoto c_thread_err;\n+\t\t}\n \t\tret = pthread_create(&conf_thread_mng.cthrd[thrd_idx].tid,\n \t\t\t\t&attr, mlx5_vdpa_c_thread_handle,\n \t\t\t\t(void *)&conf_thread_mng);\n@@ -91,6 +200,8 @@ mlx5_vdpa_c_thread_create(int cpu_core)\n \t\t\t\t\tname);\n \t\telse\n \t\t\tDRV_LOG(DEBUG, \"Thread name: %s.\", name);\n+\t\tpthread_cond_init(&conf_thread_mng.cthrd[thrd_idx].c_cond,\n+\t\t\tNULL);\n \t}\n \tpthread_mutex_unlock(&conf_thread_mng.cthrd_lock);\n \treturn 0;\n",
    "prefixes": [
        "v3",
        "09/15"
    ]
}
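
The "content" field above describes the task-ring design: each configuration thread owns a multi-producer ring, callers enqueue task descriptors and bump an atomic "remaining" counter, and the worker decrements it as tasks finish. The sketch below condenses that pattern into a self-contained form. It is illustrative only: the names (task_t, task_ring, task_add, task_poll) and the ring size are assumptions rather than the driver's identifiers, and the real implementation (including the condition-variable wakeup and the per-task error counter) is in the diff above.

#include <stdint.h>
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_ring_elem.h>

/* Hypothetical task descriptor; its size must be a multiple of 4B,
 * as the element-sized ring API requires. */
struct task_t {
	void *ctx;               /* per-task payload */
	uint32_t *remaining_cnt; /* caller's completion counter */
	uint32_t idx;
} __rte_packed __rte_aligned(4);

static struct rte_ring *task_ring;

static int
task_ring_init(void)
{
	/* HTS sync gives lock-free multi-producer/multi-consumer access;
	 * EXACT_SZ keeps the requested capacity instead of rounding up. */
	task_ring = rte_ring_create_elem("task-ring",
			sizeof(struct task_t), 256, rte_socket_id(),
			RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ |
			RING_F_EXACT_SZ);
	return task_ring == NULL ? -1 : 0;
}

/* Producer side: account for the task first, then publish it, so the
 * worker can never decrement the counter below zero. */
static int
task_add(void *ctx, uint32_t *remaining_cnt)
{
	struct task_t t = { .ctx = ctx, .remaining_cnt = remaining_cnt };

	__atomic_fetch_add(remaining_cnt, 1, __ATOMIC_RELAXED);
	if (rte_ring_enqueue_bulk_elem(task_ring, &t, sizeof(t),
			1, NULL) == 0) {
		__atomic_fetch_sub(remaining_cnt, 1, __ATOMIC_RELAXED);
		return -1; /* ring full */
	}
	return 0;
}

/* Worker side: drain one task and report completion. */
static void
task_poll(void)
{
	struct task_t t;

	if (rte_ring_dequeue_bulk_elem(task_ring, &t, sizeof(t),
			1, NULL) == 0)
		return; /* empty; the driver blocks on a condvar instead */
	/* ... perform the work on t.ctx ... */
	__atomic_fetch_sub(t.remaining_cnt, 1, __ATOMIC_RELAXED);
}

A caller that enqueued a batch via task_add() can then poll __atomic_load_n(remaining_cnt, __ATOMIC_RELAXED) until it reaches zero to learn that all of its tasks have completed, which is the completion-notification scheme the commit message describes.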