get:
Show a patch.

patch:
Partially update a patch (only the provided fields are modified).

put:
Fully update a patch (the complete representation is replaced).

GET /api/patches/113047/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 113047,
    "url": "http://patches.dpdk.org/api/patches/113047/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-7-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220618084805.87315-7-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220618084805.87315-7-lizh@nvidia.com",
    "date": "2022-06-18T08:47:56",
    "name": "[v3,06/15] vdpa/mlx5: pre-create virtq at probe time",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a24a52b9149bfe4457f23503e915355c46b68013",
    "submitter": {
        "id": 1967,
        "url": "http://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-7-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23621,
            "url": "http://patches.dpdk.org/api/series/23621/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23621",
            "date": "2022-06-18T08:47:50",
            "name": "mlx5/vdpa: optimize live migration time",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/23621/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/113047/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/113047/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id EE066A0032;\n\tSat, 18 Jun 2022 10:49:10 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id B57A142847;\n\tSat, 18 Jun 2022 10:48:53 +0200 (CEST)",
            "from NAM11-CO1-obe.outbound.protection.outlook.com\n (mail-co1nam11on2045.outbound.protection.outlook.com [40.107.220.45])\n by mails.dpdk.org (Postfix) with ESMTP id 2683E42802\n for <dev@dpdk.org>; Sat, 18 Jun 2022 10:48:52 +0200 (CEST)",
            "from BN6PR2001CA0010.namprd20.prod.outlook.com\n (2603:10b6:404:b4::20) by CO6PR12MB5475.namprd12.prod.outlook.com\n (2603:10b6:5:354::7) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5353.13; Sat, 18 Jun\n 2022 08:48:43 +0000",
            "from BN8NAM11FT066.eop-nam11.prod.protection.outlook.com\n (2603:10b6:404:b4:cafe::e1) by BN6PR2001CA0010.outlook.office365.com\n (2603:10b6:404:b4::20) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5353.16 via Frontend\n Transport; Sat, 18 Jun 2022 08:48:43 +0000",
            "from mail.nvidia.com (12.22.5.235) by\n BN8NAM11FT066.mail.protection.outlook.com (10.13.177.138) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5353.14 via Frontend Transport; Sat, 18 Jun 2022 08:48:42 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Sat, 18 Jun 2022 08:48:41 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Sat, 18 Jun\n 2022 01:48:38 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=afik/F/8MYZeBG7ISGJA7P4vDVXU7WdbCo7JRpd4mz4vu+7FAXv6lbf5QZFa9X4doDtR4WhCJ8pG9pUrZP4jZbNIhfs0ulFZ2fTjShpo+KzvqO9/DsmaHsSTkiHow5TOhfuB8tqNVwIzBgRQ1xqPd18c7Wj33u7DE8vgbcaACoNu8NnILGAv9rMYtQ7Owpyh2XDgyEDxlVwp0OxOtOJ5fh4C9ugM6b50KWq1JbsfeZpRC9gVnr+zUXmgmmxq6023LbzxbkUVeEz8a7zaRTX90Y//N4gs4/oucWe1T+R3mwwuisJtXa3HTTAZRXXiRh7d6DqMu21SPjgX7XAxIOMf4A==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=4UCgtUYYYSAazyjejbn9a1cuUy6Faq+X456aEYCkhX0=;\n b=hmkx0mi+g70XuSEpWboZJOLTuQs2tmCnkyY78IP2Lba7Gq2WiIpfZL5KpcwoVFoRyYcYj+lGQZCnLoqGcWsaib1Ch10RAs6oYnGnBeu0x4velM0b84T70b1RIN4yi4Zw4h2Ic6/78eYOISYxaqDRYJj2to8Ce8dc64V6g0O97uAU5eXhY4EVhBf6+vCnLxiQ250OoD2z2NyWDsb//49U+rdZz4T6XFGo/3sw5FsoutIbZaloHI/RY7pGVilrSdH20IJRYbbm5kR2q1Fdx1YGllcdJWC4fd0IVJ6TW5yhDKSucxvXN9GvUoIBJ3FtABSwtnw0g1mKJ9NZKz27DmfgXQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.235) smtp.rcpttodomain=redhat.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=4UCgtUYYYSAazyjejbn9a1cuUy6Faq+X456aEYCkhX0=;\n b=Bz3KoJmIO1fUP82spUs+thIA7NRnokkCfHINc4Fy5HcoBaiczW5SR94/Ar4y0Uw6S1+LfPsBUF7Re80bKCYlQm8pyk2u//+Ynx+8bMp5R6HAsD/cclJB9FtJAkZs0RXwvFy0Rwk4M5NIvxym7mtAXU/KJVnkNWsMAGKxYP4o/1npmEt+7Vorr9CZ4A/Yvzmlm9K//hYla/EFuGPyJVrq+kLXjfVmGXEhuE5aTrOcO1W3ztYuox0qlPIbaMG/H5ILdSY9lCkARIq797e0SkcUkG5BkcWRvWhUT+cZRb4B3TC5cbdj3fJbUroSQwBTKmKybqtczaZq8qWU7WUilWHTIw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.235)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.235 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.235; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>, <maxime.coquelin@redhat.com>",
        "Subject": "[PATCH v3 06/15] vdpa/mlx5: pre-create virtq at probe time",
        "Date": "Sat, 18 Jun 2022 11:47:56 +0300",
        "Message-ID": "<20220618084805.87315-7-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220618084805.87315-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220618084805.87315-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "c516fc59-6b0c-4309-c6a2-08da51075946",
        "X-MS-TrafficTypeDiagnostic": "CO6PR12MB5475:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <CO6PR12MB547561BD274C17ED94749B64BFAE9@CO6PR12MB5475.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 5RQkF7K6CxAFDbYsIqJidRLnQzgJfsiFWuIi7K168wABMzeE+isL+Sd4axK7ez0kbBbzoy6fKfRXCF0SzC6pYBEr5k2iBwTiaXfaegCUondqf5zPdQANrIrvK4TkOdegFyEEuYy7rMcIZmQk+5ej5NSNNskjVByc+QT9MTXtTA2KEd2HaKyWdFqJ/l2JV9pA0NrMJhsZUht/1RvfaupGLRxIKA/psgQLewoBhmV7DOaODyuf19/tAqc5OHXnV+lsDM9SwSn1h67kPN2dKymZIyoHeXBaiR3X7iSn/0iK3nfhquYW1jtQVKd7sDF6IdPyz+IGds5SwVDHh477yn7HZcIil1SH8E4Ahl4FLNIcO/fcIhBefDAE5rsKbO3ZEWVz6F88bXd6IbTbnfzV1luOXhZCLcKQcIGVh9t8gs6UAZnekE54WRVe/50QyHawXJn4kImjAwTgqD9vhmLReQIfFx+doZrDfqVhoIwyGPgZPgIB+Ot4EJ54ZRrcQvZMorOW+VIbP3CnuYIWjtvxq6604/8QYf7YjB6ZKw/VR5wWkIdcRP/bqiCdJ4a/QSR2NcsVwLOtkdglIIjsdIkCPUgOwICpvXQ3Q1oJ/lhXKbZXvsaIqxfpNmI0dfZo8Nd4SuUYb/rw0S3/ekxceIaic4nObNB69NHVAs/oU4lpEgc577n40XDkxqHFYMCv6IZerpUdEQBK5T8JolhOi89x5b9m/A==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.235; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230016)(4636009)(36840700001)(46966006)(40470700004)(5660300002)(55016003)(336012)(30864003)(426003)(1076003)(81166007)(36860700001)(498600001)(8936002)(356005)(6286002)(26005)(40460700003)(83380400001)(2616005)(2906002)(6666004)(47076005)(86362001)(7696005)(186003)(8676002)(70586007)(70206006)(110136005)(16526019)(54906003)(6636002)(36756003)(316002)(4326008)(82310400005)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Jun 2022 08:48:42.8113 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n c516fc59-6b0c-4309-c6a2-08da51075946",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.235];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT066.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CO6PR12MB5475",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "dev_config operation is called in LM progress.\nLM time is very critical because all\nthe VM packets are dropped directly at that time.\n\nMove the virtq creation to probe time and\nonly modify the configuration later in\nthe dev_config stage using the new ability\nto modify virtq.\n\nThis optimization accelerates the LM process and\nreduces its time by 70%.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n doc/guides/rel_notes/release_22_07.rst |   4 +\n drivers/vdpa/mlx5/mlx5_vdpa.h          |   4 +\n drivers/vdpa/mlx5/mlx5_vdpa_lm.c       |  19 +-\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c    | 257 +++++++++++++++----------\n 4 files changed, 176 insertions(+), 108 deletions(-)",
    "diff": "diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst\nindex f2cf41def9..2056cd9ee7 100644\n--- a/doc/guides/rel_notes/release_22_07.rst\n+++ b/doc/guides/rel_notes/release_22_07.rst\n@@ -175,6 +175,10 @@ New Features\n   This is a fall-back implementation for platforms that\n   don't support vector operations.\n \n+* **Updated Nvidia mlx5 vDPA driver.**\n+\n+  * Added new devargs ``queue_size`` and ``queues`` to allow prior creation of virtq resources.\n+\n \n Removed Items\n -------------\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex bf82026e37..e5553079fe 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -80,6 +80,7 @@ struct mlx5_vdpa_virtq {\n \tuint16_t vq_size;\n \tuint8_t notifier_state;\n \tbool stopped;\n+\tuint32_t configured:1;\n \tuint32_t version;\n \tstruct mlx5_vdpa_priv *priv;\n \tstruct mlx5_devx_obj *virtq;\n@@ -489,4 +490,7 @@ mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);\n  */\n void\n mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv);\n+\n+bool\n+mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c\nindex 43a2b98255..284758ad56 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c\n@@ -12,20 +12,21 @@ int\n mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable)\n {\n \tstruct mlx5_devx_virtq_attr attr = {\n-\t\t.type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,\n+\t\t.mod_fields_bitmap =\n+\t\t\tMLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,\n \t\t.dirty_bitmap_dump_enable = enable,\n \t};\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tint i;\n \n \tfor (i = 0; i < priv->nr_virtqs; ++i) {\n \t\tattr.queue_index = i;\n-\t\tif (!priv->virtqs[i].virtq) {\n-\t\t\tDRV_LOG(DEBUG, \"virtq %d is invalid for dirty bitmap 
\"\n-\t\t\t\t\"enabling.\", i);\n+\t\tvirtq = &priv->virtqs[i];\n+\t\tif (!virtq->configured) {\n+\t\t\tDRV_LOG(DEBUG, \"virtq %d is invalid for dirty bitmap enabling.\", i);\n \t\t} else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,\n \t\t\t   &attr)) {\n-\t\t\tDRV_LOG(ERR, \"Failed to modify virtq %d for dirty \"\n-\t\t\t\t\"bitmap enabling.\", i);\n+\t\t\tDRV_LOG(ERR, \"Failed to modify virtq %d for dirty bitmap enabling.\", i);\n \t\t\treturn -1;\n \t\t}\n \t}\n@@ -37,10 +38,11 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,\n \t\t\t   uint64_t log_size)\n {\n \tstruct mlx5_devx_virtq_attr attr = {\n-\t\t.type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS,\n+\t\t.mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS,\n \t\t.dirty_bitmap_addr = log_base,\n \t\t.dirty_bitmap_size = log_size,\n \t};\n+\tstruct mlx5_vdpa_virtq *virtq;\n \tint i;\n \tint ret = mlx5_os_wrapped_mkey_create(priv->cdev->ctx, priv->cdev->pd,\n \t\t\t\t\t      priv->cdev->pdn,\n@@ -54,7 +56,8 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,\n \tattr.dirty_bitmap_mkey = priv->lm_mr.lkey;\n \tfor (i = 0; i < priv->nr_virtqs; ++i) {\n \t\tattr.queue_index = i;\n-\t\tif (!priv->virtqs[i].virtq) {\n+\t\tvirtq = &priv->virtqs[i];\n+\t\tif (!virtq->configured) {\n \t\t\tDRV_LOG(DEBUG, \"virtq %d is invalid for LM.\", i);\n \t\t} else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,\n \t\t\t\t\t\t      &attr)) {\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex 6637ba1503..6e08d619e4 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -75,6 +75,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n \tfor (i = 0; i < priv->caps.max_num_virtio_queues; i++) {\n \t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];\n \n+\t\tvirtq->configured = 0;\n \t\tfor (j = 0; j < RTE_DIM(virtq->umems); ++j) {\n \t\t\tif 
(virtq->umems[j].obj) {\n \t\t\t\tclaim_zero(mlx5_glue->devx_umem_dereg\n@@ -111,11 +112,12 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n \t\trte_intr_fd_set(virtq->intr_handle, -1);\n \t}\n \trte_intr_instance_free(virtq->intr_handle);\n-\tif (virtq->virtq) {\n+\tif (virtq->configured) {\n \t\tret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);\n \t\tif (ret)\n \t\t\tDRV_LOG(WARNING, \"Failed to stop virtq %d.\",\n \t\t\t\tvirtq->index);\n+\t\tvirtq->configured = 0;\n \t\tclaim_zero(mlx5_devx_cmd_destroy(virtq->virtq));\n \t}\n \tvirtq->virtq = NULL;\n@@ -138,7 +140,7 @@ int\n mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)\n {\n \tstruct mlx5_devx_virtq_attr attr = {\n-\t\t\t.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,\n+\t\t\t.mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_STATE,\n \t\t\t.state = state ? MLX5_VIRTQ_STATE_RDY :\n \t\t\t\t\t MLX5_VIRTQ_STATE_SUSPEND,\n \t\t\t.queue_index = virtq->index,\n@@ -153,7 +155,7 @@ mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)\n \tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];\n \tint ret;\n \n-\tif (virtq->stopped)\n+\tif (virtq->stopped || !virtq->configured)\n \t\treturn 0;\n \tret = mlx5_vdpa_virtq_modify(virtq, 0);\n \tif (ret)\n@@ -209,51 +211,54 @@ mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)\n }\n \n static int\n-mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n+mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n+\t\tstruct mlx5_devx_virtq_attr *attr,\n+\t\tstruct rte_vhost_vring *vq, int index)\n {\n \tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];\n-\tstruct rte_vhost_vring vq;\n-\tstruct mlx5_devx_virtq_attr attr = {0};\n \tuint64_t gpa;\n \tint ret;\n \tunsigned int i;\n-\tuint16_t last_avail_idx;\n-\tuint16_t last_used_idx;\n-\tuint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;\n-\tuint64_t cookie;\n-\n-\tret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);\n-\tif (ret)\n-\t\treturn -1;\n-\tif (vq.size == 
0)\n-\t\treturn 0;\n-\tvirtq->index = index;\n-\tvirtq->vq_size = vq.size;\n-\tattr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));\n-\tattr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));\n-\tattr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));\n-\tattr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));\n-\tattr.virtio_version_1_0 = !!(priv->features & (1ULL <<\n-\t\t\t\t\t\t\tVIRTIO_F_VERSION_1));\n-\tattr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?\n+\tuint16_t last_avail_idx = 0;\n+\tuint16_t last_used_idx = 0;\n+\n+\tif (virtq->virtq)\n+\t\tattr->mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_STATE |\n+\t\t\tMLX5_VIRTQ_MODIFY_TYPE_ADDR |\n+\t\t\tMLX5_VIRTQ_MODIFY_TYPE_HW_AVAILABLE_INDEX |\n+\t\t\tMLX5_VIRTQ_MODIFY_TYPE_HW_USED_INDEX |\n+\t\t\tMLX5_VIRTQ_MODIFY_TYPE_VERSION_1_0 |\n+\t\t\tMLX5_VIRTQ_MODIFY_TYPE_Q_TYPE |\n+\t\t\tMLX5_VIRTQ_MODIFY_TYPE_Q_MKEY |\n+\t\t\tMLX5_VIRTQ_MODIFY_TYPE_QUEUE_FEATURE_BIT_MASK |\n+\t\t\tMLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE;\n+\tattr->tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));\n+\tattr->tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));\n+\tattr->tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));\n+\tattr->rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));\n+\tattr->virtio_version_1_0 =\n+\t\t!!(priv->features & (1ULL << VIRTIO_F_VERSION_1));\n+\tattr->q_type =\n+\t\t(priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?\n \t\t\tMLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;\n \t/*\n \t * No need event QPs creation when the guest in poll mode or when the\n \t * capability allows it.\n \t */\n-\tattr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<\n-\t\t\t\t\t       MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?\n-\t\t\t\t\t\t      MLX5_VIRTQ_EVENT_MODE_QP :\n-\t\t\t\t\t\t  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;\n-\tif (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {\n-\t\tret = 
mlx5_vdpa_event_qp_prepare(priv, vq.size, vq.callfd,\n-\t\t\t\t\t\t&virtq->eqp);\n+\tattr->event_mode = vq->callfd != -1 ||\n+\t!(priv->caps.event_mode & (1 << MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?\n+\tMLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;\n+\tif (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {\n+\t\tret = mlx5_vdpa_event_qp_prepare(priv,\n+\t\t\t\tvq->size, vq->callfd, &virtq->eqp);\n \t\tif (ret) {\n-\t\t\tDRV_LOG(ERR, \"Failed to create event QPs for virtq %d.\",\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"Failed to create event QPs for virtq %d.\",\n \t\t\t\tindex);\n \t\t\treturn -1;\n \t\t}\n-\t\tattr.qp_id = virtq->eqp.fw_qp->id;\n+\t\tattr->mod_fields_bitmap |= MLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE;\n+\t\tattr->qp_id = virtq->eqp.fw_qp->id;\n \t} else {\n \t\tDRV_LOG(INFO, \"Virtq %d is, for sure, working by poll mode, no\"\n \t\t\t\" need event QPs and event mechanism.\", index);\n@@ -265,77 +270,82 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \t\tif (!virtq->counters) {\n \t\t\tDRV_LOG(ERR, \"Failed to create virtq couners for virtq\"\n \t\t\t\t\" %d.\", index);\n-\t\t\tgoto error;\n+\t\t\treturn -1;\n \t\t}\n-\t\tattr.counters_obj_id = virtq->counters->id;\n+\t\tattr->counters_obj_id = virtq->counters->id;\n \t}\n \t/* Setup 3 UMEMs for each virtq. */\n-\tfor (i = 0; i < RTE_DIM(virtq->umems); ++i) {\n-\t\tuint32_t size;\n-\t\tvoid *buf;\n-\t\tstruct mlx5dv_devx_umem *obj;\n-\n-\t\tsize = priv->caps.umems[i].a * vq.size + priv->caps.umems[i].b;\n-\t\tif (virtq->umems[i].size == size &&\n-\t\t    virtq->umems[i].obj != NULL) {\n-\t\t\t/* Reuse registered memory. 
*/\n-\t\t\tmemset(virtq->umems[i].buf, 0, size);\n-\t\t\tgoto reuse;\n-\t\t}\n-\t\tif (virtq->umems[i].obj)\n-\t\t\tclaim_zero(mlx5_glue->devx_umem_dereg\n+\tif (virtq->virtq) {\n+\t\tfor (i = 0; i < RTE_DIM(virtq->umems); ++i) {\n+\t\t\tuint32_t size;\n+\t\t\tvoid *buf;\n+\t\t\tstruct mlx5dv_devx_umem *obj;\n+\n+\t\t\tsize =\n+\t\tpriv->caps.umems[i].a * vq->size + priv->caps.umems[i].b;\n+\t\t\tif (virtq->umems[i].size == size &&\n+\t\t\t\tvirtq->umems[i].obj != NULL) {\n+\t\t\t\t/* Reuse registered memory. */\n+\t\t\t\tmemset(virtq->umems[i].buf, 0, size);\n+\t\t\t\tgoto reuse;\n+\t\t\t}\n+\t\t\tif (virtq->umems[i].obj)\n+\t\t\t\tclaim_zero(mlx5_glue->devx_umem_dereg\n \t\t\t\t   (virtq->umems[i].obj));\n-\t\tif (virtq->umems[i].buf)\n-\t\t\trte_free(virtq->umems[i].buf);\n-\t\tvirtq->umems[i].size = 0;\n-\t\tvirtq->umems[i].obj = NULL;\n-\t\tvirtq->umems[i].buf = NULL;\n-\t\tbuf = rte_zmalloc(__func__, size, 4096);\n-\t\tif (buf == NULL) {\n-\t\t\tDRV_LOG(ERR, \"Cannot allocate umem %d memory for virtq\"\n+\t\t\tif (virtq->umems[i].buf)\n+\t\t\t\trte_free(virtq->umems[i].buf);\n+\t\t\tvirtq->umems[i].size = 0;\n+\t\t\tvirtq->umems[i].obj = NULL;\n+\t\t\tvirtq->umems[i].buf = NULL;\n+\t\t\tbuf = rte_zmalloc(__func__,\n+\t\t\t\tsize, 4096);\n+\t\t\tif (buf == NULL) {\n+\t\t\t\tDRV_LOG(ERR, \"Cannot allocate umem %d memory for virtq\"\n \t\t\t\t\" %u.\", i, index);\n-\t\t\tgoto error;\n-\t\t}\n-\t\tobj = mlx5_glue->devx_umem_reg(priv->cdev->ctx, buf, size,\n-\t\t\t\t\t       IBV_ACCESS_LOCAL_WRITE);\n-\t\tif (obj == NULL) {\n-\t\t\tDRV_LOG(ERR, \"Failed to register umem %d for virtq %u.\",\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tobj = mlx5_glue->devx_umem_reg(priv->cdev->ctx,\n+\t\t\t\tbuf, size, IBV_ACCESS_LOCAL_WRITE);\n+\t\t\tif (obj == NULL) {\n+\t\t\t\tDRV_LOG(ERR, \"Failed to register umem %d for virtq %u.\",\n \t\t\t\ti, index);\n-\t\t\tgoto error;\n-\t\t}\n-\t\tvirtq->umems[i].size = size;\n-\t\tvirtq->umems[i].buf = buf;\n-\t\tvirtq->umems[i].obj = 
obj;\n+\t\t\t\trte_free(buf);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tvirtq->umems[i].size = size;\n+\t\t\tvirtq->umems[i].buf = buf;\n+\t\t\tvirtq->umems[i].obj = obj;\n reuse:\n-\t\tattr.umems[i].id = virtq->umems[i].obj->umem_id;\n-\t\tattr.umems[i].offset = 0;\n-\t\tattr.umems[i].size = virtq->umems[i].size;\n+\t\t\tattr->umems[i].id = virtq->umems[i].obj->umem_id;\n+\t\t\tattr->umems[i].offset = 0;\n+\t\t\tattr->umems[i].size = virtq->umems[i].size;\n+\t\t}\n \t}\n-\tif (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {\n+\tif (attr->q_type == MLX5_VIRTQ_TYPE_SPLIT) {\n \t\tgpa = mlx5_vdpa_hva_to_gpa(priv->vmem,\n-\t\t\t\t\t   (uint64_t)(uintptr_t)vq.desc);\n+\t\t\t\t\t   (uint64_t)(uintptr_t)vq->desc);\n \t\tif (!gpa) {\n \t\t\tDRV_LOG(ERR, \"Failed to get descriptor ring GPA.\");\n-\t\t\tgoto error;\n+\t\t\treturn -1;\n \t\t}\n-\t\tattr.desc_addr = gpa;\n+\t\tattr->desc_addr = gpa;\n \t\tgpa = mlx5_vdpa_hva_to_gpa(priv->vmem,\n-\t\t\t\t\t   (uint64_t)(uintptr_t)vq.used);\n+\t\t\t\t\t   (uint64_t)(uintptr_t)vq->used);\n \t\tif (!gpa) {\n \t\t\tDRV_LOG(ERR, \"Failed to get GPA for used ring.\");\n-\t\t\tgoto error;\n+\t\t\treturn -1;\n \t\t}\n-\t\tattr.used_addr = gpa;\n+\t\tattr->used_addr = gpa;\n \t\tgpa = mlx5_vdpa_hva_to_gpa(priv->vmem,\n-\t\t\t\t\t   (uint64_t)(uintptr_t)vq.avail);\n+\t\t\t\t\t   (uint64_t)(uintptr_t)vq->avail);\n \t\tif (!gpa) {\n \t\t\tDRV_LOG(ERR, \"Failed to get GPA for available ring.\");\n-\t\t\tgoto error;\n+\t\t\treturn -1;\n \t\t}\n-\t\tattr.available_addr = gpa;\n+\t\tattr->available_addr = gpa;\n \t}\n-\tret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,\n-\t\t\t\t &last_used_idx);\n+\tret = rte_vhost_get_vring_base(priv->vid,\n+\t\t\tindex, &last_avail_idx, &last_used_idx);\n \tif (ret) {\n \t\tlast_avail_idx = 0;\n \t\tlast_used_idx = 0;\n@@ -345,24 +355,71 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \t\t\t\t\"virtq %d.\", priv->vid, last_avail_idx,\n \t\t\t\tlast_used_idx, index);\n 
\t}\n-\tattr.hw_available_index = last_avail_idx;\n-\tattr.hw_used_index = last_used_idx;\n-\tattr.q_size = vq.size;\n-\tattr.mkey = priv->gpa_mkey_index;\n-\tattr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;\n-\tattr.queue_index = index;\n-\tattr.pd = priv->cdev->pdn;\n-\tattr.hw_latency_mode = priv->hw_latency_mode;\n-\tattr.hw_max_latency_us = priv->hw_max_latency_us;\n-\tattr.hw_max_pending_comp = priv->hw_max_pending_comp;\n-\tvirtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);\n+\tattr->hw_available_index = last_avail_idx;\n+\tattr->hw_used_index = last_used_idx;\n+\tattr->q_size = vq->size;\n+\tattr->mkey = priv->gpa_mkey_index;\n+\tattr->tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;\n+\tattr->queue_index = index;\n+\tattr->pd = priv->cdev->pdn;\n+\tattr->hw_latency_mode = priv->hw_latency_mode;\n+\tattr->hw_max_latency_us = priv->hw_max_latency_us;\n+\tattr->hw_max_pending_comp = priv->hw_max_pending_comp;\n+\tif (attr->hw_latency_mode || attr->hw_max_latency_us ||\n+\t\tattr->hw_max_pending_comp)\n+\t\tattr->mod_fields_bitmap |= MLX5_VIRTQ_MODIFY_TYPE_QUEUE_PERIOD;\n+\treturn 0;\n+}\n+\n+bool\n+mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv)\n+{\n+\treturn (priv->caps.vnet_modify_ext &&\n+\t\t\tpriv->caps.virtio_net_q_addr_modify &&\n+\t\t\tpriv->caps.virtio_q_index_modify) ? 
true : false;\n+}\n+\n+static int\n+mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n+{\n+\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];\n+\tstruct rte_vhost_vring vq;\n+\tstruct mlx5_devx_virtq_attr attr = {0};\n+\tint ret;\n+\tuint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;\n+\tuint64_t cookie;\n+\n+\tret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);\n+\tif (ret)\n+\t\treturn -1;\n+\tif (vq.size == 0)\n+\t\treturn 0;\n \tvirtq->priv = priv;\n-\tif (!virtq->virtq)\n+\tvirtq->stopped = 0;\n+\tret = mlx5_vdpa_virtq_sub_objs_prepare(priv, &attr,\n+\t\t\t\t&vq, index);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to setup update virtq attr %d.\",\n+\t\t\tindex);\n \t\tgoto error;\n-\tclaim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));\n-\tif (mlx5_vdpa_virtq_modify(virtq, 1))\n+\t}\n+\tif (!virtq->virtq) {\n+\t\tvirtq->index = index;\n+\t\tvirtq->vq_size = vq.size;\n+\t\tvirtq->virtq = mlx5_devx_cmd_create_virtq(priv->cdev->ctx,\n+\t\t\t&attr);\n+\t\tif (!virtq->virtq)\n+\t\t\tgoto error;\n+\t\tattr.mod_fields_bitmap = MLX5_VIRTQ_MODIFY_TYPE_STATE;\n+\t}\n+\tattr.state = MLX5_VIRTQ_STATE_RDY;\n+\tret = mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to modify virtq %d.\", index);\n \t\tgoto error;\n-\tvirtq->priv = priv;\n+\t}\n+\tclaim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));\n+\tvirtq->configured = 1;\n \trte_write32(virtq->index, priv->virtq_db_addr);\n \t/* Setup doorbell mapping. */\n \tvirtq->intr_handle =\n@@ -553,7 +610,7 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)\n \t\t\treturn 0;\n \t\tDRV_LOG(INFO, \"Virtq %d was modified, recreate it.\", index);\n \t}\n-\tif (virtq->virtq) {\n+\tif (virtq->configured) {\n \t\tvirtq->enable = 0;\n \t\tif (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {\n \t\t\tret = mlx5_vdpa_steer_update(priv);\n",
    "prefixes": [
        "v3",
        "06/15"
    ]
}