get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/113055/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 113055,
    "url": "http://patches.dpdk.org/api/patches/113055/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-15-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220618084805.87315-15-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220618084805.87315-15-lizh@nvidia.com",
    "date": "2022-06-18T08:48:04",
    "name": "[v3,14/15] vdpa/mlx5: add virtq sub-resources creation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "bc3d91f49cc9d7edc381fa9edfa396694a47a62f",
    "submitter": {
        "id": 1967,
        "url": "http://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-15-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23621,
            "url": "http://patches.dpdk.org/api/series/23621/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23621",
            "date": "2022-06-18T08:47:50",
            "name": "mlx5/vdpa: optimize live migration time",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/23621/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/113055/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/113055/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 4B886A0093;\n\tSat, 18 Jun 2022 10:50:01 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0C01542B7D;\n\tSat, 18 Jun 2022 10:49:11 +0200 (CEST)",
            "from NAM10-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam10on2063.outbound.protection.outlook.com [40.107.94.63])\n by mails.dpdk.org (Postfix) with ESMTP id 8157C42B6E\n for <dev@dpdk.org>; Sat, 18 Jun 2022 10:49:09 +0200 (CEST)",
            "from CO1PR15CA0087.namprd15.prod.outlook.com (2603:10b6:101:20::31)\n by PH0PR12MB5679.namprd12.prod.outlook.com (2603:10b6:510:14f::6)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5353.14; Sat, 18 Jun\n 2022 08:49:07 +0000",
            "from CO1NAM11FT056.eop-nam11.prod.protection.outlook.com\n (2603:10b6:101:20:cafe::bb) by CO1PR15CA0087.outlook.office365.com\n (2603:10b6:101:20::31) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5353.18 via Frontend\n Transport; Sat, 18 Jun 2022 08:49:06 +0000",
            "from mail.nvidia.com (12.22.5.234) by\n CO1NAM11FT056.mail.protection.outlook.com (10.13.175.107) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5353.14 via Frontend Transport; Sat, 18 Jun 2022 08:49:06 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL101.nvidia.com\n (10.27.9.10) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Sat, 18 Jun 2022 08:49:04 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Sat, 18 Jun\n 2022 01:49:01 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=BOCH0RDubvzOE24F6b1qA1sZRH3JrxHKC5rbWBKAek7+fcfPFrk4O1cfj37gjNzdNVGwSBOQFn8LL9VzPDcnnMKhvr+CsH3T4GnUnSlFEo+QPftMTHoiQiM20w97NbgcCYklRnI1dLv9Jd8jwoHs8Vs2uGvS0rrNZj2jPNmF4ICg7wY006p1iqXq3mGUWdhI6RwsISqOzqlLc99gLm3LSs14znte4v9jEe4M9287J4RWH8tCPt+xifihC4T0v1Wugl4C8K+FRd8R3T6Ovrd+AixXdMq5PJqTYV48Ob7qi17OyxnOG1RRyFVvX6lxQwVN0GJvWTxw1X+hwO9EgvAD0w==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=Zp8jaWoKMbFHKWOBZFLoGOMo/qsk8Z6T0efYCXrA6/c=;\n b=GaUFKGYbmj8QXSRa2Bzn8RVu/9nAI7EOQ6Wdwi1oPMcCwgupBeZMKoPs3LQ1lnh1WKmPBSXVT+fee2CLbBhsVXzQ3bx0Uwjwi5Jn83EeY1jPn35vpCkhkGQPpCDNI2mb3x8nIXWG8erBwWLmsmhCdPzY5ZBCmfnZSWt5WFpxTJWuPbOrWDNNXoIw4R9l+F1oBTGnTwNHA2liSIvuq2pSKG21xKMonmJlQmLK8h7tmqApXuDFHWi+DR8d+H0Wbn7mNW7LJMTZfTL22UA8j0ifk+GeHi/JrcBp5wW9TEq2HB0DiDZHSYVt5l6JfEmgpd3Qb6Jw3zVMOeFJnihTfqUs4g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.234) smtp.rcpttodomain=redhat.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=Zp8jaWoKMbFHKWOBZFLoGOMo/qsk8Z6T0efYCXrA6/c=;\n b=qAH5RQnSkAkoam5RLsVfzF42J0uWewUcVJCBEpHu78lYoIIT1+GvA7Nf2HOhm+OaTnPb3GOH9O3Y7lAqWr7KIY32el/iQe29OzSPcHq5RPaIr/EVinAwS6ADZU4fCqEVNFLenYt682bClPreM99AkLKV5IIQ0sU3WWkN7taj3YMdyi7B5kheWTx9Bb177rigs5y5b8+XpKTFupCH9z7lkoS0vmGqm76HQRiTXD61H1XCYslrIsckK/vxSiKeMUfpyIuZ0auJoXf3ezglp4dAsi0ZX5rL19utj8+JgOEbLgdNyV8SCUoUXuyRkAZXq1Mz203SMloZ+sjtDWwp+0Jg4w==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.234)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.234 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.234; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>, <maxime.coquelin@redhat.com>, Yajun Wu\n <yajunw@nvidia.com>",
        "Subject": "[PATCH v3 14/15] vdpa/mlx5: add virtq sub-resources creation",
        "Date": "Sat, 18 Jun 2022 11:48:04 +0300",
        "Message-ID": "<20220618084805.87315-15-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220618084805.87315-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220618084805.87315-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "d6c37743-6f18-47d3-b4b8-08da5107677a",
        "X-MS-TrafficTypeDiagnostic": "PH0PR12MB5679:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <PH0PR12MB5679E9C756065E1EAC20E61ABFAE9@PH0PR12MB5679.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n WjBnJB7bBLQ8MZ32DEmuAxfznfOseKn5suJ0tyWcPDpdgGFXf43DwV4wpeBbJE9/vgF2ogJ9coAqMuodUk6U1RmB7iUkRmmY4vR/UC07p28bw547Hu0aKEw8IigsS18poWGB6QbKwfDipqhPdLOT27XyJNXZpVFcFYKOTTWD46/wmsOeW2yVCXE6JS508udUApr4TAfjJ6S+73dR2wbZTXU18KD/vY0RAPEWw7HKj4cQqCUcF+ZpGE7mI4+EWipL1FYi9D6xkiYI5AnHxiCe+neotZV2fAfAx3CUMKJyiHa+a0Xp6uJzNk5gR8983pPO96fytph6E66eOC7v680NYg/A/hgjr625Q24gq5oqNSYolV8a6uS7TYdUvnaMj5Hl6oiPc2Tlthzi64HqbBWnCTR0tfPzCb2Eo3sP/me3/8KHoVYnKyB6T/56zxX5ao7h7fmDG/FKEfobs0K398QHAyUcJ0cLoCZpHO/FTaoNXyMF3muuqhw6qROguW6SlpsTC9WFwvdhHDwbWrwS7Y/dTAs40SG0n4bJonXwdvWqiB/9lN+ztzyE1wEmRAd7TYuRL7QdievgeHw6woFR/T5aRL1iSdULYlc+EXk1jDvlhT1GGdwWX0VYEXb7w/Iusf+BKUkrGthYkJINtx28Co/rGcJRFpnxpCtwwttEgxp59Z42kLjTI9/KxbzVNcFxTGt5GAZSvApTuxMVkrCS3SZGug==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.234; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230016)(4636009)(36840700001)(40470700004)(46966006)(82310400005)(4326008)(356005)(2906002)(36756003)(70586007)(498600001)(40460700003)(30864003)(83380400001)(8936002)(8676002)(81166007)(5660300002)(36860700001)(55016003)(316002)(54906003)(1076003)(6636002)(426003)(2616005)(70206006)(7696005)(186003)(110136005)(6286002)(16526019)(336012)(47076005)(26005)(107886003)(86362001)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Jun 2022 08:49:06.7968 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n d6c37743-6f18-47d3-b4b8-08da5107677a",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.234];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT056.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH0PR12MB5679",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "pre-created virt-queue sub-resource in device probe stage\nand then modify virtqueue in device config stage.\nSteer table also need to support dummy virt-queue.\nThis accelerates the LM process and reduces its time by 40%.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\nSigned-off-by: Yajun Wu <yajunw@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.c       | 72 +++++++--------------\n drivers/vdpa/mlx5/mlx5_vdpa.h       | 17 +++--\n drivers/vdpa/mlx5/mlx5_vdpa_event.c | 11 ++--\n drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 17 +++--\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 99 +++++++++++++++++++++--------\n 5 files changed, 123 insertions(+), 93 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex d000854c08..f006a9cd3f 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -627,65 +627,39 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,\n static int\n mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)\n {\n-\tstruct mlx5_vdpa_virtq *virtq;\n+\tuint32_t max_queues;\n \tuint32_t index;\n-\tuint32_t i;\n+\tstruct mlx5_vdpa_virtq *virtq;\n \n-\tfor (index = 0; index < priv->caps.max_num_virtio_queues * 2;\n+\tfor (index = 0; index < priv->caps.max_num_virtio_queues;\n \t\tindex++) {\n \t\tvirtq = &priv->virtqs[index];\n \t\tpthread_mutex_init(&virtq->virtq_lock, NULL);\n \t}\n-\tif (!priv->queues)\n+\tif (!priv->queues || !priv->queue_size)\n \t\treturn 0;\n-\tfor (index = 0; index < (priv->queues * 2); ++index) {\n+\tmax_queues = (priv->queues < priv->caps.max_num_virtio_queues) ?\n+\t\t(priv->queues * 2) : (priv->caps.max_num_virtio_queues);\n+\tfor (index = 0; index < max_queues; ++index)\n+\t\tif (mlx5_vdpa_virtq_single_resource_prepare(priv,\n+\t\t\tindex))\n+\t\t\tgoto error;\n+\tif (mlx5_vdpa_is_modify_virtq_supported(priv))\n+\t\tif (mlx5_vdpa_steer_update(priv, true))\n+\t\t\tgoto error;\n+\treturn 0;\n+error:\n+\tfor (index = 0; index < max_queues; ++index) {\n \t\tvirtq = &priv->virtqs[index];\n-\t\tint ret = mlx5_vdpa_event_qp_prepare(priv, priv->queue_size,\n-\t\t\t\t\t-1, virtq);\n-\n-\t\tif (ret) {\n-\t\t\tDRV_LOG(ERR, \"Failed to create event QPs for virtq %d.\",\n-\t\t\t\tindex);\n-\t\t\treturn -1;\n-\t\t}\n-\t\tif (priv->caps.queue_counters_valid) {\n-\t\t\tif (!virtq->counters)\n-\t\t\t\tvirtq->counters =\n-\t\t\t\t\tmlx5_devx_cmd_create_virtio_q_counters\n-\t\t\t\t\t\t(priv->cdev->ctx);\n-\t\t\tif (!virtq->counters) {\n-\t\t\t\tDRV_LOG(ERR, \"Failed to create virtq couners for virtq\"\n-\t\t\t\t\t\" %d.\", index);\n-\t\t\t\treturn -1;\n-\t\t\t}\n-\t\t}\n-\t\tfor (i = 0; i < RTE_DIM(virtq->umems); 
++i) {\n-\t\t\tuint32_t size;\n-\t\t\tvoid *buf;\n-\t\t\tstruct mlx5dv_devx_umem *obj;\n-\n-\t\t\tsize = priv->caps.umems[i].a * priv->queue_size +\n-\t\t\t\t\tpriv->caps.umems[i].b;\n-\t\t\tbuf = rte_zmalloc(__func__, size, 4096);\n-\t\t\tif (buf == NULL) {\n-\t\t\t\tDRV_LOG(ERR, \"Cannot allocate umem %d memory for virtq\"\n-\t\t\t\t\t\t\" %u.\", i, index);\n-\t\t\t\treturn -1;\n-\t\t\t}\n-\t\t\tobj = mlx5_glue->devx_umem_reg(priv->cdev->ctx, buf,\n-\t\t\t\t\tsize, IBV_ACCESS_LOCAL_WRITE);\n-\t\t\tif (obj == NULL) {\n-\t\t\t\trte_free(buf);\n-\t\t\t\tDRV_LOG(ERR, \"Failed to register umem %d for virtq %u.\",\n-\t\t\t\t\t\ti, index);\n-\t\t\t\treturn -1;\n-\t\t\t}\n-\t\t\tvirtq->umems[i].size = size;\n-\t\t\tvirtq->umems[i].buf = buf;\n-\t\t\tvirtq->umems[i].obj = obj;\n+\t\tif (virtq->virtq) {\n+\t\t\tpthread_mutex_lock(&virtq->virtq_lock);\n+\t\t\tmlx5_vdpa_virtq_unset(virtq);\n+\t\t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t\t}\n \t}\n-\treturn 0;\n+\tif (mlx5_vdpa_is_modify_virtq_supported(priv))\n+\t\tmlx5_vdpa_steer_unset(priv);\n+\treturn -1;\n }\n \n static int\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex b6392b9d66..f353db62ac 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -277,13 +277,15 @@ int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);\n  *   The guest notification file descriptor.\n  * @param[in/out] virtq\n  *   Pointer to the virt-queue structure.\n+ * @param[in] reset\n+ *   If true, it will reset event qp.\n  *\n  * @return\n  *   0 on success, -1 otherwise and rte_errno is set.\n  */\n int\n mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n-\tint callfd, struct mlx5_vdpa_virtq *virtq);\n+\tint callfd, struct mlx5_vdpa_virtq *virtq, bool reset);\n \n /**\n  * Destroy an event QP and all its related resources.\n@@ -403,11 +405,13 @@ void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);\n  *\n  * @param[in] priv\n  *   The vdpa 
driver private structure.\n+ * @param[in] is_dummy\n+ *   If set, it is updated with dummy queue for prepare resource.\n  *\n  * @return\n  *   0 on success, a negative value otherwise.\n  */\n-int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);\n+int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv, bool is_dummy);\n \n /**\n  * Setup steering and all its related resources to enable RSS traffic from the\n@@ -581,9 +585,14 @@ mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,\n int\n mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);\n void\n-mlx5_vdpa_vq_destroy(struct mlx5_vdpa_virtq *virtq);\n-void\n mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv);\n void\n mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv);\n+bool\n+mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,\n+\t\tint index);\n+int\n+mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp);\n+void\n+mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\nindex f782b6b832..22f0920c88 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n@@ -249,7 +249,7 @@ mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)\n {\n \tunsigned int i;\n \n-\tfor (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {\n+\tfor (i = 0; i < priv->caps.max_num_virtio_queues; i++) {\n \t\tstruct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;\n \n \t\tmlx5_vdpa_queue_complete(cq);\n@@ -618,7 +618,7 @@ mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)\n \treturn 0;\n }\n \n-static int\n+int\n mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)\n {\n \tif (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_QP_2RST,\n@@ -638,7 +638,7 @@ mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)\n \n int\n mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n-\tint callfd, 
struct mlx5_vdpa_virtq *virtq)\n+\tint callfd, struct mlx5_vdpa_virtq *virtq, bool reset)\n {\n \tstruct mlx5_vdpa_event_qp *eqp = &virtq->eqp;\n \tstruct mlx5_devx_qp_attr attr = {0};\n@@ -649,11 +649,10 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t\t/* Reuse existing resources. */\n \t\teqp->cq.callfd = callfd;\n \t\t/* FW will set event qp to error state in q destroy. */\n-\t\tif (!mlx5_vdpa_qps2rst2rts(eqp)) {\n+\t\tif (reset && !mlx5_vdpa_qps2rst2rts(eqp))\n \t\t\trte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),\n \t\t\t\t\t&eqp->sw_qp.db_rec[0]);\n-\t\t\treturn 0;\n-\t\t}\n+\t\treturn 0;\n \t}\n \tif (eqp->fw_qp)\n \t\tmlx5_vdpa_event_qp_destroy(eqp);\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\nindex 4cbf09784e..c2e0a17ace 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c\n@@ -57,7 +57,7 @@ mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)\n  * -1 on error.\n  */\n static int\n-mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)\n+mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv, bool is_dummy)\n {\n \tint i;\n \tuint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,\n@@ -67,15 +67,20 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)\n \t\t\t\t\t\t      sizeof(uint32_t), 0);\n \tuint32_t k = 0, j;\n \tint ret = 0, num;\n+\tuint16_t nr_vring = is_dummy ?\n+\t(((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?\n+\t(priv->queues * 2) : priv->caps.max_num_virtio_queues) : priv->nr_virtqs;\n \n \tif (!attr) {\n \t\tDRV_LOG(ERR, \"Failed to allocate RQT attributes memory.\");\n \t\trte_errno = ENOMEM;\n \t\treturn -ENOMEM;\n \t}\n-\tfor (i = 0; i < priv->nr_virtqs; i++) {\n+\tfor (i = 0; i < nr_vring; i++) {\n \t\tif (is_virtq_recvq(i, priv->nr_virtqs) &&\n-\t\t    priv->virtqs[i].enable && priv->virtqs[i].virtq) {\n+\t\t\t(is_dummy || (priv->virtqs[i].enable &&\n+\t\t\tpriv->virtqs[i].configured)) 
&&\n+\t\t\tpriv->virtqs[i].virtq) {\n \t\t\tattr->rq_list[k] = priv->virtqs[i].virtq->id;\n \t\t\tk++;\n \t\t}\n@@ -235,12 +240,12 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)\n }\n \n int\n-mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)\n+mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv, bool is_dummy)\n {\n \tint ret;\n \n \tpthread_mutex_lock(&priv->steer_update_lock);\n-\tret = mlx5_vdpa_rqt_prepare(priv);\n+\tret = mlx5_vdpa_rqt_prepare(priv, is_dummy);\n \tif (ret == 0) {\n \t\tmlx5_vdpa_steer_unset(priv);\n \t} else if (ret < 0) {\n@@ -261,7 +266,7 @@ mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)\n int\n mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)\n {\n-\tif (mlx5_vdpa_steer_update(priv))\n+\tif (mlx5_vdpa_steer_update(priv, false))\n \t\tgoto error;\n \treturn 0;\n error:\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex 79d48a6569..58466b3c0b 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -146,10 +146,10 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n \t}\n }\n \n-static int\n+void\n mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n {\n-\tint ret = -EAGAIN;\n+\tint ret;\n \n \tmlx5_vdpa_virtq_unregister_intr_handle(virtq);\n \tif (virtq->configured) {\n@@ -157,12 +157,12 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n \t\tif (ret)\n \t\t\tDRV_LOG(WARNING, \"Failed to stop virtq %d.\",\n \t\t\t\tvirtq->index);\n-\t\tvirtq->configured = 0;\n \t\tclaim_zero(mlx5_devx_cmd_destroy(virtq->virtq));\n+\t\tvirtq->index = 0;\n+\t\tvirtq->virtq = NULL;\n+\t\tvirtq->configured = 0;\n \t}\n-\tvirtq->virtq = NULL;\n \tvirtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;\n-\treturn 0;\n }\n \n void\n@@ -175,6 +175,9 @@ mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)\n \t\tvirtq = &priv->virtqs[i];\n \t\tpthread_mutex_lock(&virtq->virtq_lock);\n \t\tmlx5_vdpa_virtq_unset(virtq);\n+\t\tif (i < (priv->queues * 
2))\n+\t\t\tmlx5_vdpa_virtq_single_resource_prepare(\n+\t\t\t\t\tpriv, i);\n \t\tpthread_mutex_unlock(&virtq->virtq_lock);\n \t}\n \tpriv->features = 0;\n@@ -258,7 +261,8 @@ mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)\n static int\n mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \t\tstruct mlx5_devx_virtq_attr *attr,\n-\t\tstruct rte_vhost_vring *vq, int index)\n+\t\tstruct rte_vhost_vring *vq,\n+\t\tint index, bool is_prepare)\n {\n \tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];\n \tuint64_t gpa;\n@@ -277,11 +281,15 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \t\t\tMLX5_VIRTQ_MODIFY_TYPE_Q_MKEY |\n \t\t\tMLX5_VIRTQ_MODIFY_TYPE_QUEUE_FEATURE_BIT_MASK |\n \t\t\tMLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE;\n-\tattr->tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));\n-\tattr->tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));\n-\tattr->tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));\n-\tattr->rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));\n-\tattr->virtio_version_1_0 =\n+\tattr->tso_ipv4 = is_prepare ? 1 :\n+\t\t!!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));\n+\tattr->tso_ipv6 = is_prepare ? 1 :\n+\t\t!!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));\n+\tattr->tx_csum = is_prepare ? 1 :\n+\t\t!!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));\n+\tattr->rx_csum = is_prepare ? 1 :\n+\t\t!!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));\n+\tattr->virtio_version_1_0 = is_prepare ? 
1 :\n \t\t!!(priv->features & (1ULL << VIRTIO_F_VERSION_1));\n \tattr->q_type =\n \t\t(priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?\n@@ -290,12 +298,12 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \t * No need event QPs creation when the guest in poll mode or when the\n \t * capability allows it.\n \t */\n-\tattr->event_mode = vq->callfd != -1 ||\n+\tattr->event_mode = is_prepare || vq->callfd != -1 ||\n \t!(priv->caps.event_mode & (1 << MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?\n \tMLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;\n \tif (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {\n-\t\tret = mlx5_vdpa_event_qp_prepare(priv,\n-\t\t\t\tvq->size, vq->callfd, virtq);\n+\t\tret = mlx5_vdpa_event_qp_prepare(priv, vq->size,\n+\t\t\t\tvq->callfd, virtq, !virtq->virtq);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR,\n \t\t\t\t\"Failed to create event QPs for virtq %d.\",\n@@ -320,7 +328,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \t\tattr->counters_obj_id = virtq->counters->id;\n \t}\n \t/* Setup 3 UMEMs for each virtq. 
*/\n-\tif (virtq->virtq) {\n+\tif (!virtq->virtq) {\n \t\tfor (i = 0; i < RTE_DIM(virtq->umems); ++i) {\n \t\t\tuint32_t size;\n \t\t\tvoid *buf;\n@@ -345,7 +353,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \t\t\tbuf = rte_zmalloc(__func__,\n \t\t\t\tsize, 4096);\n \t\t\tif (buf == NULL) {\n-\t\t\t\tDRV_LOG(ERR, \"Cannot allocate umem %d memory for virtq\"\n+\t\t\t\tDRV_LOG(ERR, \"Cannot allocate umem %d memory for virtq.\"\n \t\t\t\t\" %u.\", i, index);\n \t\t\t\treturn -1;\n \t\t\t}\n@@ -366,7 +374,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \t\t\tattr->umems[i].size = virtq->umems[i].size;\n \t\t}\n \t}\n-\tif (attr->q_type == MLX5_VIRTQ_TYPE_SPLIT) {\n+\tif (!is_prepare && attr->q_type == MLX5_VIRTQ_TYPE_SPLIT) {\n \t\tgpa = mlx5_vdpa_hva_to_gpa(priv->vmem_info.vmem,\n \t\t\t\t\t   (uint64_t)(uintptr_t)vq->desc);\n \t\tif (!gpa) {\n@@ -389,21 +397,23 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \t\t}\n \t\tattr->available_addr = gpa;\n \t}\n-\tret = rte_vhost_get_vring_base(priv->vid,\n+\tif (!is_prepare) {\n+\t\tret = rte_vhost_get_vring_base(priv->vid,\n \t\t\tindex, &last_avail_idx, &last_used_idx);\n-\tif (ret) {\n-\t\tlast_avail_idx = 0;\n-\t\tlast_used_idx = 0;\n-\t\tDRV_LOG(WARNING, \"Couldn't get vring base, idx are set to 0.\");\n-\t} else {\n-\t\tDRV_LOG(INFO, \"vid %d: Init last_avail_idx=%d, last_used_idx=%d for \"\n+\t\tif (ret) {\n+\t\t\tlast_avail_idx = 0;\n+\t\t\tlast_used_idx = 0;\n+\t\t\tDRV_LOG(WARNING, \"Couldn't get vring base, idx are set to 0.\");\n+\t\t} else {\n+\t\t\tDRV_LOG(INFO, \"vid %d: Init last_avail_idx=%d, last_used_idx=%d for \"\n \t\t\t\t\"virtq %d.\", priv->vid, last_avail_idx,\n \t\t\t\tlast_used_idx, index);\n+\t\t}\n \t}\n \tattr->hw_available_index = last_avail_idx;\n \tattr->hw_used_index = last_used_idx;\n \tattr->q_size = vq->size;\n-\tattr->mkey = priv->gpa_mkey_index;\n+\tattr->mkey = is_prepare ? 
0 : priv->gpa_mkey_index;\n \tattr->tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;\n \tattr->queue_index = index;\n \tattr->pd = priv->cdev->pdn;\n@@ -416,6 +426,39 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,\n \treturn 0;\n }\n \n+bool\n+mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,\n+\t\tint index)\n+{\n+\tstruct mlx5_devx_virtq_attr attr = {0};\n+\tstruct mlx5_vdpa_virtq *virtq;\n+\tstruct rte_vhost_vring vq = {\n+\t\t.size = priv->queue_size,\n+\t\t.callfd = -1,\n+\t};\n+\tint ret;\n+\n+\tvirtq = &priv->virtqs[index];\n+\tvirtq->index = index;\n+\tvirtq->vq_size = vq.size;\n+\tvirtq->configured = 0;\n+\tvirtq->virtq = NULL;\n+\tret = mlx5_vdpa_virtq_sub_objs_prepare(priv, &attr, &vq, index, true);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR,\n+\t\t\"Cannot prepare setup resource for virtq %d.\", index);\n+\t\treturn true;\n+\t}\n+\tif (mlx5_vdpa_is_modify_virtq_supported(priv)) {\n+\t\tvirtq->virtq =\n+\t\tmlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);\n+\t\tvirtq->priv = priv;\n+\t\tif (!virtq->virtq)\n+\t\t\treturn true;\n+\t}\n+\treturn false;\n+}\n+\n bool\n mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv)\n {\n@@ -473,7 +516,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick)\n \tvirtq->priv = priv;\n \tvirtq->stopped = 0;\n \tret = mlx5_vdpa_virtq_sub_objs_prepare(priv, &attr,\n-\t\t\t\t&vq, index);\n+\t\t\t\t&vq, index, false);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to setup update virtq attr %d.\",\n \t\t\tindex);\n@@ -746,7 +789,7 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)\n \tif (virtq->configured) {\n \t\tvirtq->enable = 0;\n \t\tif (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {\n-\t\t\tret = mlx5_vdpa_steer_update(priv);\n+\t\t\tret = mlx5_vdpa_steer_update(priv, false);\n \t\t\tif (ret)\n \t\t\t\tDRV_LOG(WARNING, \"Failed to disable steering \"\n \t\t\t\t\t\"for virtq %d.\", index);\n@@ -761,7 +804,7 @@ 
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)\n \t\t}\n \t\tvirtq->enable = 1;\n \t\tif (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {\n-\t\t\tret = mlx5_vdpa_steer_update(priv);\n+\t\t\tret = mlx5_vdpa_steer_update(priv, false);\n \t\t\tif (ret)\n \t\t\t\tDRV_LOG(WARNING, \"Failed to enable steering \"\n \t\t\t\t\t\"for virtq %d.\", index);\n",
    "prefixes": [
        "v3",
        "14/15"
    ]
}