get:
Show a patch.

patch:
Partially update a patch (only the provided fields are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/113044/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 113044,
    "url": "http://patches.dpdk.org/api/patches/113044/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-5-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220618084805.87315-5-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220618084805.87315-5-lizh@nvidia.com",
    "date": "2022-06-18T08:47:54",
    "name": "[v3,04/15] vdpa/mlx5: support event qp reuse",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5d7c514d7a87dc2106d18ece0d39375a754cf6af",
    "submitter": {
        "id": 1967,
        "url": "http://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patches.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220618084805.87315-5-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23621,
            "url": "http://patches.dpdk.org/api/series/23621/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23621",
            "date": "2022-06-18T08:47:50",
            "name": "mlx5/vdpa: optimize live migration time",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/23621/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/113044/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/113044/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 45478A0032;\n\tSat, 18 Jun 2022 10:48:47 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E1FDC42823;\n\tSat, 18 Jun 2022 10:48:41 +0200 (CEST)",
            "from NAM10-BN7-obe.outbound.protection.outlook.com\n (mail-bn7nam10on2047.outbound.protection.outlook.com [40.107.92.47])\n by mails.dpdk.org (Postfix) with ESMTP id 2FF33410DC\n for <dev@dpdk.org>; Sat, 18 Jun 2022 10:48:39 +0200 (CEST)",
            "from MWHPR1401CA0018.namprd14.prod.outlook.com\n (2603:10b6:301:4b::28) by MW5PR12MB5624.namprd12.prod.outlook.com\n (2603:10b6:303:19d::19) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5353.15; Sat, 18 Jun\n 2022 08:48:37 +0000",
            "from CO1NAM11FT049.eop-nam11.prod.protection.outlook.com\n (2603:10b6:301:4b:cafe::d) by MWHPR1401CA0018.outlook.office365.com\n (2603:10b6:301:4b::28) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5353.14 via Frontend\n Transport; Sat, 18 Jun 2022 08:48:37 +0000",
            "from mail.nvidia.com (12.22.5.234) by\n CO1NAM11FT049.mail.protection.outlook.com (10.13.175.50) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5353.14 via Frontend Transport; Sat, 18 Jun 2022 08:48:37 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL101.nvidia.com\n (10.27.9.10) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Sat, 18 Jun 2022 08:48:35 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Sat, 18 Jun\n 2022 01:48:32 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=DJY6m1OfrSOVkcYFTQRlwT0MWCdbYeqsPSp6FWT/eFDsSOwUgivPrlkimmtXM+BraUY35TO3q3dIf8igb9CRurNPnJn4+DxfreilgWJ3n4Ze/hrZ0j5rfXx6nJ4aVisVLYDirHst1Y9CFtUcS/wRsh5+5olCBzSc5OVy/0qr44nYe8nZ3DfPno03yz6fCFU3c9xbiIxREAjY/nae9EG1mcTnFEAgtjS4iwt3BTWddEY97ZkcQ22sPXcZIC4PFZkk1ZU/x16zV9rqAX7PTJW6rQI08Mc94YFuf9kX7yX7rlPoDyvpigdjLTsuh+YIs0+cNS7kPoRAAA9dBU4p5h3Evg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=fz8s3Xt/ssoryB4WJelyNm9eC+AKVB6SLGme1tZ4T6s=;\n b=JU5CmaBxIeg65yZzFybnwef8e4dc8bx59Xxr0txchhlUuU18RrnuT0JIiYk+lv+mILngZIluDY+r4I4uLlzLt3gOwAStVsDhnQk+mRz9jE0v50VnwCl8UWw3WMu+oduR4sORFmUVD0KKuLNL3p7rAnZtw/jHPV+x4DM/Xs48A+PE3qaxM5ZPt44cHjFb/mXwwNf6W61Q5o4kDExSzlysWNO2USJEhqSY82YLAeXwMcpqLkAC7+iIKqfyF+Ric+bfCB/tpU+KA4LHLhGA5LjV6SxAv8PvmvTZzLMfZZmaT+dhBLikkaBNgqqRb/nwcTMRhwNnOhWDgiINUuYHVm4Xeg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.234) smtp.rcpttodomain=redhat.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=fz8s3Xt/ssoryB4WJelyNm9eC+AKVB6SLGme1tZ4T6s=;\n b=HhU0s/CWBcpS1RMYGtiywx0LDI/Tet2CHN5bD3QcUYbyM29j5N1V7OmqxwgWiWfh833LkJQ/aX51IaUckClP9Iyw5bp1hAR6gcY//am1wHgxoP7cAJjE8IDw3G3Tbo9HuiBTuEcqOqEWJBl6ktTWPtrLsOGMSh0TCklTr5AfYhroRzPK0BZb5SdpXClJRK9OCiN3smSoZytPrJqgsafoXtlVG0HMphChu7C+tyQ3NjT11HZTLvufgNCLR+SJOhs0pyonDiX+nJchoasDqBehsLTSFiYlHyLASiwus78LQN8bk5pcM+MlaLOJOIQHA4BPSiOEUyv1kNw7nfzPNLwMcA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.234)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.234 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.234; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>, <maxime.coquelin@redhat.com>, Yajun Wu\n <yajunw@nvidia.com>",
        "Subject": "[PATCH v3 04/15] vdpa/mlx5: support event qp reuse",
        "Date": "Sat, 18 Jun 2022 11:47:54 +0300",
        "Message-ID": "<20220618084805.87315-5-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220618084805.87315-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220618084805.87315-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "d85d3537-6341-42de-673c-08da510755e2",
        "X-MS-TrafficTypeDiagnostic": "MW5PR12MB5624:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <MW5PR12MB5624388DC7425C49FE47C3CFBFAE9@MW5PR12MB5624.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n DRNlzFfYFSB/xkL24Pys41EJVsX3A75al87RVuDPkapn0mvDS/9fUDKePfhQPmq/xT71xe+2R5HR6HOHMVn6V+r03LzSpHeeSXOTwKmmwoSJMN1eZUg+C1RIAAOfSq7dNK9oxWRCKgCdvDglPJeC/bDI8GDARAVqj/I3d7FIqJfArVaBrgnARyzM71N3NV7ahym5GK6Ur6oSgHw0JtSxR1fzS5bxqDWoyxEhQysL+/r7XNTEI+cf8gVytnhd+9475J5dCjjL9z4awaRL4/nbXmf+pOQmkjk47qkuNCoh0Sox2Ax9E2x5ZqBLLDznVxBy/LZrSgcLEW0f1QCqx3W7Sfu/lmSE3fcp0YPk4H8TLct3bIPsRZqQx5FQLkfxKsgykD3BRGt7AMg/wItxcVpjRqimytvgTgZYoJbDrXB105ZkTtdno6L52RPuwghdhvl54LKfk5LH9Xpe+hJ+SxFa1os2YhTO05JWkYT2NBlSlfUbwlaVTKe/o17TKkC1S45MygafnB44RBs2amUuEgUTs6SHjRvdD9oSpabULHm+Bc3t0sb6BgcGWHlwX9bh63QMyTEY8w/4g3ujtP/3zq9EHvGobfsLxY98wPpRmUSfcWrH/NrJASXG0gR+Kp/0ApfZ2WjrFWUV3V8lfFMc7A32jvFb3OZuO+eBQFZJLAnp0eBRj96tOFkp6E5J5R4HeHETXYNxb9XtCYLRHwijDV1glw==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.234; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230016)(4636009)(46966006)(36840700001)(40470700004)(83380400001)(55016003)(54906003)(81166007)(8936002)(70206006)(2906002)(107886003)(16526019)(1076003)(5660300002)(186003)(40460700003)(498600001)(316002)(26005)(426003)(86362001)(2616005)(6286002)(4326008)(36860700001)(82310400005)(356005)(7696005)(6666004)(6636002)(110136005)(336012)(47076005)(8676002)(36756003)(70586007)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Jun 2022 08:48:37.2774 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n d85d3537-6341-42de-673c-08da510755e2",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.234];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT049.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MW5PR12MB5624",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Yajun Wu <yajunw@nvidia.com>\n\nTo speed up queue create time, event qp and cq will create only once.\nEach virtq creation will reuse same event qp and cq.\n\nBecause FW will set event qp to error state during virtq destroy,\nneed modify event qp to RESET state, then modify qp to RTS state as\nusual. This can save about 1.5ms for each virtq creation.\n\nAfter SW qp reset, qp pi/ci all become 0 while cq pi/ci keep as\nprevious. Add new variable qp_ci to save SW qp ci. Move qp pi\nindependently with cq ci.\n\nAdd new function mlx5_vdpa_drain_cq to drain cq CQE after virtq\nrelease.\n\nSigned-off-by: Yajun Wu <yajunw@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.c       |  8 ++++\n drivers/vdpa/mlx5/mlx5_vdpa.h       | 12 +++++-\n drivers/vdpa/mlx5/mlx5_vdpa_event.c | 60 +++++++++++++++++++++++++++--\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  6 +--\n 4 files changed, 78 insertions(+), 8 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex faf833ee2f..ee99952e11 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -269,6 +269,7 @@ mlx5_vdpa_dev_close(int vid)\n \t}\n \tmlx5_vdpa_steer_unset(priv);\n \tmlx5_vdpa_virtqs_release(priv);\n+\tmlx5_vdpa_drain_cq(priv);\n \tif (priv->lm_mr.addr)\n \t\tmlx5_os_wrapped_mkey_destroy(&priv->lm_mr);\n \tpriv->state = MLX5_VDPA_STATE_PROBED;\n@@ -555,7 +556,14 @@ mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)\n \t\treturn 0;\n \tfor (index = 0; index < (priv->queues * 2); ++index) {\n \t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];\n+\t\tint ret = mlx5_vdpa_event_qp_prepare(priv, priv->queue_size,\n+\t\t\t\t\t-1, &virtq->eqp);\n \n+\t\tif (ret) {\n+\t\t\tDRV_LOG(ERR, \"Failed to create event QPs for virtq %d.\",\n+\t\t\t\tindex);\n+\t\t\treturn -1;\n+\t\t}\n \t\tif (priv->caps.queue_counters_valid) {\n \t\t\tif (!virtq->counters)\n \t\t\t\tvirtq->counters =\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex f6719a3c60..bf82026e37 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -55,6 +55,7 @@ struct mlx5_vdpa_event_qp {\n \tstruct mlx5_vdpa_cq cq;\n \tstruct mlx5_devx_obj *fw_qp;\n \tstruct mlx5_devx_qp sw_qp;\n+\tuint16_t qp_pi;\n };\n \n struct mlx5_vdpa_query_mr {\n@@ -226,7 +227,7 @@ int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);\n  * @return\n  *   0 on success, -1 otherwise and rte_errno is set.\n  */\n-int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n+int mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t\t\t      int callfd, struct mlx5_vdpa_event_qp *eqp);\n \n /**\n@@ -479,4 +480,13 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,\n  */\n int\n mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);\n+\n+/**\n+ * Drain virtq CQ CQE.\n+ *\n+ * 
@param[in] priv\n+ *   The vdpa driver private structure.\n+ */\n+void\n+mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\nindex 7167a98db0..b43dca9255 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n@@ -137,7 +137,7 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)\n \t\t};\n \t\tuint32_t word;\n \t} last_word;\n-\tuint16_t next_wqe_counter = cq->cq_ci;\n+\tuint16_t next_wqe_counter = eqp->qp_pi;\n \tuint16_t cur_wqe_counter;\n \tuint16_t comp;\n \n@@ -156,9 +156,10 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)\n \t\trte_io_wmb();\n \t\t/* Ring CQ doorbell record. */\n \t\tcq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);\n+\t\teqp->qp_pi += comp;\n \t\trte_io_wmb();\n \t\t/* Ring SW QP doorbell record. */\n-\t\teqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);\n+\t\teqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(eqp->qp_pi + cq_size);\n \t}\n \treturn comp;\n }\n@@ -232,6 +233,25 @@ mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)\n \treturn max;\n }\n \n+void\n+mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {\n+\t\tstruct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;\n+\n+\t\tmlx5_vdpa_queue_complete(cq);\n+\t\tif (cq->cq_obj.cq) {\n+\t\t\tcq->cq_obj.cqes[0].wqe_counter =\n+\t\t\t\trte_cpu_to_be_16(UINT16_MAX);\n+\t\t\tpriv->virtqs[i].eqp.qp_pi = 0;\n+\t\t\tif (!cq->armed)\n+\t\t\t\tmlx5_vdpa_cq_arm(priv, cq);\n+\t\t}\n+\t}\n+}\n+\n /* Wait on all CQs channel for completion event. 
*/\n static struct mlx5_vdpa_cq *\n mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)\n@@ -574,14 +594,44 @@ mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)\n \treturn 0;\n }\n \n+static int\n+mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)\n+{\n+\tif (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_QP_2RST,\n+\t\t\t\t\t  eqp->sw_qp.qp->id)) {\n+\t\tDRV_LOG(ERR, \"Failed to modify FW QP to RST state(%u).\",\n+\t\t\trte_errno);\n+\t\treturn -1;\n+\t}\n+\tif (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,\n+\t\t\tMLX5_CMD_OP_QP_2RST, eqp->fw_qp->id)) {\n+\t\tDRV_LOG(ERR, \"Failed to modify SW QP to RST state(%u).\",\n+\t\t\trte_errno);\n+\t\treturn -1;\n+\t}\n+\treturn mlx5_vdpa_qps2rts(eqp);\n+}\n+\n int\n-mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n+mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t\t\t  int callfd, struct mlx5_vdpa_event_qp *eqp)\n {\n \tstruct mlx5_devx_qp_attr attr = {0};\n \tuint16_t log_desc_n = rte_log2_u32(desc_n);\n \tuint32_t ret;\n \n+\tif (eqp->cq.cq_obj.cq != NULL && log_desc_n == eqp->cq.log_desc_n) {\n+\t\t/* Reuse existing resources. */\n+\t\teqp->cq.callfd = callfd;\n+\t\t/* FW will set event qp to error state in q destroy. */\n+\t\tif (!mlx5_vdpa_qps2rst2rts(eqp)) {\n+\t\t\trte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),\n+\t\t\t\t\t&eqp->sw_qp.db_rec[0]);\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\tif (eqp->fw_qp)\n+\t\tmlx5_vdpa_event_qp_destroy(eqp);\n \tif (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))\n \t\treturn -1;\n \tattr.pd = priv->cdev->pdn;\n@@ -608,8 +658,10 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t}\n \tif (mlx5_vdpa_qps2rts(eqp))\n \t\tgoto error;\n+\teqp->qp_pi = 0;\n \t/* First ringing. 
*/\n-\trte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),\n+\tif (eqp->sw_qp.db_rec)\n+\t\trte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),\n \t\t\t&eqp->sw_qp.db_rec[0]);\n \treturn 0;\n error:\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex c258eb3024..6637ba1503 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -87,6 +87,8 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n \t\t\t}\n \t\t\tvirtq->umems[j].size = 0;\n \t\t}\n+\t\tif (virtq->eqp.fw_qp)\n+\t\t\tmlx5_vdpa_event_qp_destroy(&virtq->eqp);\n \t}\n }\n \n@@ -117,8 +119,6 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n \t\tclaim_zero(mlx5_devx_cmd_destroy(virtq->virtq));\n \t}\n \tvirtq->virtq = NULL;\n-\tif (virtq->eqp.fw_qp)\n-\t\tmlx5_vdpa_event_qp_destroy(&virtq->eqp);\n \tvirtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;\n \treturn 0;\n }\n@@ -246,7 +246,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \t\t\t\t\t\t      MLX5_VIRTQ_EVENT_MODE_QP :\n \t\t\t\t\t\t  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;\n \tif (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {\n-\t\tret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,\n+\t\tret = mlx5_vdpa_event_qp_prepare(priv, vq.size, vq.callfd,\n \t\t\t\t\t\t&virtq->eqp);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Failed to create event QPs for virtq %d.\",\n",
    "prefixes": [
        "v3",
        "04/15"
    ]
}