get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all of its writable fields.
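
For scripted access, here is a minimal sketch of driving these methods with Python's requests library. The token value and the target state are hypothetical; GET works anonymously on public projects, while PUT/PATCH require a Patchwork API token and maintainer rights on the project.

    import requests

    BASE = "https://patches.dpdk.org/api"
    TOKEN = "0123456789abcdef"  # hypothetical; generate a real token in your Patchwork profile

    # GET: fetch the patch shown below as JSON.
    patch = requests.get(f"{BASE}/patches/112353/").json()
    print(patch["name"], patch["state"])

    # PATCH: partial update, e.g. moving the patch to a new state.
    # "accepted" is one of the default Patchwork states; instances may differ.
    resp = requests.patch(
        f"{BASE}/patches/112353/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()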

GET /api/patches/112353/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 112353,
    "url": "https://patches.dpdk.org/api/patches/112353/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-12-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220606112109.208873-12-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220606112109.208873-12-lizh@nvidia.com",
    "date": "2022-06-06T11:20:48",
    "name": "[v1,06/17] vdpa/mlx5: support event qp reuse",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5d7c514d7a87dc2106d18ece0d39375a754cf6af",
    "submitter": {
        "id": 1967,
        "url": "https://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220606112109.208873-12-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 23344,
            "url": "https://patches.dpdk.org/api/series/23344/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=23344",
            "date": "2022-06-06T11:20:48",
            "name": null,
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/23344/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/112353/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/112353/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 6539FA0543;\n\tMon,  6 Jun 2022 13:23:03 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 9838242B9B;\n\tMon,  6 Jun 2022 13:22:14 +0200 (CEST)",
            "from NAM12-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam12on2045.outbound.protection.outlook.com [40.107.244.45])\n by mails.dpdk.org (Postfix) with ESMTP id 138BC42B99\n for <dev@dpdk.org>; Mon,  6 Jun 2022 13:22:13 +0200 (CEST)",
            "from BN6PR16CA0034.namprd16.prod.outlook.com (2603:10b6:405:14::20)\n by MWHPR1201MB0272.namprd12.prod.outlook.com (2603:10b6:301:52::13)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.15; Mon, 6 Jun\n 2022 11:22:11 +0000",
            "from BN8NAM11FT061.eop-nam11.prod.protection.outlook.com\n (2603:10b6:405:14:cafe::2f) by BN6PR16CA0034.outlook.office365.com\n (2603:10b6:405:14::20) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5314.19 via Frontend\n Transport; Mon, 6 Jun 2022 11:22:10 +0000",
            "from mail.nvidia.com (12.22.5.236) by\n BN8NAM11FT061.mail.protection.outlook.com (10.13.177.144) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5314.12 via Frontend Transport; Mon, 6 Jun 2022 11:22:10 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL109.nvidia.com\n (10.27.9.19) with Microsoft SMTP Server (TLS) id 15.0.1497.32;\n Mon, 6 Jun 2022 11:22:09 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.22; Mon, 6 Jun 2022\n 04:22:06 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=CCzxuv7GcNaIw/7OF1/L5OjUOxpGYeW4kpBqeTg7DW3d5rmQZBOkLXdu4g6XDuN+xYx4Anzrg2fNYhBQKIdstEn2KFhr56a8F38R1G80MNuiUnjc8ubLZW4eUmbSmcAFAKDEz8Z/50Nsct8XAceTDr4JlfMtadxEIo9hskLI689mJbnb3WC1nvjyHhwtV5p4LCiOESKB6Ml449YPbfDMQoQyRi9gWANGlkVNY3joQzghMvww3wIg5f5an3qfASHWcdFOU4gMeMxbyih3ycOwQuO/JaR7Aguj+pkrahosh2ytVdn3WvZInGsMmiXmP+35M9kk5HCmytqrf23DrB1ofg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=2zp3UNiQdPV3kY553RSNWiyXlxSBg95xFLWHTx0l1LI=;\n b=BuAW6ycYCYSakQNfSRyHDWyZZrEnCuZj9DQQgr3fIvBkRgGVD8jyeqOJP7YHhxdq4wy6IGMjJJA9LjOD6z8v5yPAEwrmCXiw65e8Nca526VuiT5u9WckRn8B7atWCgKh7HT/wOWI0igjJjajz0271c0K2KhKHDQRU5URifQdxl6V3zsn6gehDHJg/gVg6RU4GlOT7vgRXGua3qYOXhWvoyaNxXsjGySNOQSBIv9UMR9LAfMydt7uIvBgwZyIESfjCuN13e3aCJB2uu+74yi3b306MW+yqF47sKzL+nOPnPqjFRQntGBfYKWp3nXD3uLNjMEK/tWW5pFBtH5p9avKVQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.236) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=2zp3UNiQdPV3kY553RSNWiyXlxSBg95xFLWHTx0l1LI=;\n b=byoa3eZGv07qO53eftgwJrCTyKOFPpUf5WS1PogTvWWxgKqPmiiVDfBgHhv03w4/6KmhjmCbDiRsG+XVnero1vQ+UfpkxdqizCxgeRyxZYmxkKXSvPuMiy/2skrwcgYzUqGoJIOWm4+tvRNeKKd5zxdAwi90E/CK5COuOJrzgAZq+NKRBwPjZBtKeaFQ4CuPazF0A4RXaRfPq/BL1U8U/PKO0gYOrEk8Rny9xIzo9+KUT2dwqd+7vsMTMTFqMNLoOKHszUAu2reIAXR1MHZWvUB0QXAbXy4Gyue0EEf56ZV2sqfEUpoconPoJlCMNIXg141HdGFdD7OXyUg69dkTdQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.236)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.236 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.236; helo=mail.nvidia.com; pr=C",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,\n <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>, Yajun Wu <yajunw@nvidia.com>",
        "Subject": "[PATCH v1 06/17] vdpa/mlx5: support event qp reuse",
        "Date": "Mon, 6 Jun 2022 14:20:48 +0300",
        "Message-ID": "<20220606112109.208873-12-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<20220606112109.208873-1-lizh@nvidia.com>",
        "References": "<20220408075606.33056-1-lizh@nvidia.com>\n <20220606112109.208873-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "2307824a-a604-421d-b255-08da47aecc61",
        "X-MS-TrafficTypeDiagnostic": "MWHPR1201MB0272:EE_",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <MWHPR1201MB0272D8C3DF897274483DE6A0BFA29@MWHPR1201MB0272.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 4xBASMewzYlhAzWvdmFaHMgGX+DLBGBAqTkq0O5KdRsPwQ6OFILiskATEOvay9LjnI5orN/h9jB5kvQkgYkWKxx/sbPgk9vdJwwEl8G8kUe0T8SIdoeSMQwABX5uN1QXCozXPnsVlQsToGhsPSloSnZmEyhVzlWAKe29vxtv+ZliBsiLtRAD9YqIUND+N+Pilb5UIXKsOPW50qpJmb0qz3RLO8G2PUdtU3RcNZZarW9Aq2Y+y4Nl9+uS8/qyKxttO6tNUrCFzBbq0MBmc1a/mNXNO3dP/7o9lSnxBDclPSNbRgCIJ0yn1iXK/VzwdIporUqabDg3cdupR+HSPyxeueUe0SSFPnu/2DvAPT3jt65Z7XqCYS9lX+ydLi4Oa/Bp5Jr7tKp7lWN3tprCVwbUoB+bdk1qNap8iaNhu6G+W8h2Y2Afy4T9FflAI9N3KjjPiMicZzaS6FBCko/Ys1bRk/bwxq8fxHjClbLVHfHo1pwHiEGSN1ZwyUS/11i+jDGgdyrnnTqnpfnj5NXKCmsUB2BuP9XjWSnvcA387QEFSyWVoEMTrEE5vSPp7DPwjSQvaUbWWNygEoGWdc2hv7MOfbiU2ob3YgZzZVXNF0888qo/ruaqRndXoJ162QCLuMCev/n5krd+rZn5ADK+JC7LN5MhRnK3clJ0u7tP89L2A4Y8SHV1f09++sdqqiQfI3HCj8G+C1+cC2j9Yp3lsesGSA==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.236; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(46966006)(40470700004)(36840700001)(110136005)(36860700001)(54906003)(316002)(6286002)(82310400005)(6666004)(8676002)(4326008)(7696005)(70586007)(2906002)(70206006)(6636002)(86362001)(8936002)(81166007)(36756003)(107886003)(40460700003)(83380400001)(26005)(55016003)(5660300002)(2616005)(1076003)(336012)(186003)(426003)(508600001)(356005)(47076005)(16526019)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "06 Jun 2022 11:22:10.3464 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 2307824a-a604-421d-b255-08da47aecc61",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.236];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT061.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MWHPR1201MB0272",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Yajun Wu <yajunw@nvidia.com>\n\nTo speed up queue create time, event qp and cq will create only once.\nEach virtq creation will reuse same event qp and cq.\n\nBecause FW will set event qp to error state during virtq destroy,\nneed modify event qp to RESET state, then modify qp to RTS state as\nusual. This can save about 1.5ms for each virtq creation.\n\nAfter SW qp reset, qp pi/ci all become 0 while cq pi/ci keep as\nprevious. Add new variable qp_ci to save SW qp ci. Move qp pi\nindependently with cq ci.\n\nAdd new function mlx5_vdpa_drain_cq to drain cq CQE after virtq\nrelease.\n\nSigned-off-by: Yajun Wu <yajunw@nvidia.com>\n---\n drivers/vdpa/mlx5/mlx5_vdpa.c       |  8 ++++\n drivers/vdpa/mlx5/mlx5_vdpa.h       | 12 +++++-\n drivers/vdpa/mlx5/mlx5_vdpa_event.c | 60 +++++++++++++++++++++++++++--\n drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  6 +--\n 4 files changed, 78 insertions(+), 8 deletions(-)",
    "diff": "diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c\nindex faf833ee2f..ee99952e11 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c\n@@ -269,6 +269,7 @@ mlx5_vdpa_dev_close(int vid)\n \t}\n \tmlx5_vdpa_steer_unset(priv);\n \tmlx5_vdpa_virtqs_release(priv);\n+\tmlx5_vdpa_drain_cq(priv);\n \tif (priv->lm_mr.addr)\n \t\tmlx5_os_wrapped_mkey_destroy(&priv->lm_mr);\n \tpriv->state = MLX5_VDPA_STATE_PROBED;\n@@ -555,7 +556,14 @@ mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)\n \t\treturn 0;\n \tfor (index = 0; index < (priv->queues * 2); ++index) {\n \t\tstruct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];\n+\t\tint ret = mlx5_vdpa_event_qp_prepare(priv, priv->queue_size,\n+\t\t\t\t\t-1, &virtq->eqp);\n \n+\t\tif (ret) {\n+\t\t\tDRV_LOG(ERR, \"Failed to create event QPs for virtq %d.\",\n+\t\t\t\tindex);\n+\t\t\treturn -1;\n+\t\t}\n \t\tif (priv->caps.queue_counters_valid) {\n \t\t\tif (!virtq->counters)\n \t\t\t\tvirtq->counters =\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h\nindex f6719a3c60..bf82026e37 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa.h\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h\n@@ -55,6 +55,7 @@ struct mlx5_vdpa_event_qp {\n \tstruct mlx5_vdpa_cq cq;\n \tstruct mlx5_devx_obj *fw_qp;\n \tstruct mlx5_devx_qp sw_qp;\n+\tuint16_t qp_pi;\n };\n \n struct mlx5_vdpa_query_mr {\n@@ -226,7 +227,7 @@ int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);\n  * @return\n  *   0 on success, -1 otherwise and rte_errno is set.\n  */\n-int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n+int mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t\t\t      int callfd, struct mlx5_vdpa_event_qp *eqp);\n \n /**\n@@ -479,4 +480,13 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,\n  */\n int\n mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);\n+\n+/**\n+ * Drain virtq CQ CQE.\n+ *\n+ * @param[in] priv\n+ *   The vdpa driver private structure.\n+ */\n+void\n+mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv);\n #endif /* RTE_PMD_MLX5_VDPA_H_ */\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\nindex 7167a98db0..b43dca9255 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c\n@@ -137,7 +137,7 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)\n \t\t};\n \t\tuint32_t word;\n \t} last_word;\n-\tuint16_t next_wqe_counter = cq->cq_ci;\n+\tuint16_t next_wqe_counter = eqp->qp_pi;\n \tuint16_t cur_wqe_counter;\n \tuint16_t comp;\n \n@@ -156,9 +156,10 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)\n \t\trte_io_wmb();\n \t\t/* Ring CQ doorbell record. */\n \t\tcq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);\n+\t\teqp->qp_pi += comp;\n \t\trte_io_wmb();\n \t\t/* Ring SW QP doorbell record. 
*/\n-\t\teqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);\n+\t\teqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(eqp->qp_pi + cq_size);\n \t}\n \treturn comp;\n }\n@@ -232,6 +233,25 @@ mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)\n \treturn max;\n }\n \n+void\n+mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {\n+\t\tstruct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;\n+\n+\t\tmlx5_vdpa_queue_complete(cq);\n+\t\tif (cq->cq_obj.cq) {\n+\t\t\tcq->cq_obj.cqes[0].wqe_counter =\n+\t\t\t\trte_cpu_to_be_16(UINT16_MAX);\n+\t\t\tpriv->virtqs[i].eqp.qp_pi = 0;\n+\t\t\tif (!cq->armed)\n+\t\t\t\tmlx5_vdpa_cq_arm(priv, cq);\n+\t\t}\n+\t}\n+}\n+\n /* Wait on all CQs channel for completion event. */\n static struct mlx5_vdpa_cq *\n mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)\n@@ -574,14 +594,44 @@ mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)\n \treturn 0;\n }\n \n+static int\n+mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)\n+{\n+\tif (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_QP_2RST,\n+\t\t\t\t\t  eqp->sw_qp.qp->id)) {\n+\t\tDRV_LOG(ERR, \"Failed to modify FW QP to RST state(%u).\",\n+\t\t\trte_errno);\n+\t\treturn -1;\n+\t}\n+\tif (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,\n+\t\t\tMLX5_CMD_OP_QP_2RST, eqp->fw_qp->id)) {\n+\t\tDRV_LOG(ERR, \"Failed to modify SW QP to RST state(%u).\",\n+\t\t\trte_errno);\n+\t\treturn -1;\n+\t}\n+\treturn mlx5_vdpa_qps2rts(eqp);\n+}\n+\n int\n-mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n+mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t\t\t  int callfd, struct mlx5_vdpa_event_qp *eqp)\n {\n \tstruct mlx5_devx_qp_attr attr = {0};\n \tuint16_t log_desc_n = rte_log2_u32(desc_n);\n \tuint32_t ret;\n \n+\tif (eqp->cq.cq_obj.cq != NULL && log_desc_n == eqp->cq.log_desc_n) {\n+\t\t/* Reuse existing resources. */\n+\t\teqp->cq.callfd = callfd;\n+\t\t/* FW will set event qp to error state in q destroy. */\n+\t\tif (!mlx5_vdpa_qps2rst2rts(eqp)) {\n+\t\t\trte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),\n+\t\t\t\t\t&eqp->sw_qp.db_rec[0]);\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\tif (eqp->fw_qp)\n+\t\tmlx5_vdpa_event_qp_destroy(eqp);\n \tif (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))\n \t\treturn -1;\n \tattr.pd = priv->cdev->pdn;\n@@ -608,8 +658,10 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,\n \t}\n \tif (mlx5_vdpa_qps2rts(eqp))\n \t\tgoto error;\n+\teqp->qp_pi = 0;\n \t/* First ringing. 
*/\n-\trte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),\n+\tif (eqp->sw_qp.db_rec)\n+\t\trte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),\n \t\t\t&eqp->sw_qp.db_rec[0]);\n \treturn 0;\n error:\ndiff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\nindex c258eb3024..6637ba1503 100644\n--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c\n@@ -87,6 +87,8 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)\n \t\t\t}\n \t\t\tvirtq->umems[j].size = 0;\n \t\t}\n+\t\tif (virtq->eqp.fw_qp)\n+\t\t\tmlx5_vdpa_event_qp_destroy(&virtq->eqp);\n \t}\n }\n \n@@ -117,8 +119,6 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)\n \t\tclaim_zero(mlx5_devx_cmd_destroy(virtq->virtq));\n \t}\n \tvirtq->virtq = NULL;\n-\tif (virtq->eqp.fw_qp)\n-\t\tmlx5_vdpa_event_qp_destroy(&virtq->eqp);\n \tvirtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;\n \treturn 0;\n }\n@@ -246,7 +246,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)\n \t\t\t\t\t\t      MLX5_VIRTQ_EVENT_MODE_QP :\n \t\t\t\t\t\t  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;\n \tif (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {\n-\t\tret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,\n+\t\tret = mlx5_vdpa_event_qp_prepare(priv, vq.size, vq.callfd,\n \t\t\t\t\t\t&virtq->eqp);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Failed to create event QPs for virtq %d.\",\n",
    "prefixes": [
        "v1",
        "06/17"
    ]
}
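
The response links its related resources (mbox, comments, checks) rather than embedding them. A short sketch of following those links, again with requests; the output file name is arbitrary:

    import requests

    patch = requests.get("https://patches.dpdk.org/api/patches/112353/").json()

    # Download the raw patch as an mbox, suitable for `git am`.
    with open("0006-vdpa-mlx5-support-event-qp-reuse.mbox", "w") as f:
        f.write(requests.get(patch["mbox"]).text)

    # List per-CI check results; the patch-level "check" field above
    # ("pending") is the aggregate of these.
    for check in requests.get(patch["checks"]).json():
        print(check["context"], check["state"])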