get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (full update).
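
For illustration, a minimal sketch of driving this endpoint from Python with the third-party requests library. The library choice, the placeholder token, and the values written back are assumptions for the example, not part of this page; write access requires a project maintainer's API token.

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 93290

# get: read-only, no authentication needed.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/", timeout=30)
resp.raise_for_status()
print(resp.json()["state"])   # e.g. "accepted"

# patch: partial update of writable fields such as "state" or "archived".
# The token below is a placeholder for a real maintainer API token.
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": "Token <your-api-token>"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()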

GET /api/patches/93290/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 93290,
    "url": "https://patches.dpdk.org/api/patches/93290/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210517151841.57847-1-bingz@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210517151841.57847-1-bingz@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210517151841.57847-1-bingz@nvidia.com",
    "date": "2021-05-17T15:18:41",
    "name": "[v3] net/mlx5: fix loopback for DV queue",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "06abb99441c4326a007706c91916555d56ab8ba2",
    "submitter": {
        "id": 1976,
        "url": "https://patches.dpdk.org/api/people/1976/?format=api",
        "name": "Bing Zhao",
        "email": "bingz@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210517151841.57847-1-bingz@nvidia.com/mbox/",
    "series": [
        {
            "id": 17007,
            "url": "https://patches.dpdk.org/api/series/17007/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17007",
            "date": "2021-05-17T15:18:41",
            "name": "[v3] net/mlx5: fix loopback for DV queue",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/17007/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/93290/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/93290/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 00751A0A02;\n\tMon, 17 May 2021 17:19:07 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E309D4014E;\n\tMon, 17 May 2021 17:19:06 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2057.outbound.protection.outlook.com [40.107.243.57])\n by mails.dpdk.org (Postfix) with ESMTP id 2831240041;\n Mon, 17 May 2021 17:19:05 +0200 (CEST)",
            "from DM5PR2201CA0001.namprd22.prod.outlook.com (2603:10b6:4:14::11)\n by BY5PR12MB4965.namprd12.prod.outlook.com (2603:10b6:a03:1c4::16)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4129.26; Mon, 17 May\n 2021 15:19:03 +0000",
            "from DM6NAM11FT005.eop-nam11.prod.protection.outlook.com\n (2603:10b6:4:14:cafe::90) by DM5PR2201CA0001.outlook.office365.com\n (2603:10b6:4:14::11) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4108.25 via Frontend\n Transport; Mon, 17 May 2021 15:19:03 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT005.mail.protection.outlook.com (10.13.172.238) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4129.25 via Frontend Transport; Mon, 17 May 2021 15:19:02 +0000",
            "from nvidia.com (172.20.145.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Mon, 17 May\n 2021 15:19:00 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=TiLNnCzhoUssnrzN4BeqX9uNAOgmLWY8MfoYyAWf/h1yxq2XknvOK7TosB+LBDhZGFDr1agcYmeaUSPMpSxQNSIb+SWVffCehibyL3fOgnoab5Qit/1/69Liz9JgXLXH+5Ms3NsMHTBX6fMZIzpxhD85+6el6rhP4oa1ULUvzEHxbS3wg5LsIbpomhQiCik++WMqShFqgfGzOtJEnfR1K6rqx2Vru+KlMIQHi9WHP+EFgFUrz8rd685ESmFec10LpdOmOxfSvtnnvQ4RB0ZCoRHvdG5QfUMoeKypC8MCw4xyRdeB1lCAVx9C+Iu1JZrTHf/G+kYw9OKIWIL6JZPw7w==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=00LZPZJDaMTdmUBbqAH4+6pWMic9EVjTwp50J9hpS1s=;\n b=nL8HLAN7pZlQLmKaEE5nfmk/QfKcfpxAtTh1gjFBmiaL+0mjq1jUTzd5Tqg/pu5Xq7KBOQjnF0PDfYSoosqiRopYbdT9ZJkWjk59Yw8Jxlqd85muxBU78lbV1MSroM9oFI6VD3gxLQsdYNPvO1L5aFjlJUypcnUQJHXnKYB+pnLMY15iB4gumOCACxkR0IRYhgVXKWiWWpL0MW+OsJbzf+R6TBbWE8cGYfGJe+F6sXnCHriACiQjWmefR+5QoXGcZyqedViffWzieq9RW1Xi9IU3FUxqDxHhzzLOdKb5oRKGifIdrVAdJjTEavny+1s2VWX5d164HoPxjzMt8xWrgA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=00LZPZJDaMTdmUBbqAH4+6pWMic9EVjTwp50J9hpS1s=;\n b=SNpmfbL7PlEfvWKk8UgQmTxApsbCVt0IQAOrgNmN2ZfmHQI8V7B71OMp+U9CLztn/2UQke/Kb7bfGx9M+U7loaVR1TmBgh01qqqgfyW2pGzPhhu72w5UlRAUCz/Tm7jrGdsOL1OAVLb5eyI8mS5OMv8J1TfI/hlQ7I6XPegbbm5s+F/nHY0P/FhX68dQBZCF8Out1qo2/jy50gz8UNcZqJhz85KpeAA/ltCOUGQScHh4Mqr0bxDXqut7ldfu0BEibNgjzAnVzaV3LDm9liKDGtlu37X6GVfwh/6p2zFaL6eDv38n+M/5FuNpsbR9mXQLnGXvPtJRIiH0wEgxwCY9Mw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Bing Zhao <bingz@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>, <thomas@monjalon.net>",
        "CC": "<dev@dpdk.org>, <orika@nvidia.com>, <rasland@nvidia.com>,\n <stable@dpdk.org>, <talshn@nvidia.com>",
        "Date": "Mon, 17 May 2021 18:18:41 +0300",
        "Message-ID": "<20210517151841.57847-1-bingz@nvidia.com>",
        "X-Mailer": "git-send-email 2.27.0",
        "In-Reply-To": "<20210512143607.3982046-1-bingz@nvidia.com>",
        "References": "<20210512143607.3982046-1-bingz@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.145.6]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "5eb33857-bf96-4bc5-e09b-08d919471a87",
        "X-MS-TrafficTypeDiagnostic": "BY5PR12MB4965:",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <BY5PR12MB49659161F515BD670107A29BD02D9@BY5PR12MB4965.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:5797;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n mKso2TXJb3DCMtYPuDRVyd5ERmE1CNUXyBW3erHZ6NUCWUqBQBVVIEdAddGP2emXHWzwlUo8zkck87aIZcCUnfesJraDObClA5mF5puvgFgkPOcHIstE+qB8gF4lE5AmiWBeGvM5jTZ13WXqz0NOC6SyN+6ue6DXX+wWSYajY/6hjQfKZFVYRgPYHH/n+2EAGHc1xMwyYFDwNxqXIbJ5CT+BCAcX3y/GOOg9ch9yE1S6cO8/lZ3ZkK5bhxTM3vFZ39s+nPOp9GUuQuO0OdcDg5A+WnUhbuPaADl0GdrrumIJJ7nMZkgYhs6+dVdEkOxvq5ZivijkYQ7i8eehlWuFBeifyWn+06Ls2hd3sI5PF/eO5RetzHr2GKAGOKFJvUiMn1kj4F06qs+gGVJeZBIwiUylasDzkxeSiiqRiE+4gu6Wdu7r0uL3/BQvDGt5ZmZfq+jTQYH7EtyYy+J3nO12rHkAU3AyMjpH+f6pG0QclmrEeH8H88bj+qpwXlAOPge/PL3BA7wa+d/ORTH9fCamo9PWQFr6/4N+7LZa9tuoq3eBUG4/qaIAyXTYUX139EKK77DBVG+TO8ZOdfJS8HaChOPNwaYmOcPs72EpKZlFikiSSdmyp1aTGbO+vRB+eFgg6iRDKpQcEqP8Xl08Mvhdq40Vs8co5IzRnnW3J1eiuxk=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(136003)(39860400002)(396003)(346002)(376002)(36840700001)(46966006)(110136005)(6286002)(107886003)(26005)(4326008)(478600001)(47076005)(336012)(55016002)(54906003)(316002)(36906005)(356005)(186003)(8936002)(426003)(82740400003)(86362001)(2616005)(7696005)(7636003)(8676002)(16526019)(6666004)(36860700001)(82310400003)(2906002)(5660300002)(83380400001)(36756003)(70586007)(1076003)(70206006);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "17 May 2021 15:19:02.6561 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 5eb33857-bf96-4bc5-e09b-08d919471a87",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT005.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BY5PR12MB4965",
        "Subject": "[dpdk-dev] [PATCH v3] net/mlx5: fix loopback for DV queue",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "In the past, all the queues and other hardware objects were created\nthrough Verbs interface. Currently, most of the objects creation are\nmigrated to Devx interface by default, including queues. Only when\nthe DV is disabled by device arg or eswitch is enabled, all or some\nof the objects are created through Verbs interface.\n\nWhen using Devx interface to create queues, the kernel driver\nbehavior is different from the case using Verbs. The Tx loopback\ncannot work properly even if the Tx and Rx queues are configured\nwith loopback attribute. To fix the support self loopback for Tx, a\nVerbs dummy queue pair needs to be created to trigger the kernel to\nenable the global loopback capability.\n\nThis is only required when TIR is created for Rx and loopback is\nneeded. Only CQ and QP are needed for this case, no WQ(RQ) needs to\nbe created.\n\nThis requirement comes from bugzilla 645, more details can be found\nin the bugzilla link.\n\nBugzilla ID: 645\n\nFixes: 6deb19e1b2d2 (\"net/mlx5: separate Rx queue object creations\")\nCc: stable@dpdk.org\n\nSigned-off-by: Bing Zhao <bingz@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c    |   5 +-\n drivers/net/mlx5/linux/mlx5_verbs.c | 121 ++++++++++++++++++++++++++++\n drivers/net/mlx5/linux/mlx5_verbs.h |   2 +\n drivers/net/mlx5/mlx5.h             |  11 +++\n drivers/net/mlx5/mlx5_devx.c        |   2 +\n drivers/net/mlx5/mlx5_trigger.c     |  10 +++\n 6 files changed, 150 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex ef7ccba5de..534a56a555 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -1632,7 +1632,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\tpriv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;\n \t\tpriv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;\n \t\tmlx5_queue_counter_id_prepare(eth_dev);\n-\n+\t\tpriv->obj_ops.lb_dummy_queue_create =\n+\t\t\t\t\tmlx5_rxq_ibv_obj_dummy_lb_create;\n+\t\tpriv->obj_ops.lb_dummy_queue_release =\n+\t\t\t\t\tmlx5_rxq_ibv_obj_dummy_lb_release;\n \t} else {\n \t\tpriv->obj_ops = ibv_obj_ops;\n \t}\ndiff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c\nindex 0b0759f33f..d4fa202ac4 100644\n--- a/drivers/net/mlx5/linux/mlx5_verbs.c\n+++ b/drivers/net/mlx5/linux/mlx5_verbs.c\n@@ -1055,6 +1055,125 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \treturn -rte_errno;\n }\n \n+/*\n+ * Create the dummy QP with minimal resources for loopback.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)\n+{\n+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n+\tstruct ibv_context *ctx = sh->ctx;\n+\tstruct mlx5dv_qp_init_attr qp_init_attr = {0};\n+\tstruct {\n+\t\tstruct ibv_cq_init_attr_ex ibv;\n+\t\tstruct mlx5dv_cq_init_attr mlx5;\n+\t} cq_attr = {{0}};\n+\n+\tif (dev->data->dev_conf.lpbk_mode) {\n+\t\t/* Allow packet sent from NIC loop back w/o source MAC check. */\n+\t\tqp_init_attr.comp_mask |=\n+\t\t\t\tMLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;\n+\t\tqp_init_attr.create_flags |=\n+\t\t\t\tMLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;\n+\t} else {\n+\t\treturn 0;\n+\t}\n+\t/* Only need to check refcnt, 0 after \"sh\" is allocated. */\n+\tif (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {\n+\t\tMLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);\n+\t\tpriv->lb_used = 1;\n+\t\treturn 0;\n+\t}\n+\tcq_attr.ibv = (struct ibv_cq_init_attr_ex){\n+\t\t.cqe = 1,\n+\t\t.channel = NULL,\n+\t\t.comp_mask = 0,\n+\t};\n+\tcq_attr.mlx5 = (struct mlx5dv_cq_init_attr){\n+\t\t.comp_mask = 0,\n+\t};\n+\t/* Only CQ is needed, no WQ(RQ) is required in this case. 
*/\n+\tsh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,\n+\t\t\t\t\t\t\t&cq_attr.ibv,\n+\t\t\t\t\t\t\t&cq_attr.mlx5));\n+\tif (!sh->self_lb.ibv_cq) {\n+\t\tDRV_LOG(ERR, \"Port %u cannot allocate CQ for loopback.\",\n+\t\t\tdev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\tsh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,\n+\t\t\t\t&(struct ibv_qp_init_attr_ex){\n+\t\t\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n+\t\t\t\t\t.comp_mask = IBV_QP_INIT_ATTR_PD,\n+\t\t\t\t\t.pd = sh->pd,\n+\t\t\t\t\t.send_cq = sh->self_lb.ibv_cq,\n+\t\t\t\t\t.recv_cq = sh->self_lb.ibv_cq,\n+\t\t\t\t\t.cap.max_recv_wr = 1,\n+\t\t\t\t},\n+\t\t\t\t&qp_init_attr);\n+\tif (!sh->self_lb.qp) {\n+\t\tDRV_LOG(DEBUG, \"Port %u cannot allocate QP for loopback.\",\n+\t\t\tdev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\tpriv->lb_used = 1;\n+\treturn 0;\n+error:\n+\tif (sh->self_lb.ibv_cq) {\n+\t\tclaim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));\n+\t\tsh->self_lb.ibv_cq = NULL;\n+\t}\n+\t(void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);\n+\treturn -rte_errno;\n+#else\n+\tRTE_SET_USED(dev);\n+\treturn 0;\n+#endif\n+}\n+\n+/*\n+ * Release the dummy queue resources for loopback.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ */\n+void\n+mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)\n+{\n+#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n+\n+\tif (!priv->lb_used)\n+\t\treturn;\n+\tMLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));\n+\tif (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {\n+\t\tif (sh->self_lb.qp) {\n+\t\t\tclaim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));\n+\t\t\tsh->self_lb.qp = NULL;\n+\t\t}\n+\t\tif (sh->self_lb.ibv_cq) {\n+\t\t\tclaim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));\n+\t\t\tsh->self_lb.ibv_cq = NULL;\n+\t\t}\n+\t}\n+\tpriv->lb_used = 0;\n+#else\n+\tRTE_SET_USED(dev);\n+\treturn;\n+#endif\n+}\n+\n /**\n  * Release an Tx verbs queue object.\n  *\n@@ -1084,4 +1203,6 @@ struct mlx5_obj_ops ibv_obj_ops = {\n \t.txq_obj_new = mlx5_txq_ibv_obj_new,\n \t.txq_obj_modify = mlx5_ibv_modify_qp,\n \t.txq_obj_release = mlx5_txq_ibv_obj_release,\n+\t.lb_dummy_queue_create = NULL,\n+\t.lb_dummy_queue_release = NULL,\n };\ndiff --git a/drivers/net/mlx5/linux/mlx5_verbs.h b/drivers/net/mlx5/linux/mlx5_verbs.h\nindex 76a79bf4f4..f7e8e2fe98 100644\n--- a/drivers/net/mlx5/linux/mlx5_verbs.h\n+++ b/drivers/net/mlx5/linux/mlx5_verbs.h\n@@ -9,6 +9,8 @@\n \n int mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx);\n void mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj);\n+int mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev);\n+void mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev);\n \n /* Verbs ops struct */\n extern const struct mlx5_mr_ops mlx5_mr_verbs_ops;\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex b8a29dd369..32b2817bf2 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -287,6 +287,13 @@ struct mlx5_drop {\n \tstruct mlx5_rxq_obj *rxq; /* Rx queue object. */\n };\n \n+/* Loopback dummy queue resources required due to Verbs API. */\n+struct mlx5_lb_ctx {\n+\tstruct ibv_qp *qp; /* QP object. */\n+\tvoid *ibv_cq; /* Completion queue. */\n+\tuint16_t refcnt; /* Reference count for representors. 
*/\n+};\n+\n #define MLX5_COUNTERS_PER_POOL 512\n #define MLX5_MAX_PENDING_QUERIES 4\n #define MLX5_CNT_CONTAINER_RESIZE 64\n@@ -1128,6 +1135,7 @@ struct mlx5_dev_ctx_shared {\n \t/* Meter management structure. */\n \tstruct mlx5_aso_ct_pools_mng *ct_mng;\n \t/* Management data for ASO connection tracking. */\n+\tstruct mlx5_lb_ctx self_lb; /* QP to enable self loopback for Devx. */\n \tstruct mlx5_dev_shared_port port[]; /* per device port data array. */\n };\n \n@@ -1287,6 +1295,8 @@ struct mlx5_obj_ops {\n \tint (*txq_obj_modify)(struct mlx5_txq_obj *obj,\n \t\t\t      enum mlx5_txq_modify_type type, uint8_t dev_port);\n \tvoid (*txq_obj_release)(struct mlx5_txq_obj *txq_obj);\n+\tint (*lb_dummy_queue_create)(struct rte_eth_dev *dev);\n+\tvoid (*lb_dummy_queue_release)(struct rte_eth_dev *dev);\n };\n \n #define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)\n@@ -1316,6 +1326,7 @@ struct mlx5_priv {\n \tunsigned int sampler_en:1; /* Whether support sampler. */\n \tunsigned int mtr_en:1; /* Whether support meter. */\n \tunsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */\n+\tunsigned int lb_used:1; /* Loopback queue is referred to. */\n \tuint16_t domain_id; /* Switch domain identifier. */\n \tuint16_t vport_id; /* Associated VF vport index (if any). */\n \tuint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex 531a81d7fa..78b88f99b4 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -1188,4 +1188,6 @@ struct mlx5_obj_ops devx_obj_ops = {\n \t.txq_obj_new = mlx5_txq_devx_obj_new,\n \t.txq_obj_modify = mlx5_devx_modify_sq,\n \t.txq_obj_release = mlx5_txq_devx_obj_release,\n+\t.lb_dummy_queue_create = NULL,\n+\t.lb_dummy_queue_release = NULL,\n };\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 879d3171e9..ae7fcca229 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -1068,6 +1068,12 @@ mlx5_dev_start(struct rte_eth_dev *dev)\n \t\t\tdev->data->port_id, strerror(rte_errno));\n \t\tgoto error;\n \t}\n+\tif ((priv->config.devx && priv->config.dv_flow_en &&\n+\t    priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {\n+\t\tret = priv->obj_ops.lb_dummy_queue_create(dev);\n+\t\tif (ret)\n+\t\t\tgoto error;\n+\t}\n \tret = mlx5_txq_start(dev);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"port %u Tx queue allocation failed: %s\",\n@@ -1148,6 +1154,8 @@ mlx5_dev_start(struct rte_eth_dev *dev)\n \tmlx5_traffic_disable(dev);\n \tmlx5_txq_stop(dev);\n \tmlx5_rxq_stop(dev);\n+\tif (priv->obj_ops.lb_dummy_queue_release)\n+\t\tpriv->obj_ops.lb_dummy_queue_release(dev);\n \tmlx5_txpp_stop(dev); /* Stop last. */\n \trte_errno = ret; /* Restore rte_errno. */\n \treturn -rte_errno;\n@@ -1186,6 +1194,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)\n \tpriv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;\n \tmlx5_txq_stop(dev);\n \tmlx5_rxq_stop(dev);\n+\tif (priv->obj_ops.lb_dummy_queue_release)\n+\t\tpriv->obj_ops.lb_dummy_queue_release(dev);\n \tmlx5_txpp_stop(dev);\n \n \treturn 0;\n",
    "prefixes": [
        "v3"
    ]
}
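
The hyperlinked fields in the body above can be followed with further GET requests. A short sketch under the same assumptions (Python requests; the per-check field names and the local filename are illustrative) that lists the CI results behind "check": "fail" and saves the mbox for use with git am:

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/93290/", timeout=30).json()

# "checks" points at the individual CI results feeding the aggregate "check" value.
for check in requests.get(patch["checks"], timeout=30).json():
    print(check.get("context"), check.get("state"))

# "mbox" is the raw patch email; save it locally and apply with "git am".
with open("patch-93290.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"], timeout=30).content)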