get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are modified).

put:
Update a patch (full update; all writable fields are replaced).

GET /api/patches/103617/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 103617,
    "url": "http://patches.dpdk.org/api/patches/103617/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211103075838.1486056-9-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211103075838.1486056-9-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211103075838.1486056-9-xuemingl@nvidia.com",
    "date": "2021-11-03T07:58:32",
    "name": "[v3,08/14] net/mlx5: move Rx queue reference count",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f124517367bdb3ea4155bf04a1d4002e474ec17b",
    "submitter": {
        "id": 1904,
        "url": "http://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211103075838.1486056-9-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 20258,
            "url": "http://patches.dpdk.org/api/series/20258/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=20258",
            "date": "2021-11-03T07:58:24",
            "name": "net/mlx5: support shared Rx queue",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/20258/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/103617/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/103617/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 15798A0C53;\n\tWed,  3 Nov 2021 09:00:15 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 2D6B4411A7;\n\tWed,  3 Nov 2021 08:59:53 +0100 (CET)",
            "from NAM04-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam08on2089.outbound.protection.outlook.com [40.107.101.89])\n by mails.dpdk.org (Postfix) with ESMTP id 220F341183\n for <dev@dpdk.org>; Wed,  3 Nov 2021 08:59:51 +0100 (CET)",
            "from MWHPR12CA0026.namprd12.prod.outlook.com (2603:10b6:301:2::12)\n by BY5PR12MB4259.namprd12.prod.outlook.com (2603:10b6:a03:202::17) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.10; Wed, 3 Nov\n 2021 07:59:48 +0000",
            "from CO1NAM11FT054.eop-nam11.prod.protection.outlook.com\n (2603:10b6:301:2:cafe::1f) by MWHPR12CA0026.outlook.office365.com\n (2603:10b6:301:2::12) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.11 via Frontend\n Transport; Wed, 3 Nov 2021 07:59:48 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n CO1NAM11FT054.mail.protection.outlook.com (10.13.174.70) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4669.10 via Frontend Transport; Wed, 3 Nov 2021 07:59:48 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Wed, 3 Nov\n 2021 07:59:45 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=jl0jaeZCslKPFIM4KkkexP3LhOIHdPcf9TWf8ZBeCJklIsfkV2OxxnHl75o/fiLiBPRCn1tlbpy25SliYlf1sPkgwG6sN94PocsTxgFQa1qTV4ZuAA1v+KId+lxBKmx/bRxBj2sj5gfaWgVZmlZzXg4WQYgLVNs7zdkZb7kVqsShO8eBFhiBu8siCIhuyrOXM+fWaEU4SS/6UMN0Zao/9rSdbcSPtZUDs9AVlgY0Mz0/kHUGD7uMcl0mcetS3Fmqorr6yozqTnEEkb+mRu/y2BP3zB3gw0+JdTnVTecQDWIytmvgLrsHaa344zhPdWbhIdU7Xv3SzH4c1MHwKWMPeQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=NmfwGXVFAoFHmvrJ3XEnCtGnMXLMROEkPIHyM8NS2dw=;\n b=KDzDYModw9ONNke5eyz5eAzo9rO876twKKwUCcu18XoWs5zZEMYuFA0+AEjJH/iuHX+ePyAcVQj28qIkof0pVqH6xLrI2LeLW5JfIN1O7MPCWGq7/PqWFL1YE2mBPL9cZfUjP6j4OUtopoNmvaMllFFmj2xim5GOZn+onWxkkGret/K1bcqh1HcWswb91YwBYsHfpW1f7K2+W02aA/+tDMDeD8rY5Jtyjl3bPFM23kIHZxRx9tA2Eqn0Vkc4rE3kT81FdyS50u3CQUmRUPAVKeD+sykduk1wMQiIey7vJvUfyE9fSfm8R2GkrLy2/HYqtR/DubxCVt4/g697bLNWfw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=NmfwGXVFAoFHmvrJ3XEnCtGnMXLMROEkPIHyM8NS2dw=;\n b=s4c1zSEphGLiTAdClBOBd21hZX2T6mUBbqxidClcRcRxOmIoBUM4t5KrMXnMdJ5rzw5xsjdZkVLGpgAd6Os4Gdbj8/Q5eS6wkp6YssU3KNK+5wcF9OyTpzVEigjAR5Lkhw6Zd7AV9iblI/3CrnWRJGeFlxtzIX111ywYg1PW5XFr5DTwEIaAMCNxThM1pIpBZaXVFV7ch84jY8PTEI8fRL1fBsQs2h1+pQgGj0jjfeoZOIReqOd3cv9Ir6359o60fGRbXiWU/lCgNnpZU7SIBpO07GBFdTDuZ2TkOb7lY9prYMEcqmUb7xGnvL5WE2Cpt0JH//LhtpaQqhA7RARgnQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>, Matan Azrad\n <matan@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Wed, 3 Nov 2021 15:58:32 +0800",
        "Message-ID": "<20211103075838.1486056-9-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20211103075838.1486056-1-xuemingl@nvidia.com>",
        "References": "<20210727034204.20649-1-xuemingl@nvidia.com>\n <20211103075838.1486056-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "72bcf130-d5f2-4baa-96fe-08d99e9fe849",
        "X-MS-TrafficTypeDiagnostic": "BY5PR12MB4259:",
        "X-Microsoft-Antispam-PRVS": "\n <BY5PR12MB4259CF8772CD311B55784850A18C9@BY5PR12MB4259.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:513;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 9Dx/o7vI3bvylxQt7tn754n4M40w/+YhArdUU1sp0+mwcEpNWtxAVVdwXT9lGNNz4iHw8MTztvRK61K90jHVv2VWKlG6cIK7X8RiIv3rn/OrDQz045oxQ//4fCY1GzwIEeeMWI6wZpZ6dGfVXsJG8AaBXMuF8n+IOA1Fw6/SI79NJZebKPxNnZyS7r2V8+iVyiISCKh3qXUcNS4XjKY+8hQJjNzNDWKcy7bbK1N71/tYv2Y3pwJBhbWJxft3RGLZGuUFPR/cyJWJMejzji+cqMAGbLVxQHfXBuFmu8m4oeXYD0JnOPRKiVnX+S4VvTGlFZkE4s7DOark69QIpXpTi8/2wd4iwUuPTgTwNHa+eugleVvE5bfo7oYHsVlfOURI4y5YcW6uq9rxMVA7HP6NqTUhQsKVVYLzV1brnD0VsnpOSZiiajo2mYwqUcf2xWcVyQO4QRxzj/IQAFffiUSzdNiqRN/HEynF6WWSRYeKFe63FbSQ1cSbBypaWUXEGsXF3dzegWXaFdjA6kqi7AqOb1KrqNkDv7x/ti9+Iu7k9sqTbcrbL5C+xnLjh31XMxFJuq3zwTVbaGuTThZ7aqk+Qp9pp0fCSp1DG1pWsjZTKznX+Eq1pFzGSVpWZsp+3rWr/e63E+ZjTgl0Pp/7mmadb++gsfABhLS+cOjXnBFyTBrGJeBOSeb+w+4AZL6z9U1LpJB71W0sFEhQmdMgXqds3A==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(426003)(6286002)(6666004)(8676002)(47076005)(86362001)(36860700001)(107886003)(8936002)(82310400003)(1076003)(2906002)(30864003)(508600001)(70206006)(83380400001)(356005)(7696005)(2616005)(4326008)(6916009)(7636003)(186003)(36756003)(16526019)(55016002)(316002)(26005)(336012)(70586007)(54906003)(5660300002);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "03 Nov 2021 07:59:48.1982 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 72bcf130-d5f2-4baa-96fe-08d99e9fe849",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT054.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BY5PR12MB4259",
        "Subject": "[dpdk-dev] [PATCH v3 08/14] net/mlx5: move Rx queue reference count",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Rx queue reference count is counter of RQ, used to count reference to RQ\nobject. To prepare for shared Rx queue, this patch moves it from rxq_ctrl\nto Rx queue private data.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n drivers/net/mlx5/mlx5_rx.h      |   8 +-\n drivers/net/mlx5/mlx5_rxq.c     | 169 +++++++++++++++++++++-----------\n drivers/net/mlx5/mlx5_trigger.c |  57 +++++------\n 3 files changed, 142 insertions(+), 92 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex fa24f5cdf3a..eccfbf1108d 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -149,7 +149,6 @@ enum mlx5_rxq_type {\n struct mlx5_rxq_ctrl {\n \tstruct mlx5_rxq_data rxq; /* Data path structure. */\n \tLIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */\n-\tuint32_t refcnt; /* Reference counter. */\n \tLIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */\n \tstruct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */\n \tstruct mlx5_dev_ctx_shared *sh; /* Shared context. */\n@@ -170,6 +169,7 @@ struct mlx5_rxq_ctrl {\n /* RX queue private data. */\n struct mlx5_rxq_priv {\n \tuint16_t idx; /* Queue index. */\n+\tuint32_t refcnt; /* Reference counter. */\n \tstruct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */\n \tLIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */\n \tstruct mlx5_priv *priv; /* Back pointer to private data. */\n@@ -207,7 +207,11 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,\n struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new\n \t(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,\n \t const struct rte_eth_hairpin_conf *hairpin_conf);\n-struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);\n+struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);\n+uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);\n+struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);\n+struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);\n+struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);\n int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);\n int mlx5_rxq_verify(struct rte_eth_dev *dev);\n int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 00df245a5c6..8071ddbd61c 100644\n--- 
a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -386,15 +386,13 @@ mlx5_get_rx_port_offloads(void)\n static int\n mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n \n-\tif (!(*priv->rxqs)[idx]) {\n+\tif (rxq == NULL) {\n \t\trte_errno = EINVAL;\n \t\treturn -rte_errno;\n \t}\n-\trxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);\n-\treturn (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);\n+\treturn (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);\n }\n \n /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */\n@@ -874,8 +872,8 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)\n \n \tfor (i = 0; i != n; ++i) {\n \t\t/* This rxq obj must not be released in this function. */\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);\n-\t\tstruct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;\n+\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);\n+\t\tstruct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;\n \t\tint rc;\n \n \t\t/* Skip queues that cannot request interrupts. 
*/\n@@ -885,11 +883,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)\n \t\t\tif (rte_intr_vec_list_index_set(intr_handle, i,\n \t\t\t   RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))\n \t\t\t\treturn -rte_errno;\n-\t\t\t/* Decrease the rxq_ctrl's refcnt */\n-\t\t\tif (rxq_ctrl)\n-\t\t\t\tmlx5_rxq_release(dev, i);\n \t\t\tcontinue;\n \t\t}\n+\t\tmlx5_rxq_ref(dev, i);\n \t\tif (count >= RTE_MAX_RXTX_INTR_VEC_ID) {\n \t\t\tDRV_LOG(ERR,\n \t\t\t\t\"port %u too many Rx queues for interrupt\"\n@@ -954,7 +950,7 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)\n \t\t * Need to access directly the queue to release the reference\n \t\t * kept in mlx5_rx_intr_vec_enable().\n \t\t */\n-\t\tmlx5_rxq_release(dev, i);\n+\t\tmlx5_rxq_deref(dev, i);\n \t}\n free:\n \trte_intr_free_epoll_fd(intr_handle);\n@@ -1003,19 +999,14 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)\n int\n mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n-\n-\trxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);\n-\tif (!rxq_ctrl)\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);\n+\tif (!rxq)\n \t\tgoto error;\n-\tif (rxq_ctrl->irq) {\n-\t\tif (!rxq_ctrl->obj) {\n-\t\t\tmlx5_rxq_release(dev, rx_queue_id);\n+\tif (rxq->ctrl->irq) {\n+\t\tif (!rxq->ctrl->obj)\n \t\t\tgoto error;\n-\t\t}\n-\t\tmlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);\n+\t\tmlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);\n \t}\n-\tmlx5_rxq_release(dev, rx_queue_id);\n \treturn 0;\n error:\n \trte_errno = EINVAL;\n@@ -1037,23 +1028,21 @@ int\n mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);\n \tint ret = 0;\n \n-\trxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);\n-\tif (!rxq_ctrl) {\n+\tif (!rxq) {\n \t\trte_errno = EINVAL;\n \t\treturn -rte_errno;\n \t}\n-\tif 
(!rxq_ctrl->obj)\n+\tif (!rxq->ctrl->obj)\n \t\tgoto error;\n-\tif (rxq_ctrl->irq) {\n-\t\tret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);\n+\tif (rxq->ctrl->irq) {\n+\t\tret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);\n \t\tif (ret < 0)\n \t\t\tgoto error;\n-\t\trxq_ctrl->rxq.cq_arm_sn++;\n+\t\trxq->ctrl->rxq.cq_arm_sn++;\n \t}\n-\tmlx5_rxq_release(dev, rx_queue_id);\n \treturn 0;\n error:\n \t/**\n@@ -1064,12 +1053,9 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t\trte_errno = errno;\n \telse\n \t\trte_errno = EINVAL;\n-\tret = rte_errno; /* Save rte_errno before cleanup. */\n-\tmlx5_rxq_release(dev, rx_queue_id);\n-\tif (ret != EAGAIN)\n+\tif (rte_errno != EAGAIN)\n \t\tDRV_LOG(WARNING, \"port %u unable to disable interrupt on Rx queue %d\",\n \t\t\tdev->data->port_id, rx_queue_id);\n-\trte_errno = ret; /* Restore rte_errno. */\n \treturn -rte_errno;\n }\n \n@@ -1657,7 +1643,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,\n \ttmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;\n #endif\n \ttmpl->rxq.idx = idx;\n-\t__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);\n+\tmlx5_rxq_ref(dev, idx);\n \tLIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);\n \treturn tmpl;\n error:\n@@ -1711,11 +1697,53 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,\n \ttmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };\n \ttmpl->hairpin_conf = *hairpin_conf;\n \ttmpl->rxq.idx = idx;\n-\t__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);\n+\tmlx5_rxq_ref(dev, idx);\n \tLIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);\n \treturn tmpl;\n }\n \n+/**\n+ * Increase Rx queue reference count.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param idx\n+ *   RX queue index.\n+ *\n+ * @return\n+ *   A pointer to the queue if it exists, NULL otherwise.\n+ */\n+struct mlx5_rxq_priv *\n+mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)\n+{\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, 
idx);\n+\n+\tif (rxq != NULL)\n+\t\t__atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);\n+\treturn rxq;\n+}\n+\n+/**\n+ * Dereference a Rx queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param idx\n+ *   RX queue index.\n+ *\n+ * @return\n+ *   Updated reference count.\n+ */\n+uint32_t\n+mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)\n+{\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n+\n+\tif (rxq == NULL)\n+\t\treturn 0;\n+\treturn __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);\n+}\n+\n /**\n  * Get a Rx queue.\n  *\n@@ -1727,18 +1755,52 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,\n  * @return\n  *   A pointer to the queue if it exists, NULL otherwise.\n  */\n-struct mlx5_rxq_ctrl *\n+struct mlx5_rxq_priv *\n mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl = NULL;\n \n-\tif (rxq_data) {\n-\t\trxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n-\t\t__atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);\n-\t}\n-\treturn rxq_ctrl;\n+\tif (priv->rxq_privs == NULL)\n+\t\treturn NULL;\n+\treturn (*priv->rxq_privs)[idx];\n+}\n+\n+/**\n+ * Get Rx queue shareable control.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param idx\n+ *   RX queue index.\n+ *\n+ * @return\n+ *   A pointer to the queue control if it exists, NULL otherwise.\n+ */\n+struct mlx5_rxq_ctrl *\n+mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)\n+{\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n+\n+\treturn rxq == NULL ? 
NULL : rxq->ctrl;\n+}\n+\n+/**\n+ * Get Rx queue shareable data.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param idx\n+ *   RX queue index.\n+ *\n+ * @return\n+ *   A pointer to the queue data if it exists, NULL otherwise.\n+ */\n+struct mlx5_rxq_data *\n+mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)\n+{\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n+\n+\treturn rxq == NULL ? NULL : &rxq->ctrl->rxq;\n }\n \n /**\n@@ -1756,13 +1818,12 @@ int\n mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n-\tstruct mlx5_rxq_priv *rxq = (*priv->rxq_privs)[idx];\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n \n \tif (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)\n \t\treturn 0;\n-\trxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);\n-\tif (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)\n+\tif (mlx5_rxq_deref(dev, idx) > 1)\n \t\treturn 1;\n \tif (rxq_ctrl->obj) {\n \t\tpriv->obj_ops.rxq_obj_release(rxq_ctrl->obj);\n@@ -1774,7 +1835,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)\n \t\trxq_free_elts(rxq_ctrl);\n \t\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;\n \t}\n-\tif (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {\n+\tif (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {\n \t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)\n \t\t\tmlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);\n \t\tLIST_REMOVE(rxq, owner_entry);\n@@ -1952,7 +2013,7 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,\n \t\treturn 1;\n \tpriv->obj_ops.ind_table_destroy(ind_tbl);\n \tfor (i = 0; i != ind_tbl->queues_n; ++i)\n-\t\tclaim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));\n+\t\tclaim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));\n \tmlx5_free(ind_tbl);\n \treturn 0;\n }\n@@ -2009,7 +2070,7 @@ 
mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,\n \t\t\t       log2above(priv->config.ind_table_max_size);\n \n \tfor (i = 0; i != queues_n; ++i) {\n-\t\tif (!mlx5_rxq_get(dev, queues[i])) {\n+\t\tif (mlx5_rxq_ref(dev, queues[i]) == NULL) {\n \t\t\tret = -rte_errno;\n \t\t\tgoto error;\n \t\t}\n@@ -2022,7 +2083,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,\n error:\n \terr = rte_errno;\n \tfor (j = 0; j < i; j++)\n-\t\tmlx5_rxq_release(dev, ind_tbl->queues[j]);\n+\t\tmlx5_rxq_deref(dev, ind_tbl->queues[j]);\n \trte_errno = err;\n \tDRV_LOG(DEBUG, \"Port %u cannot setup indirection table.\",\n \t\tdev->data->port_id);\n@@ -2118,7 +2179,7 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,\n \t\t\t  bool standalone)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tunsigned int i, j;\n+\tunsigned int i;\n \tint ret = 0, err;\n \tconst unsigned int n = rte_is_power_of_2(queues_n) ?\n \t\t\t       log2above(queues_n) :\n@@ -2138,15 +2199,11 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,\n \tret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);\n \tif (ret)\n \t\tgoto error;\n-\tfor (j = 0; j < ind_tbl->queues_n; j++)\n-\t\tmlx5_rxq_release(dev, ind_tbl->queues[j]);\n \tind_tbl->queues_n = queues_n;\n \tind_tbl->queues = queues;\n \treturn 0;\n error:\n \terr = rte_errno;\n-\tfor (j = 0; j < i; j++)\n-\t\tmlx5_rxq_release(dev, queues[j]);\n \trte_errno = err;\n \tDRV_LOG(DEBUG, \"Port %u cannot setup indirection table.\",\n \t\tdev->data->port_id);\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex ebeeae279e2..e5d74d275f8 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -201,10 +201,12 @@ mlx5_rxq_start(struct rte_eth_dev *dev)\n \tDRV_LOG(DEBUG, \"Port %u device_attr.max_sge is %d.\",\n \t\tdev->data->port_id, priv->sh->device_attr.max_sge);\n \tfor (i = 0; i != priv->rxqs_n; ++i) {\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, 
i);\n+\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);\n+\t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \n-\t\tif (!rxq_ctrl)\n+\t\tif (rxq == NULL)\n \t\t\tcontinue;\n+\t\trxq_ctrl = rxq->ctrl;\n \t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n \t\t\t/*\n \t\t\t * Pre-register the mempools. Regardless of whether\n@@ -266,6 +268,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)\n \tstruct mlx5_devx_modify_sq_attr sq_attr = { 0 };\n \tstruct mlx5_devx_modify_rq_attr rq_attr = { 0 };\n \tstruct mlx5_txq_ctrl *txq_ctrl;\n+\tstruct mlx5_rxq_priv *rxq;\n \tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \tstruct mlx5_devx_obj *sq;\n \tstruct mlx5_devx_obj *rq;\n@@ -310,9 +313,8 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)\n \t\t\treturn -rte_errno;\n \t\t}\n \t\tsq = txq_ctrl->obj->sq;\n-\t\trxq_ctrl = mlx5_rxq_get(dev,\n-\t\t\t\t\ttxq_ctrl->hairpin_conf.peers[0].queue);\n-\t\tif (!rxq_ctrl) {\n+\t\trxq = mlx5_rxq_get(dev, txq_ctrl->hairpin_conf.peers[0].queue);\n+\t\tif (rxq == NULL) {\n \t\t\tmlx5_txq_release(dev, i);\n \t\t\trte_errno = EINVAL;\n \t\t\tDRV_LOG(ERR, \"port %u no rxq object found: %d\",\n@@ -320,6 +322,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)\n \t\t\t\ttxq_ctrl->hairpin_conf.peers[0].queue);\n \t\t\treturn -rte_errno;\n \t\t}\n+\t\trxq_ctrl = rxq->ctrl;\n \t\tif (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||\n \t\t    rxq_ctrl->hairpin_conf.peers[0].queue != i) {\n \t\t\trte_errno = ENOMEM;\n@@ -354,12 +357,10 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)\n \t\trxq_ctrl->hairpin_status = 1;\n \t\ttxq_ctrl->hairpin_status = 1;\n \t\tmlx5_txq_release(dev, i);\n-\t\tmlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);\n \t}\n \treturn 0;\n error:\n \tmlx5_txq_release(dev, i);\n-\tmlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);\n \treturn -rte_errno;\n }\n \n@@ -432,27 +433,26 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,\n \t\tpeer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind;\n 
\t\tmlx5_txq_release(dev, peer_queue);\n \t} else { /* Peer port used as ingress. */\n+\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, peer_queue);\n \t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \n-\t\trxq_ctrl = mlx5_rxq_get(dev, peer_queue);\n-\t\tif (rxq_ctrl == NULL) {\n+\t\tif (rxq == NULL) {\n \t\t\trte_errno = EINVAL;\n \t\t\tDRV_LOG(ERR, \"Failed to get port %u Rx queue %d\",\n \t\t\t\tdev->data->port_id, peer_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n+\t\trxq_ctrl = rxq->ctrl;\n \t\tif (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {\n \t\t\trte_errno = EINVAL;\n \t\t\tDRV_LOG(ERR, \"port %u queue %d is not a hairpin Rxq\",\n \t\t\t\tdev->data->port_id, peer_queue);\n-\t\t\tmlx5_rxq_release(dev, peer_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n \t\tif (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {\n \t\t\trte_errno = ENOMEM;\n \t\t\tDRV_LOG(ERR, \"port %u no Rxq object found: %d\",\n \t\t\t\tdev->data->port_id, peer_queue);\n-\t\t\tmlx5_rxq_release(dev, peer_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n \t\tpeer_info->qp_id = rxq_ctrl->obj->rq->id;\n@@ -460,7 +460,6 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,\n \t\tpeer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;\n \t\tpeer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;\n \t\tpeer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;\n-\t\tmlx5_rxq_release(dev, peer_queue);\n \t}\n \treturn 0;\n }\n@@ -559,34 +558,32 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,\n \t\t\ttxq_ctrl->hairpin_status = 1;\n \t\tmlx5_txq_release(dev, cur_queue);\n \t} else {\n+\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);\n \t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \t\tstruct mlx5_devx_modify_rq_attr rq_attr = { 0 };\n \n-\t\trxq_ctrl = mlx5_rxq_get(dev, cur_queue);\n-\t\tif (rxq_ctrl == NULL) {\n+\t\tif (rxq == NULL) {\n \t\t\trte_errno = EINVAL;\n \t\t\tDRV_LOG(ERR, \"Failed to get port %u Rx queue %d\",\n 
\t\t\t\tdev->data->port_id, cur_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n+\t\trxq_ctrl = rxq->ctrl;\n \t\tif (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {\n \t\t\trte_errno = EINVAL;\n \t\t\tDRV_LOG(ERR, \"port %u queue %d not a hairpin Rxq\",\n \t\t\t\tdev->data->port_id, cur_queue);\n-\t\t\tmlx5_rxq_release(dev, cur_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n \t\tif (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {\n \t\t\trte_errno = ENOMEM;\n \t\t\tDRV_LOG(ERR, \"port %u no Rxq object found: %d\",\n \t\t\t\tdev->data->port_id, cur_queue);\n-\t\t\tmlx5_rxq_release(dev, cur_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n \t\tif (rxq_ctrl->hairpin_status != 0) {\n \t\t\tDRV_LOG(DEBUG, \"port %u Rx queue %d is already bound\",\n \t\t\t\tdev->data->port_id, cur_queue);\n-\t\t\tmlx5_rxq_release(dev, cur_queue);\n \t\t\treturn 0;\n \t\t}\n \t\tif (peer_info->tx_explicit !=\n@@ -594,7 +591,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,\n \t\t\trte_errno = EINVAL;\n \t\t\tDRV_LOG(ERR, \"port %u Rx queue %d and peer Tx rule mode\"\n \t\t\t\t\" mismatch\", dev->data->port_id, cur_queue);\n-\t\t\tmlx5_rxq_release(dev, cur_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n \t\tif (peer_info->manual_bind !=\n@@ -602,7 +598,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,\n \t\t\trte_errno = EINVAL;\n \t\t\tDRV_LOG(ERR, \"port %u Rx queue %d and peer binding mode\"\n \t\t\t\t\" mismatch\", dev->data->port_id, cur_queue);\n-\t\t\tmlx5_rxq_release(dev, cur_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n \t\trq_attr.state = MLX5_SQC_STATE_RDY;\n@@ -612,7 +607,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,\n \t\tret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);\n \t\tif (ret == 0)\n \t\t\trxq_ctrl->hairpin_status = 1;\n-\t\tmlx5_rxq_release(dev, cur_queue);\n \t}\n \treturn ret;\n }\n@@ -677,34 +671,32 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,\n 
\t\t\ttxq_ctrl->hairpin_status = 0;\n \t\tmlx5_txq_release(dev, cur_queue);\n \t} else {\n+\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);\n \t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \t\tstruct mlx5_devx_modify_rq_attr rq_attr = { 0 };\n \n-\t\trxq_ctrl = mlx5_rxq_get(dev, cur_queue);\n-\t\tif (rxq_ctrl == NULL) {\n+\t\tif (rxq == NULL) {\n \t\t\trte_errno = EINVAL;\n \t\t\tDRV_LOG(ERR, \"Failed to get port %u Rx queue %d\",\n \t\t\t\tdev->data->port_id, cur_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n+\t\trxq_ctrl = rxq->ctrl;\n \t\tif (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {\n \t\t\trte_errno = EINVAL;\n \t\t\tDRV_LOG(ERR, \"port %u queue %d not a hairpin Rxq\",\n \t\t\t\tdev->data->port_id, cur_queue);\n-\t\t\tmlx5_rxq_release(dev, cur_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n \t\tif (rxq_ctrl->hairpin_status == 0) {\n \t\t\tDRV_LOG(DEBUG, \"port %u Rx queue %d is already unbound\",\n \t\t\t\tdev->data->port_id, cur_queue);\n-\t\t\tmlx5_rxq_release(dev, cur_queue);\n \t\t\treturn 0;\n \t\t}\n \t\tif (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {\n \t\t\trte_errno = ENOMEM;\n \t\t\tDRV_LOG(ERR, \"port %u no Rxq object found: %d\",\n \t\t\t\tdev->data->port_id, cur_queue);\n-\t\t\tmlx5_rxq_release(dev, cur_queue);\n \t\t\treturn -rte_errno;\n \t\t}\n \t\trq_attr.state = MLX5_SQC_STATE_RST;\n@@ -712,7 +704,6 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,\n \t\tret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);\n \t\tif (ret == 0)\n \t\t\trxq_ctrl->hairpin_status = 0;\n-\t\tmlx5_rxq_release(dev, cur_queue);\n \t}\n \treturn ret;\n }\n@@ -1014,7 +1005,6 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_txq_ctrl *txq_ctrl;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \tuint32_t i;\n \tuint16_t pp;\n \tuint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0};\n@@ -1043,24 +1033,23 @@ mlx5_hairpin_get_peer_ports(struct 
rte_eth_dev *dev, uint16_t *peer_ports,\n \t\t}\n \t} else {\n \t\tfor (i = 0; i < priv->rxqs_n; i++) {\n-\t\t\trxq_ctrl = mlx5_rxq_get(dev, i);\n-\t\t\tif (!rxq_ctrl)\n+\t\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);\n+\t\t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\n+\t\t\tif (rxq == NULL)\n \t\t\t\tcontinue;\n-\t\t\tif (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {\n-\t\t\t\tmlx5_rxq_release(dev, i);\n+\t\t\trxq_ctrl = rxq->ctrl;\n+\t\t\tif (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)\n \t\t\t\tcontinue;\n-\t\t\t}\n \t\t\tpp = rxq_ctrl->hairpin_conf.peers[0].port;\n \t\t\tif (pp >= RTE_MAX_ETHPORTS) {\n \t\t\t\trte_errno = ERANGE;\n-\t\t\t\tmlx5_rxq_release(dev, i);\n \t\t\t\tDRV_LOG(ERR, \"port %hu queue %u peer port \"\n \t\t\t\t\t\"out of range %hu\",\n \t\t\t\t\tpriv->dev_data->port_id, i, pp);\n \t\t\t\treturn -rte_errno;\n \t\t\t}\n \t\t\tbits[pp / 32] |= 1 << (pp % 32);\n-\t\t\tmlx5_rxq_release(dev, i);\n \t\t}\n \t}\n \tfor (i = 0; i < RTE_MAX_ETHPORTS; i++) {\n",
    "prefixes": [
        "v3",
        "08/14"
    ]
}