get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (full update).
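
The same endpoints can be driven from a script. Below is a minimal sketch using Python's requests library: it fetches the patch shown below and issues a PATCH to change a couple of fields. The API token placeholder, the assumption that "state" and "archived" are writable for your account, and the exact accepted state names are assumptions, not something this page documents.

# Minimal sketch of using the endpoints above with Python's requests library.
# Assumptions: you hold a Patchwork API token with sufficient rights on the
# project, and "state"/"archived" are writable fields for your account.
import requests

BASE = "https://patches.dpdk.org/api"
TOKEN = "YOUR_API_TOKEN"  # hypothetical placeholder
HEADERS = {"Authorization": f"Token {TOKEN}", "Accept": "application/json"}

# get: Show a patch.
patch = requests.get(f"{BASE}/patches/103758/", headers=HEADERS).json()
print(patch["name"], patch["state"], patch["check"])

# patch: Partially update a patch -- only the fields supplied are changed.
resp = requests.patch(
    f"{BASE}/patches/103758/",
    headers=HEADERS,
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()

# put: Full update -- the whole writable representation is expected in the
# request body (not shown here).
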

GET /api/patches/103758/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 103758,
    "url": "https://patches.dpdk.org/api/patches/103758/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20211104123320.1638915-12-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211104123320.1638915-12-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211104123320.1638915-12-xuemingl@nvidia.com",
    "date": "2021-11-04T12:33:17",
    "name": "[v4,11/14] net/mlx5: move Rx queue DevX resource",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "ba580c8ab964217a43302dd8385f835435adaf8b",
    "submitter": {
        "id": 1904,
        "url": "https://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20211104123320.1638915-12-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 20310,
            "url": "https://patches.dpdk.org/api/series/20310/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=20310",
            "date": "2021-11-04T12:33:06",
            "name": "net/mlx5: support shared Rx queue",
            "version": 4,
            "mbox": "https://patches.dpdk.org/series/20310/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/103758/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/103758/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BBF28A0548;\n\tThu,  4 Nov 2021 13:35:43 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3FB7B42738;\n\tThu,  4 Nov 2021 13:35:05 +0100 (CET)",
            "from NAM11-CO1-obe.outbound.protection.outlook.com\n (mail-co1nam11on2086.outbound.protection.outlook.com [40.107.220.86])\n by mails.dpdk.org (Postfix) with ESMTP id 5DFF54271A\n for <dev@dpdk.org>; Thu,  4 Nov 2021 13:35:03 +0100 (CET)",
            "from DS7PR03CA0058.namprd03.prod.outlook.com (2603:10b6:5:3b5::33)\n by DM6PR12MB3676.namprd12.prod.outlook.com (2603:10b6:5:1c7::29) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4649.17; Thu, 4 Nov\n 2021 12:35:00 +0000",
            "from DM6NAM11FT008.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:3b5:cafe::e2) by DS7PR03CA0058.outlook.office365.com\n (2603:10b6:5:3b5::33) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.11 via Frontend\n Transport; Thu, 4 Nov 2021 12:35:00 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT008.mail.protection.outlook.com (10.13.172.85) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4669.10 via Frontend Transport; Thu, 4 Nov 2021 12:35:00 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Thu, 4 Nov\n 2021 12:34:57 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=eEO7YboskN5qCJlyItBEcOOoR3iZPlMuggU9g5ch2gU2mi4YmAzmdzdJh8xWdCMbBVArXHT1kvhOISpUf3psD1q8ZbUi5Tm7B34A8O4uS29Q1dqr/r5QjaEj7/X+OC8E51uTNHjMbFN2ow7GBaQWVP+yxI5oqYuIhBjBLlEQq6MxJS/FR0hYZu/aoAoH3dNoF9bmpX2OC0F9zJY0rcho2QI3TOXcZGHicVvEQ+C9Mrrluj7ddAfDYv3eGA+CvjiDHFSZakWtPK6AnUwoLkKK3+ey+BaiegC153UrVM4emrJnkG9ivvVtnhugsgcDbrUg2ENE7IzEFLe56OoIbTRO8w==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=yzI4KPWJ7PpK+hTpztzJp80z9EarwSjVwhZboBctFCM=;\n b=f1FaAPwbG9lqlt9BK3AoEU2aU7TtjpNF/02qc0pIEF1MZa+gOHiI7OxZzCo4kBNvD7U5aUiG+Dl2pA5u0uV5kNQdVih9gb/qIbZ71fL+IwUKWB7c1xCJUcxQCfVD5Y+ag7xtoyTJwSHE3eTRK5/0mgg3vPuheb0qqAFWkbHgds2NFh8kAKIyYmbUcxMJRYDRRZftiukYVXIM0V12A+XiWO2ZjEzxohb8/kapKWO8ZpQv0GNWEeUiLg15RL2uSDGmRRBM24OGIT6UrVPtRq5urVisOB1EucFl/bb0PRQ3eO0VZA6lkIvcz4Ju8f4g6XatBJfRL2Ge7K48+t/NqOZCaw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=intel.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=yzI4KPWJ7PpK+hTpztzJp80z9EarwSjVwhZboBctFCM=;\n b=THUTcOQMS36C1O74sWOonKUpmgWpNeLabgQhptI+OFW6wdWwaxVAfyfVVeD9lWSSM/PerCKgOHK/vc7n+wrsEpAqp/arq/Tsa4GWiECICISgvU+p7qe01WP2Z9KckbowvPFaVGzB4kWI50QVRN//9K150R9R6IcnB5Wnn393zwm3QIswFttghZdgyW/rv1CY/FDd38ClxlFyxtz8YYve9+BJgA5HH+VJSULNVr1Ow5l3X6VTh1963SQ/YOej21hC0OjULs/KeD5h2Z+5b0/O6evUuutcNu5J4qLqZf3P3hWRlq+cMKPIYOkztWRkmYlM6ijdys6VPzKzvB3NKblG4w==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; intel.com; dkim=none (message not signed)\n header.d=none;intel.com; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>, \"Slava\n Ovsiienko\" <viacheslavo@nvidia.com>, Matan Azrad <matan@nvidia.com>, \"Anatoly\n Burakov\" <anatoly.burakov@intel.com>",
        "Date": "Thu, 4 Nov 2021 20:33:17 +0800",
        "Message-ID": "<20211104123320.1638915-12-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20211104123320.1638915-1-xuemingl@nvidia.com>",
        "References": "<20210727034204.20649-1-xuemingl@nvidia.com>\n <20211104123320.1638915-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "5f444941-9e4a-4529-97a3-08d99f8f8480",
        "X-MS-TrafficTypeDiagnostic": "DM6PR12MB3676:",
        "X-Microsoft-Antispam-PRVS": "\n <DM6PR12MB3676A678BE98F5DA6018D578A18D9@DM6PR12MB3676.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:3968;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n n5+r4XynlXgdQIZD/ivKwBUPC8hbzdl4oCM7YbAdqUvs3A3Q5/jrPmznwsYxYHXJ0uAA92DwvjZbz9Q3IK4ZHuDeJx/PNlFQlesUOWofpU/1k3wtEOPMgvaJ5pta5hMifFHDbcwnEZgsig4ocPhCRYl1KUSEhuhxluZ2l6etxZAzsdxhZHjclVqjzAskdPgIG7OX3nnS/gTfRXKoBAyIBVReu6sdeHFp2Ip//1DoRBD4T98v829lS8TCnIhrS6sCY3fsAvKnI62gMtYMeaNkm61swQJG+8l97eN8E9n7qwSRWv9RLYTupi5d3jARzOu6sYz80MFerE8RJBD+o138QINvyvDCK9oU1D/+3waPueTSRkWBXU/9PBXbwQbqx7h6++CC34H0SHppbTyfM/nOTaAV67PBNOgrfC3F3AVsm94ERHx8VJuxKTWsoRjxxlMb4BJPlYjYZz/VKm1sOCPQdUw0x6NFg6YJfnOWYECL220C9uP/o0+ufqvPbtrfaT2u6aRttoMldZLj+2t4hEmohyaivZGO0t3qOa7RPy/k8EFwroHBGf8TqBE4a8Wba25IXkAtk3nw+ayL0mmLQ4e3GRlHQbWopqsI8f0n3zMKcn2k6mSAxxCraO/TUlbuIL3eI9MOSI7perV4N1iXTe1Bf9A6vNguVHTm5T6zqRMP+DHeEPpn4g/Z891Eox3BEraUi7gAIGU2HWbSZc9afDxd9zFGxa9LWAYWuqmuskqV7Sk=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(2616005)(186003)(16526019)(316002)(54906003)(8936002)(26005)(70586007)(70206006)(2906002)(86362001)(55016002)(8676002)(6286002)(83380400001)(47076005)(30864003)(508600001)(36756003)(6916009)(7636003)(336012)(7696005)(5660300002)(1076003)(4326008)(6666004)(82310400003)(356005)(426003)(36860700001)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "04 Nov 2021 12:35:00.0246 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 5f444941-9e4a-4529-97a3-08d99f8f8480",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT008.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM6PR12MB3676",
        "Subject": "[dpdk-dev] [PATCH v4 11/14] net/mlx5: move Rx queue DevX resource",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "To support shared RX queue, moves DevX RQ which is per queue resource to\nRx queue private data.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\nAcked-by: Slava Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_verbs.c | 154 +++++++++++--------\n drivers/net/mlx5/mlx5.h             |  11 +-\n drivers/net/mlx5/mlx5_devx.c        | 227 +++++++++++++---------------\n drivers/net/mlx5/mlx5_rx.h          |   1 +\n drivers/net/mlx5/mlx5_rxq.c         |  44 +++---\n drivers/net/mlx5/mlx5_rxtx.c        |   6 +-\n drivers/net/mlx5/mlx5_trigger.c     |   2 +-\n drivers/net/mlx5/mlx5_vlan.c        |  16 +-\n 8 files changed, 240 insertions(+), 221 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c\nindex 4779b37aa65..5d4ae3ea752 100644\n--- a/drivers/net/mlx5/linux/mlx5_verbs.c\n+++ b/drivers/net/mlx5/linux/mlx5_verbs.c\n@@ -29,13 +29,13 @@\n /**\n  * Modify Rx WQ vlan stripping offload\n  *\n- * @param rxq_obj\n- *   Rx queue object.\n+ * @param rxq\n+ *   Rx queue.\n  *\n  * @return 0 on success, non-0 otherwise\n  */\n static int\n-mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n+mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)\n {\n \tuint16_t vlan_offloads =\n \t\t(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |\n@@ -47,14 +47,14 @@ mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n \t\t.flags = vlan_offloads,\n \t};\n \n-\treturn mlx5_glue->modify_wq(rxq_obj->wq, &mod);\n+\treturn mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);\n }\n \n /**\n  * Modifies the attributes for the specified WQ.\n  *\n- * @param rxq_obj\n- *   Verbs Rx queue object.\n+ * @param rxq\n+ *   Verbs Rx queue.\n  * @param type\n  *   Type of change queue state.\n  *\n@@ -62,14 +62,14 @@ mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)\n+mlx5_ibv_modify_wq(struct mlx5_rxq_priv *rxq, uint8_t type)\n {\n \tstruct ibv_wq_attr mod = {\n \t\t.attr_mask = IBV_WQ_ATTR_STATE,\n \t\t.wq_state = (enum ibv_wq_state)type,\n \t};\n \n-\treturn mlx5_glue->modify_wq(rxq_obj->wq, &mod);\n+\treturn mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);\n }\n \n /**\n@@ -139,21 +139,18 @@ mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,\n /**\n  * Create a CQ Verbs object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.\n  */\n static struct ibv_cq *\n-mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_ibv_cq_create(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tstruct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;\n \tunsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);\n \tstruct {\n@@ -199,7 +196,7 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)\n \t\tDRV_LOG(DEBUG,\n \t\t\t\"Port %u Rx CQE compression is disabled for HW\"\n \t\t\t\" timestamp.\",\n-\t\t\tdev->data->port_id);\n+\t\t\tpriv->dev_data->port_id);\n \t}\n #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD\n \tif (RTE_CACHE_LINE_SIZE == 128) {\n@@ -216,21 +213,18 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)\n /**\n  * Create a WQ Verbs object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.\n  */\n static struct ibv_wq *\n-mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)\n 
{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tstruct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;\n \tunsigned int wqe_n = 1 << rxq_data->elts_n;\n \tstruct {\n@@ -297,7 +291,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)\n \t\t\tDRV_LOG(ERR,\n \t\t\t\t\"Port %u Rx queue %u requested %u*%u but got\"\n \t\t\t\t\" %u*%u WRs*SGEs.\",\n-\t\t\t\tdev->data->port_id, idx,\n+\t\t\t\tpriv->dev_data->port_id, rxq->idx,\n \t\t\t\twqe_n >> rxq_data->sges_n,\n \t\t\t\t(1 << rxq_data->sges_n),\n \t\t\t\twq_attr.ibv.max_wr, wq_attr.ibv.max_sge);\n@@ -312,21 +306,20 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)\n /**\n  * Create the Rx queue Verbs object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tuint16_t idx = rxq->idx;\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tuint16_t port_id = priv->dev_data->port_id;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tstruct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;\n \tstruct mlx5dv_cq cq_info;\n \tstruct mlx5dv_rwq rwq;\n@@ -341,17 +334,17 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\t\tmlx5_glue->create_comp_channel(priv->sh->cdev->ctx);\n \t\tif (!tmpl->ibv_channel) {\n \t\t\tDRV_LOG(ERR, \"Port %u: comp channel creation failure.\",\n-\t\t\t\tdev->data->port_id);\n+\t\t\t\tport_id);\n \t\t\trte_errno = ENOMEM;\n \t\t\tgoto error;\n \t\t}\n \t\ttmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;\n \t}\n \t/* Create CQ using Verbs API. */\n-\ttmpl->ibv_cq = mlx5_rxq_ibv_cq_create(dev, idx);\n+\ttmpl->ibv_cq = mlx5_rxq_ibv_cq_create(rxq);\n \tif (!tmpl->ibv_cq) {\n \t\tDRV_LOG(ERR, \"Port %u Rx queue %u CQ creation failure.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tport_id, idx);\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n@@ -366,7 +359,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\tDRV_LOG(ERR,\n \t\t\t\"Port %u wrong MLX5_CQE_SIZE environment \"\n \t\t\t\"variable value: it should be set to %u.\",\n-\t\t\tdev->data->port_id, RTE_CACHE_LINE_SIZE);\n+\t\t\tport_id, RTE_CACHE_LINE_SIZE);\n \t\trte_errno = EINVAL;\n \t\tgoto error;\n \t}\n@@ -377,19 +370,19 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \trxq_data->cq_uar = cq_info.cq_uar;\n \trxq_data->cqn = cq_info.cqn;\n \t/* Create WQ (RQ) using Verbs API. */\n-\ttmpl->wq = mlx5_rxq_ibv_wq_create(dev, idx);\n+\ttmpl->wq = mlx5_rxq_ibv_wq_create(rxq);\n \tif (!tmpl->wq) {\n \t\tDRV_LOG(ERR, \"Port %u Rx queue %u WQ creation failure.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tport_id, idx);\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n \t/* Change queue state to ready. 
*/\n-\tret = mlx5_ibv_modify_wq(tmpl, IBV_WQS_RDY);\n+\tret = mlx5_ibv_modify_wq(rxq, IBV_WQS_RDY);\n \tif (ret) {\n \t\tDRV_LOG(ERR,\n \t\t\t\"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tport_id, idx);\n \t\trte_errno = ret;\n \t\tgoto error;\n \t}\n@@ -405,7 +398,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \trxq_data->cq_arm_sn = 0;\n \tmlx5_rxq_initialize(rxq_data);\n \trxq_data->cq_ci = 0;\n-\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;\n+\tpriv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;\n \trxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;\n \treturn 0;\n error:\n@@ -423,12 +416,14 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n /**\n  * Release an Rx verbs queue object.\n  *\n- * @param rxq_obj\n- *   Verbs Rx queue object.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  */\n static void\n-mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)\n+mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)\n {\n+\tstruct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;\n+\n \tMLX5_ASSERT(rxq_obj);\n \tMLX5_ASSERT(rxq_obj->wq);\n \tMLX5_ASSERT(rxq_obj->ibv_cq);\n@@ -652,12 +647,24 @@ static void\n mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_obj *rxq_obj;\n \n-\tif (rxq->wq)\n-\t\tclaim_zero(mlx5_glue->destroy_wq(rxq->wq));\n-\tif (rxq->ibv_cq)\n-\t\tclaim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));\n+\tif (rxq == NULL)\n+\t\treturn;\n+\tif (rxq->ctrl == NULL)\n+\t\tgoto free_priv;\n+\trxq_obj = rxq->ctrl->obj;\n+\tif (rxq_obj == NULL)\n+\t\tgoto free_ctrl;\n+\tif (rxq_obj->wq)\n+\t\tclaim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));\n+\tif (rxq_obj->ibv_cq)\n+\t\tclaim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));\n+\tmlx5_free(rxq_obj);\n+free_ctrl:\n+\tmlx5_free(rxq->ctrl);\n+free_priv:\n \tmlx5_free(rxq);\n \tpriv->drop_queue.rxq = NULL;\n }\n@@ -676,39 +683,58 @@ mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct ibv_context *ctx = priv->sh->cdev->ctx;\n-\tstruct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = NULL;\n+\tstruct mlx5_rxq_obj *rxq_obj = NULL;\n \n-\tif (rxq)\n+\tif (rxq != NULL)\n \t\treturn 0;\n \trxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);\n-\tif (!rxq) {\n+\tif (rxq == NULL) {\n \t\tDRV_LOG(DEBUG, \"Port %u cannot allocate drop Rx queue memory.\",\n \t\t      dev->data->port_id);\n \t\trte_errno = ENOMEM;\n \t\treturn -rte_errno;\n \t}\n \tpriv->drop_queue.rxq = rxq;\n-\trxq->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);\n-\tif (!rxq->ibv_cq) {\n+\trxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0,\n+\t\t\t       SOCKET_ID_ANY);\n+\tif (rxq_ctrl == NULL) {\n+\t\tDRV_LOG(DEBUG, \"Port %u cannot allocate drop Rx queue control memory.\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\trxq->ctrl = rxq_ctrl;\n+\trxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0,\n+\t\t\t      SOCKET_ID_ANY);\n+\tif (rxq_obj == NULL) {\n+\t\tDRV_LOG(DEBUG, \"Port %u cannot allocate drop Rx queue memory.\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\trxq_ctrl->obj = rxq_obj;\n+\trxq_obj->ibv_cq = 
mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);\n+\tif (!rxq_obj->ibv_cq) {\n \t\tDRV_LOG(DEBUG, \"Port %u cannot allocate CQ for drop queue.\",\n \t\t      dev->data->port_id);\n \t\trte_errno = errno;\n \t\tgoto error;\n \t}\n-\trxq->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){\n+\trxq_obj->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){\n \t\t\t\t\t\t    .wq_type = IBV_WQT_RQ,\n \t\t\t\t\t\t    .max_wr = 1,\n \t\t\t\t\t\t    .max_sge = 1,\n \t\t\t\t\t\t    .pd = priv->sh->cdev->pd,\n-\t\t\t\t\t\t    .cq = rxq->ibv_cq,\n+\t\t\t\t\t\t    .cq = rxq_obj->ibv_cq,\n \t\t\t\t\t      });\n-\tif (!rxq->wq) {\n+\tif (!rxq_obj->wq) {\n \t\tDRV_LOG(DEBUG, \"Port %u cannot allocate WQ for drop queue.\",\n \t\t      dev->data->port_id);\n \t\trte_errno = errno;\n \t\tgoto error;\n \t}\n-\tpriv->drop_queue.rxq = rxq;\n \treturn 0;\n error:\n \tmlx5_rxq_ibv_obj_drop_release(dev);\n@@ -737,7 +763,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)\n \tret = mlx5_rxq_ibv_obj_drop_create(dev);\n \tif (ret < 0)\n \t\tgoto error;\n-\trxq = priv->drop_queue.rxq;\n+\trxq = priv->drop_queue.rxq->ctrl->obj;\n \tind_tbl = mlx5_glue->create_rwq_ind_table\n \t\t\t\t(priv->sh->cdev->ctx,\n \t\t\t\t &(struct ibv_rwq_ind_table_init_attr){\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 4e99fe7d068..967d92b4ad6 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -300,7 +300,7 @@ struct mlx5_vf_vlan {\n /* Flow drop context necessary due to Verbs API. */\n struct mlx5_drop {\n \tstruct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */\n-\tstruct mlx5_rxq_obj *rxq; /* Rx queue object. */\n+\tstruct mlx5_rxq_priv *rxq; /* Rx queue. */\n };\n \n /* Loopback dummy queue resources required due to Verbs API. */\n@@ -1267,7 +1267,6 @@ struct mlx5_rxq_obj {\n \t\t};\n \t\tstruct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */\n \t\tstruct {\n-\t\t\tstruct mlx5_devx_rq rq_obj; /* DevX RQ object. */\n \t\t\tstruct mlx5_devx_cq cq_obj; /* DevX CQ object. */\n \t\t\tvoid *devx_channel;\n \t\t};\n@@ -1349,11 +1348,11 @@ struct mlx5_rxq_priv;\n \n /* HW objects operations structure. 
*/\n struct mlx5_obj_ops {\n-\tint (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);\n-\tint (*rxq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);\n+\tint (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_priv *rxq, int on);\n+\tint (*rxq_obj_new)(struct mlx5_rxq_priv *rxq);\n \tint (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);\n-\tint (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, uint8_t type);\n-\tvoid (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);\n+\tint (*rxq_obj_modify)(struct mlx5_rxq_priv *rxq, uint8_t type);\n+\tvoid (*rxq_obj_release)(struct mlx5_rxq_priv *rxq);\n \tint (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,\n \t\t\t     struct mlx5_ind_table_obj *ind_tbl);\n \tint (*ind_table_modify)(struct rte_eth_dev *dev,\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex 8b3651f5034..b90a5d82458 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -30,14 +30,16 @@\n /**\n  * Modify RQ vlan stripping offload\n  *\n- * @param rxq_obj\n- *   Rx queue object.\n+ * @param rxq\n+ *   Rx queue.\n+ * @param on\n+ *   Enable/disable VLAN stripping.\n  *\n  * @return\n  *   0 on success, non-0 otherwise\n  */\n static int\n-mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n+mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)\n {\n \tstruct mlx5_devx_modify_rq_attr rq_attr;\n \n@@ -46,14 +48,14 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n \trq_attr.state = MLX5_RQC_STATE_RDY;\n \trq_attr.vsd = (on ? 0 : 1);\n \trq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;\n-\treturn mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);\n+\treturn mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);\n }\n \n /**\n  * Modify RQ using DevX API.\n  *\n- * @param rxq_obj\n- *   DevX Rx queue object.\n+ * @param rxq\n+ *   DevX rx queue.\n  * @param type\n  *   Type of change queue state.\n  *\n@@ -61,7 +63,7 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)\n+mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)\n {\n \tstruct mlx5_devx_modify_rq_attr rq_attr;\n \n@@ -86,7 +88,7 @@ mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)\n \tdefault:\n \t\tbreak;\n \t}\n-\treturn mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);\n+\treturn mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);\n }\n \n /**\n@@ -145,42 +147,34 @@ mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,\n \treturn 0;\n }\n \n-/**\n- * Destroy the Rx queue DevX object.\n- *\n- * @param rxq_obj\n- *   Rxq object to destroy.\n- */\n-static void\n-mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)\n-{\n-\tmlx5_devx_rq_destroy(&rxq_obj->rq_obj);\n-\tmemset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));\n-\tmlx5_devx_cq_destroy(&rxq_obj->cq_obj);\n-\tmemset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));\n-}\n-\n /**\n  * Release an Rx DevX queue object.\n  *\n- * @param rxq_obj\n- *   DevX Rx queue object.\n+ * @param rxq\n+ *   DevX Rx queue.\n  */\n static void\n-mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)\n+mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)\n {\n-\tMLX5_ASSERT(rxq_obj);\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;\n+\n+\tMLX5_ASSERT(rxq 
!= NULL);\n+\tMLX5_ASSERT(rxq_ctrl != NULL);\n \tif (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {\n \t\tMLX5_ASSERT(rxq_obj->rq);\n-\t\tmlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);\n+\t\tmlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);\n \t\tclaim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));\n \t} else {\n-\t\tMLX5_ASSERT(rxq_obj->cq_obj.cq);\n-\t\tMLX5_ASSERT(rxq_obj->rq_obj.rq);\n-\t\tmlx5_rxq_release_devx_resources(rxq_obj);\n-\t\tif (rxq_obj->devx_channel)\n+\t\tmlx5_devx_rq_destroy(&rxq->devx_rq);\n+\t\tmemset(&rxq->devx_rq, 0, sizeof(rxq->devx_rq));\n+\t\tmlx5_devx_cq_destroy(&rxq_obj->cq_obj);\n+\t\tmemset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));\n+\t\tif (rxq_obj->devx_channel) {\n \t\t\tmlx5_os_devx_destroy_event_channel\n \t\t\t\t\t\t\t(rxq_obj->devx_channel);\n+\t\t\trxq_obj->devx_channel = NULL;\n+\t\t}\n \t}\n }\n \n@@ -224,22 +218,19 @@ mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)\n /**\n  * Create a RQ object using DevX.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param rxq_data\n- *   RX queue data.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,\n-\t\t\t\t  struct mlx5_rxq_data *rxq_data)\n+mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_priv *priv = rxq->priv;\n \tstruct mlx5_common_device *cdev = priv->sh->cdev;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;\n \tstruct mlx5_devx_create_rq_attr rq_attr = { 0 };\n \tuint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;\n \tuint32_t wqe_size, log_wqe_size;\n@@ -281,31 +272,29 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,\n \trq_attr.wq_attr.pd = cdev->pdn;\n \trq_attr.counter_set_id = priv->counter_set_id;\n \t/* Create RQ using DevX API. 
*/\n-\treturn mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,\n+\treturn mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,\n \t\t\t\t   log_desc_n, &rq_attr, rxq_ctrl->socket);\n }\n \n /**\n  * Create a DevX CQ object for an Rx queue.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param rxq_data\n- *   RX queue data.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,\n-\t\t\t\t  struct mlx5_rxq_data *rxq_data)\n+mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)\n {\n \tstruct mlx5_devx_cq *cq_obj = 0;\n \tstruct mlx5_devx_cq_attr cq_attr = { 0 };\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_priv *priv = rxq->priv;\n \tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tuint16_t port_id = priv->dev_data->port_id;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tunsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);\n \tuint32_t log_cqe_n;\n \tuint16_t event_nums[1] = { 0 };\n@@ -346,7 +335,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,\n \t\t}\n \t\tDRV_LOG(DEBUG,\n \t\t\t\"Port %u Rx CQE compression is enabled, format %d.\",\n-\t\t\tdev->data->port_id, priv->config.cqe_comp_fmt);\n+\t\t\tport_id, priv->config.cqe_comp_fmt);\n \t\t/*\n \t\t * For vectorized Rx, it must not be doubled in order to\n \t\t * make cq_ci and rq_ci aligned.\n@@ -355,13 +344,12 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,\n \t\t\tcqe_n *= 2;\n \t} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {\n \t\tDRV_LOG(DEBUG,\n-\t\t\t\"Port %u Rx CQE compression is disabled for HW\"\n-\t\t\t\" timestamp.\",\n-\t\t\tdev->data->port_id);\n+\t\t\t\"Port %u Rx CQE compression is disabled for HW timestamp.\",\n+\t\t\tport_id);\n \t} else if (priv->config.cqe_comp && rxq_data->lro) {\n \t\tDRV_LOG(DEBUG,\n \t\t\t\"Port %u Rx CQE compression is disabled for LRO.\",\n-\t\t\tdev->data->port_id);\n+\t\t\tport_id);\n \t}\n \tcq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);\n \tlog_cqe_n = log2above(cqe_n);\n@@ -399,27 +387,23 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,\n /**\n  * Create the Rx hairpin queue object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tuint16_t idx = rxq->idx;\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n \tstruct mlx5_devx_create_rq_attr attr = { 0 };\n \tstruct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;\n \tuint32_t max_wq_data;\n \n-\tMLX5_ASSERT(rxq_data);\n-\tMLX5_ASSERT(tmpl);\n+\tMLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);\n \ttmpl->rxq_ctrl = rxq_ctrl;\n \tattr.hairpin = 1;\n \tmax_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;\n@@ 
-448,39 +432,36 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)\n \tif (!tmpl->rq) {\n \t\tDRV_LOG(ERR,\n \t\t\t\"Port %u Rx hairpin queue %u can't create rq object.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tpriv->dev_data->port_id, idx);\n \t\trte_errno = errno;\n \t\treturn -rte_errno;\n \t}\n-\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;\n+\tpriv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;\n \treturn 0;\n }\n \n /**\n  * Create the Rx queue DevX object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tstruct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;\n \tint ret = 0;\n \n \tMLX5_ASSERT(rxq_data);\n \tMLX5_ASSERT(tmpl);\n \tif (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)\n-\t\treturn mlx5_rxq_obj_hairpin_new(dev, idx);\n+\t\treturn mlx5_rxq_obj_hairpin_new(rxq);\n \ttmpl->rxq_ctrl = rxq_ctrl;\n \tif (rxq_ctrl->irq) {\n \t\tint devx_ev_flag =\n@@ -498,34 +479,32 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\ttmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);\n \t}\n \t/* Create CQ using DevX API. */\n-\tret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);\n+\tret = mlx5_rxq_create_devx_cq_resources(rxq);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to create CQ.\");\n \t\tgoto error;\n \t}\n \t/* Create RQ using DevX API. */\n-\tret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);\n+\tret = mlx5_rxq_create_devx_rq_resources(rxq);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Port %u Rx queue %u RQ creation failure.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tpriv->dev_data->port_id, rxq->idx);\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n \t/* Change queue state to ready. */\n-\tret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);\n+\tret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);\n \tif (ret)\n \t\tgoto error;\n-\trxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.wq.umem_buf;\n-\trxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.wq.db_rec;\n-\trxq_data->cq_arm_sn = 0;\n-\trxq_data->cq_ci = 0;\n+\trxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;\n+\trxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;\n \tmlx5_rxq_initialize(rxq_data);\n-\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;\n-\trxq_ctrl->wqn = tmpl->rq_obj.rq->id;\n+\tpriv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;\n+\trxq_ctrl->wqn = rxq->devx_rq.rq->id;\n \treturn 0;\n error:\n \tret = rte_errno; /* Save rte_errno before cleanup. */\n-\tmlx5_rxq_devx_obj_release(tmpl);\n+\tmlx5_rxq_devx_obj_release(rxq);\n \trte_errno = ret; /* Restore rte_errno. 
*/\n \treturn -rte_errno;\n }\n@@ -571,15 +550,15 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,\n \trqt_attr->rqt_actual_size = rqt_n;\n \tif (queues == NULL) {\n \t\tfor (i = 0; i < rqt_n; i++)\n-\t\t\trqt_attr->rq_list[i] = priv->drop_queue.rxq->rq->id;\n+\t\t\trqt_attr->rq_list[i] =\n+\t\t\t\t\tpriv->drop_queue.rxq->devx_rq.rq->id;\n \t\treturn rqt_attr;\n \t}\n \tfor (i = 0; i != queues_n; ++i) {\n-\t\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);\n \n-\t\trqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;\n+\t\tMLX5_ASSERT(rxq != NULL);\n+\t\trqt_attr->rq_list[i] = rxq->devx_rq.rq->id;\n \t}\n \tMLX5_ASSERT(i > 0);\n \tfor (j = 0; i != rqt_n; ++j, ++i)\n@@ -719,7 +698,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,\n \t\t\t}\n \t\t}\n \t} else {\n-\t\trxq_obj_type = priv->drop_queue.rxq->rxq_ctrl->type;\n+\t\trxq_obj_type = priv->drop_queue.rxq->ctrl->type;\n \t}\n \tmemset(tir_attr, 0, sizeof(*tir_attr));\n \ttir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;\n@@ -891,9 +870,9 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tint socket_id = dev->device->numa_node;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n-\tstruct mlx5_rxq_data *rxq_data;\n-\tstruct mlx5_rxq_obj *rxq = NULL;\n+\tstruct mlx5_rxq_priv *rxq;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = NULL;\n+\tstruct mlx5_rxq_obj *rxq_obj = NULL;\n \tint ret;\n \n \t/*\n@@ -901,6 +880,13 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)\n \t * They are required to hold pointers for cleanup\n \t * and are only accessible via drop queue DevX objects.\n \t */\n+\trxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);\n+\tif (rxq == NULL) {\n+\t\tDRV_LOG(ERR, \"Port %u could not allocate drop queue private\",\n+\t\t\tdev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n \trxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),\n \t\t\t       0, socket_id);\n \tif (rxq_ctrl == NULL) {\n@@ -909,27 +895,29 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\trxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);\n-\tif (rxq == NULL) {\n+\trxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);\n+\tif (rxq_obj == NULL) {\n \t\tDRV_LOG(ERR, \"Port %u could not allocate drop queue object\",\n \t\t\tdev->data->port_id);\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\trxq->rxq_ctrl = rxq_ctrl;\n+\trxq_obj->rxq_ctrl = rxq_ctrl;\n \trxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;\n \trxq_ctrl->sh = priv->sh;\n-\trxq_ctrl->obj = rxq;\n-\trxq_data = &rxq_ctrl->rxq;\n+\trxq_ctrl->obj = rxq_obj;\n+\trxq->ctrl = rxq_ctrl;\n+\trxq->priv = priv;\n+\tLIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);\n \t/* Create CQ using DevX API. */\n-\tret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);\n+\tret = mlx5_rxq_create_devx_cq_resources(rxq);\n \tif (ret != 0) {\n \t\tDRV_LOG(ERR, \"Port %u drop queue CQ creation failed.\",\n \t\t\tdev->data->port_id);\n \t\tgoto error;\n \t}\n \t/* Create RQ using DevX API. 
*/\n-\tret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);\n+\tret = mlx5_rxq_create_devx_rq_resources(rxq);\n \tif (ret != 0) {\n \t\tDRV_LOG(ERR, \"Port %u drop queue RQ creation failed.\",\n \t\t\tdev->data->port_id);\n@@ -945,18 +933,20 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)\n \treturn 0;\n error:\n \tret = rte_errno; /* Save rte_errno before cleanup. */\n-\tif (rxq != NULL) {\n-\t\tif (rxq->rq_obj.rq != NULL)\n-\t\t\tmlx5_devx_rq_destroy(&rxq->rq_obj);\n-\t\tif (rxq->cq_obj.cq != NULL)\n-\t\t\tmlx5_devx_cq_destroy(&rxq->cq_obj);\n-\t\tif (rxq->devx_channel)\n+\tif (rxq != NULL && rxq->devx_rq.rq != NULL)\n+\t\tmlx5_devx_rq_destroy(&rxq->devx_rq);\n+\tif (rxq_obj != NULL) {\n+\t\tif (rxq_obj->cq_obj.cq != NULL)\n+\t\t\tmlx5_devx_cq_destroy(&rxq_obj->cq_obj);\n+\t\tif (rxq_obj->devx_channel)\n \t\t\tmlx5_os_devx_destroy_event_channel\n-\t\t\t\t\t\t\t(rxq->devx_channel);\n-\t\tmlx5_free(rxq);\n+\t\t\t\t\t\t\t(rxq_obj->devx_channel);\n+\t\tmlx5_free(rxq_obj);\n \t}\n \tif (rxq_ctrl != NULL)\n \t\tmlx5_free(rxq_ctrl);\n+\tif (rxq != NULL)\n+\t\tmlx5_free(rxq);\n \trte_errno = ret; /* Restore rte_errno. */\n \treturn -rte_errno;\n }\n@@ -971,12 +961,13 @@ static void\n mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->rxq_ctrl;\n+\tstruct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n \n \tmlx5_rxq_devx_obj_release(rxq);\n-\tmlx5_free(rxq);\n+\tmlx5_free(rxq_ctrl->obj);\n \tmlx5_free(rxq_ctrl);\n+\tmlx5_free(rxq);\n \tpriv->drop_queue.rxq = NULL;\n }\n \n@@ -996,7 +987,7 @@ mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)\n \t\tmlx5_devx_tir_destroy(hrxq);\n \tif (hrxq->ind_table->ind_table != NULL)\n \t\tmlx5_devx_ind_table_destroy(hrxq->ind_table);\n-\tif (priv->drop_queue.rxq->rq != NULL)\n+\tif (priv->drop_queue.rxq->devx_rq.rq != NULL)\n \t\tmlx5_rxq_devx_obj_drop_release(dev);\n }\n \ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex c04c0c73349..337dcca59fb 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -174,6 +174,7 @@ struct mlx5_rxq_priv {\n \tstruct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */\n \tLIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */\n \tstruct mlx5_priv *priv; /* Back pointer to private data. */\n+\tstruct mlx5_devx_rq devx_rq;\n \tstruct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */\n \tuint32_t hairpin_status; /* Hairpin binding status. 
*/\n };\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 5a20966e2ca..2850a220399 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -471,13 +471,13 @@ int\n mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n \tint ret;\n \n+\tMLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);\n \tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n-\tret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);\n+\tret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Cannot change Rx WQ state to RESET:  %s\",\n \t\t\tstrerror(errno));\n@@ -485,7 +485,7 @@ mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)\n \t\treturn ret;\n \t}\n \t/* Remove all processes CQEs. */\n-\trxq_sync_cq(rxq);\n+\trxq_sync_cq(&rxq_ctrl->rxq);\n \t/* Free all involved mbufs. */\n \trxq_free_elts(rxq_ctrl);\n \t/* Set the actual queue state. */\n@@ -557,26 +557,26 @@ int\n mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n+\tstruct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;\n \tint ret;\n \n-\tMLX5_ASSERT(rte_eal_process_type() ==  RTE_PROC_PRIMARY);\n+\tMLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);\n+\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n \t/* Allocate needed buffers. */\n-\tret = rxq_alloc_elts(rxq_ctrl);\n+\tret = rxq_alloc_elts(rxq->ctrl);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Cannot reallocate buffers for Rx WQ\");\n \t\trte_errno = errno;\n \t\treturn ret;\n \t}\n \trte_io_wmb();\n-\t*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);\n+\t*rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);\n \trte_io_wmb();\n \t/* Reset RQ consumer before moving queue to READY state. */\n-\t*rxq->rq_db = rte_cpu_to_be_32(0);\n+\t*rxq_data->rq_db = rte_cpu_to_be_32(0);\n \trte_io_wmb();\n-\tret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);\n+\tret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Cannot change Rx WQ state to READY:  %s\",\n \t\t\tstrerror(errno));\n@@ -584,8 +584,8 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)\n \t\treturn ret;\n \t}\n \t/* Reinitialize RQ - set WQEs. */\n-\tmlx5_rxq_initialize(rxq);\n-\trxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;\n+\tmlx5_rxq_initialize(rxq_data);\n+\trxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;\n \t/* Set actual queue state. 
*/\n \tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;\n \treturn 0;\n@@ -1835,15 +1835,19 @@ int\n mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_priv *rxq;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \n-\tif (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)\n+\tif (priv->rxq_privs == NULL)\n+\t\treturn 0;\n+\trxq = mlx5_rxq_get(dev, idx);\n+\tif (rxq == NULL)\n \t\treturn 0;\n \tif (mlx5_rxq_deref(dev, idx) > 1)\n \t\treturn 1;\n-\tif (rxq_ctrl->obj) {\n-\t\tpriv->obj_ops.rxq_obj_release(rxq_ctrl->obj);\n+\trxq_ctrl = rxq->ctrl;\n+\tif (rxq_ctrl->obj != NULL) {\n+\t\tpriv->obj_ops.rxq_obj_release(rxq);\n \t\tLIST_REMOVE(rxq_ctrl->obj, next);\n \t\tmlx5_free(rxq_ctrl->obj);\n \t\trxq_ctrl->obj = NULL;\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex 0bcdff1b116..54d410b513b 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -373,11 +373,9 @@ mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \n \tif (sm->is_wq) {\n-\t\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, sm->queue_id);\n \n-\t\tret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);\n+\t\tret = priv->obj_ops.rxq_obj_modify(rxq, sm->state);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Cannot change Rx WQ state to %u  - %s\",\n \t\t\t\t\tsm->state, strerror(errno));\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex caafdf27e8f..2cf62a9780d 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -231,7 +231,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)\n \t\t\trte_errno = ENOMEM;\n \t\t\tgoto error;\n \t\t}\n-\t\tret = priv->obj_ops.rxq_obj_new(dev, i);\n+\t\tret = priv->obj_ops.rxq_obj_new(rxq);\n \t\tif (ret) {\n \t\t\tmlx5_free(rxq_ctrl->obj);\n \t\t\trxq_ctrl->obj = NULL;\ndiff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c\nindex 07792fc5d94..ea841bb32fb 100644\n--- a/drivers/net/mlx5/mlx5_vlan.c\n+++ b/drivers/net/mlx5/mlx5_vlan.c\n@@ -91,11 +91,11 @@ void\n mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queue);\n+\tstruct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;\n \tint ret = 0;\n \n+\tMLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);\n \t/* Validate hw support */\n \tif (!priv->config.hw_vlan_strip) {\n \t\tDRV_LOG(ERR, \"port %u VLAN stripping is not supported\",\n@@ -109,20 +109,20 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)\n \t\treturn;\n \t}\n \tDRV_LOG(DEBUG, \"port %u set VLAN stripping offloads %d for port %uqueue %d\",\n-\t\tdev->data->port_id, on, rxq->port_id, queue);\n-\tif (!rxq_ctrl->obj) {\n+\t\tdev->data->port_id, on, rxq_data->port_id, queue);\n+\tif (rxq->ctrl->obj == NULL) {\n \t\t/* Update related bits in RX queue. 
*/\n-\t\trxq->vlan_strip = !!on;\n+\t\trxq_data->vlan_strip = !!on;\n \t\treturn;\n \t}\n-\tret = priv->obj_ops.rxq_obj_modify_vlan_strip(rxq_ctrl->obj, on);\n+\tret = priv->obj_ops.rxq_obj_modify_vlan_strip(rxq, on);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Port %u failed to modify object stripping mode:\"\n \t\t\t\" %s\", dev->data->port_id, strerror(rte_errno));\n \t\treturn;\n \t}\n \t/* Update related bits in RX queue. */\n-\trxq->vlan_strip = !!on;\n+\trxq_data->vlan_strip = !!on;\n }\n \n /**\n",
    "prefixes": [
        "v4",
        "11/14"
    ]
}
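
The fields in the JSON above can also be consumed programmatically: "mbox" points at an mbox rendering of the patch suitable for git am, and "checks" lists the per-CI results that roll up into the aggregate "check": "warning". The sketch below assumes JSON content negotiation via the Accept header (consistent with "Vary: Accept"); the field names inside each check entry ("context", "state") are assumptions about the checks endpoint, which is not shown on this page.

# Sketch: downloading the patch mbox and listing its CI checks.
import requests

patch = requests.get(
    "https://patches.dpdk.org/api/patches/103758/",
    headers={"Accept": "application/json"},
).json()

# Save the patch as an mbox file, suitable for `git am 103758.mbox`.
mbox = requests.get(patch["mbox"])
with open("103758.mbox", "wb") as f:
    f.write(mbox.content)

# Inspect the per-CI check results behind patch["check"].
for check in requests.get(patch["checks"], headers={"Accept": "application/json"}).json():
    print(check.get("context"), check.get("state"))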