get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
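
A minimal sketch of how a client might drive these endpoints, assuming Python's
requests library; the API token placeholder and the example state value are
illustrative only, and write operations require suitable permissions on the
Patchwork instance:

# Sketch: read and update a patch via the Patchwork REST API.
import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 99692

# get: show the patch (the API returns JSON by default for non-browser clients).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# patch: partially update the patch, e.g. its state.
# "<your-api-token>" and "accepted" are placeholder/assumed values.
headers = {"Authorization": "Token <your-api-token>"}
resp = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                      headers=headers,
                      json={"state": "accepted"})
resp.raise_for_status()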

GET /api/patches/99692/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 99692,
    "url": "https://patches.dpdk.org/api/patches/99692/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210926111904.237736-10-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210926111904.237736-10-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210926111904.237736-10-xuemingl@nvidia.com",
    "date": "2021-09-26T11:19:02",
    "name": "[09/11] net/mlx5: move Rx queue DevX resource",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c2a1781b9e9e4c5cd112c7e54d4e3d7c5b26d65f",
    "submitter": {
        "id": 1904,
        "url": "https://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210926111904.237736-10-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 19166,
            "url": "https://patches.dpdk.org/api/series/19166/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=19166",
            "date": "2021-09-26T11:18:53",
            "name": "net/mlx5: support shared Rx queue",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/19166/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/99692/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/99692/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 4B727A0547;\n\tSun, 26 Sep 2021 13:20:29 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D7B8641123;\n\tSun, 26 Sep 2021 13:20:14 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2080.outbound.protection.outlook.com [40.107.243.80])\n by mails.dpdk.org (Postfix) with ESMTP id 02C5641101\n for <dev@dpdk.org>; Sun, 26 Sep 2021 13:20:12 +0200 (CEST)",
            "from MW2PR2101CA0025.namprd21.prod.outlook.com (2603:10b6:302:1::38)\n by CY4PR12MB1783.namprd12.prod.outlook.com (2603:10b6:903:121::17)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4544.18; Sun, 26 Sep\n 2021 11:20:10 +0000",
            "from CO1NAM11FT034.eop-nam11.prod.protection.outlook.com\n (2603:10b6:302:1:cafe::d2) by MW2PR2101CA0025.outlook.office365.com\n (2603:10b6:302:1::38) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4587.2 via Frontend\n Transport; Sun, 26 Sep 2021 11:20:09 +0000",
            "from mail.nvidia.com (216.228.112.36) by\n CO1NAM11FT034.mail.protection.outlook.com (10.13.174.248) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4544.13 via Frontend Transport; Sun, 26 Sep 2021 11:20:09 +0000",
            "from DRHQMAIL107.nvidia.com (10.27.9.16) by HQMAIL101.nvidia.com\n (172.20.187.10) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Sun, 26 Sep\n 2021 11:20:09 +0000",
            "from nvidia.com (172.20.187.5) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Sun, 26 Sep\n 2021 11:20:06 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=mTCEseoz9Ko1P98ZwAVfev5SDCEyaBcHT+vnczTLSYS+re+o4i1pJSyoNYioillZ8OE9ch3RBbTsFswMWFWZErMdJPJEJ+vKpRY6PGI+x5IP1gWrkRjvhn59fRV+yDBh4K4fNtke6OdYbz6QK12PYme1HcxKA+85M/pzU8rjEtrTMEcvKXFS7gCe8dhIQWKAJSWDhVJMKTBMi1L6gekCqu2YYrgrDsnIu6iHz9LJ8THwAnBxcOB7oCRPARCtoInK8RDjs+LNuUQhvMjbbk2VnmEjhG46Z00+08XbKry2ccxc0bvXHmHS3O6Sw+gMYEbZ46C9OMfw3FXOG87LxslyRg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n bh=BDLOONjmavYDoUSLBaSunMP8GaoQhVOcqZmu1HOLfGM=;\n b=TMNqtm2uA7RjmOd9mSq/M0+26DZq76euL3e/tQpEw2A8URmmp4t4aBQadMIunrTgYsmtKbVd4sUlaEOVlF0CsPvOyXt21Ml2xiVdTxnuvKEpVUjpU+qiPfgcaCNVERrpgXvti4MsvfAi5wL2V8dzv5gWzGwlR2S26B2s6ScjFy7dnAJ3hFdM9ZLMYFy4GVsnpSqmp3YSJHMX5tgN+mLHvgce+A9+V9CPYyoJwV4SymK74GIs3MxFOa6V54pzW/3Z/pu79fljGGgNyv/YMWCt5Bz8Eqn+iFCH9fA0rv9g9jjt9Kl+dsqx/LFfwYbOo/3OfbO6n/mOx9atbGG4861RvQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.36) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=BDLOONjmavYDoUSLBaSunMP8GaoQhVOcqZmu1HOLfGM=;\n b=NT1O+4lP5XsQ+hOAQfY85iqRUObaaxqG7eXX6koj+kOnp1SLQ32qaq8PpXU34NCkAS7feoFSv7mjRpdicWNruFteXpbt/lt8oOgkinZBtc6wo+WeMH3H5ujBj+lKFVbwAwetBkhX77xhhgQiKv5qXhy0a6BQwiWrQn7kWahNptceEepRwVX/USBSFmCiWu6nH/5sQcWn/B3WSNZE0E54QfnPQL7aDtecstDbGVqhgP70k1OWn+HG0FKz05OC75p6t/kylik6yzf9tOi/XZxCt6vQAvwmxZx+zmxd2IVP+wIwgcP7lMZYmZS6f6yony2UEaYexYH1uLg2CHqGNwa2Yw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.36)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.36 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.36; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>, Matan Azrad\n <matan@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>, \"Anatoly\n Burakov\" <anatoly.burakov@intel.com>",
        "Date": "Sun, 26 Sep 2021 19:19:02 +0800",
        "Message-ID": "<20210926111904.237736-10-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20210926111904.237736-1-xuemingl@nvidia.com>",
        "References": "<20210926111904.237736-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n DRHQMAIL107.nvidia.com (10.27.9.16)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "59cc8acc-e1e5-4072-8b34-08d980df99f6",
        "X-MS-TrafficTypeDiagnostic": "CY4PR12MB1783:",
        "X-Microsoft-Antispam-PRVS": "\n <CY4PR12MB1783DC50E733C074321D530FA1A69@CY4PR12MB1783.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:3968;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n w512xndSwyLVFI58QRS8kJ7hvQ8ff8PZ48xmFadcCBa/zCDeoWD3y06CtIvhhWgbZF6P6QdjKnylXchatKzLmv+tKtRamQV+0sbKAJHEhPSJegawDyWeWceRMwHn9pzQSaKYVD+wzlpV4uinDkY/xTbnQtDPX8KJ2vJs0Z0BDQfofiBNk8Z/223D3awtrbhGWMjljThrhGltuk3uQKybjlEChf4nMnq65V6VRah+n2hBP29PhYDAvBsTDk1y3xw1lePZryfe8aa8hduZIS7b43QyCPzGhwgpCg7C2V2UuGR9cjiAgrhKBEBbvfLoco8p2nvFWCmYxFe4/KEX9ECTEwpCDWWDkVJpw63oaFKOVcmjtfbtSy1DUpc0aZ8KcuPmBINiKVgi2eOqjBq29d2KGzZdckgb51aEZwVaMcVBePh5QpGQTn2MCrSUrIaBWg+eSbUa2nYpT6Mui7gj5H9k9Jpen67qGloCtr4/y9p9Mh8YrRjbGitV2z7NQeiu529yyXlM7zpaZ3PGUiOB0qu1VsIqdu3Z193oIIDGcDZYldVAjLp4RiiyD42k/HidaWNR6lgizWmbvTQuaPd1PZk28uuyTCzh9VOS+hngvhYHMBp1EVvhUG+3bedZ1UMv+TatCrbsozDy2tF0qQZ/8t6RQKHN3MRFLC5W/nlMj6i3LWdAYHHc/p8OXIqGuq5Pkxbex9uDlWxgWjBeCArI+Wb8hamWzhdjsZee+RH5Z89BV7s=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.36; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid05.nvidia.com; CAT:NONE;\n SFS:(4636009)(46966006)(36840700001)(16526019)(83380400001)(316002)(86362001)(47076005)(8936002)(30864003)(186003)(508600001)(36860700001)(356005)(5660300002)(55016002)(336012)(4326008)(426003)(6666004)(36906005)(6916009)(6286002)(70586007)(2616005)(36756003)(8676002)(26005)(2906002)(82310400003)(7696005)(1076003)(7636003)(54906003)(70206006)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "26 Sep 2021 11:20:09.7610 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 59cc8acc-e1e5-4072-8b34-08d980df99f6",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.36];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT034.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CY4PR12MB1783",
        "Subject": "[dpdk-dev] [PATCH 09/11] net/mlx5: move Rx queue DevX resource",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "To support shared RX queue, move DevX RQ which is per queue resource to\nRx queue private data.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_verbs.c | 154 +++++++++++--------\n drivers/net/mlx5/mlx5.h             |  11 +-\n drivers/net/mlx5/mlx5_devx.c        | 227 ++++++++++++++--------------\n drivers/net/mlx5/mlx5_rx.h          |   1 +\n drivers/net/mlx5/mlx5_rxq.c         |  44 +++---\n drivers/net/mlx5/mlx5_rxtx.c        |   6 +-\n drivers/net/mlx5/mlx5_trigger.c     |   2 +-\n drivers/net/mlx5/mlx5_vlan.c        |  16 +-\n 8 files changed, 241 insertions(+), 220 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c\nindex d4fa202ac4b..a2a9b9c1f98 100644\n--- a/drivers/net/mlx5/linux/mlx5_verbs.c\n+++ b/drivers/net/mlx5/linux/mlx5_verbs.c\n@@ -71,13 +71,13 @@ const struct mlx5_mr_ops mlx5_mr_verbs_ops = {\n /**\n  * Modify Rx WQ vlan stripping offload\n  *\n- * @param rxq_obj\n- *   Rx queue object.\n+ * @param rxq\n+ *   Rx queue.\n  *\n  * @return 0 on success, non-0 otherwise\n  */\n static int\n-mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n+mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)\n {\n \tuint16_t vlan_offloads =\n \t\t(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |\n@@ -89,14 +89,14 @@ mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n \t\t.flags = vlan_offloads,\n \t};\n \n-\treturn mlx5_glue->modify_wq(rxq_obj->wq, &mod);\n+\treturn mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);\n }\n \n /**\n  * Modifies the attributes for the specified WQ.\n  *\n- * @param rxq_obj\n- *   Verbs Rx queue object.\n+ * @param rxq\n+ *   Verbs Rx queue.\n  * @param type\n  *   Type of change queue state.\n  *\n@@ -104,14 +104,14 @@ mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)\n+mlx5_ibv_modify_wq(struct mlx5_rxq_priv *rxq, uint8_t type)\n {\n \tstruct ibv_wq_attr mod = {\n \t\t.attr_mask = IBV_WQ_ATTR_STATE,\n \t\t.wq_state = (enum ibv_wq_state)type,\n \t};\n \n-\treturn mlx5_glue->modify_wq(rxq_obj->wq, &mod);\n+\treturn mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);\n }\n \n /**\n@@ -181,21 +181,18 @@ mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,\n /**\n  * Create a CQ Verbs object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.\n  */\n static struct ibv_cq *\n-mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_ibv_cq_create(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tstruct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;\n \tunsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);\n \tstruct {\n@@ -241,7 +238,7 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)\n \t\tDRV_LOG(DEBUG,\n \t\t\t\"Port %u Rx CQE compression is disabled for HW\"\n \t\t\t\" timestamp.\",\n-\t\t\tdev->data->port_id);\n+\t\t\tpriv->dev_data->port_id);\n \t}\n #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD\n \tif (RTE_CACHE_LINE_SIZE == 128) {\n@@ -257,21 +254,18 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)\n /**\n  * Create a WQ Verbs object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.\n  */\n static struct ibv_wq *\n-mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t 
idx)\n+mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tstruct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;\n \tunsigned int wqe_n = 1 << rxq_data->elts_n;\n \tstruct {\n@@ -338,7 +332,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)\n \t\t\tDRV_LOG(ERR,\n \t\t\t\t\"Port %u Rx queue %u requested %u*%u but got\"\n \t\t\t\t\" %u*%u WRs*SGEs.\",\n-\t\t\t\tdev->data->port_id, idx,\n+\t\t\t\tpriv->dev_data->port_id, rxq->idx,\n \t\t\t\twqe_n >> rxq_data->sges_n,\n \t\t\t\t(1 << rxq_data->sges_n),\n \t\t\t\twq_attr.ibv.max_wr, wq_attr.ibv.max_sge);\n@@ -353,21 +347,20 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)\n /**\n  * Create the Rx queue Verbs object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tuint16_t idx = rxq->idx;\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tuint16_t port_id = priv->dev_data->port_id;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tstruct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;\n \tstruct mlx5dv_cq cq_info;\n \tstruct mlx5dv_rwq rwq;\n@@ -382,17 +375,17 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\t\t\tmlx5_glue->create_comp_channel(priv->sh->ctx);\n \t\tif (!tmpl->ibv_channel) {\n \t\t\tDRV_LOG(ERR, \"Port %u: comp channel creation failure.\",\n-\t\t\t\tdev->data->port_id);\n+\t\t\t\tport_id);\n \t\t\trte_errno = ENOMEM;\n \t\t\tgoto error;\n \t\t}\n \t\ttmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;\n \t}\n \t/* Create CQ using Verbs API. */\n-\ttmpl->ibv_cq = mlx5_rxq_ibv_cq_create(dev, idx);\n+\ttmpl->ibv_cq = mlx5_rxq_ibv_cq_create(rxq);\n \tif (!tmpl->ibv_cq) {\n \t\tDRV_LOG(ERR, \"Port %u Rx queue %u CQ creation failure.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tport_id, idx);\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n@@ -407,7 +400,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\tDRV_LOG(ERR,\n \t\t\t\"Port %u wrong MLX5_CQE_SIZE environment \"\n \t\t\t\"variable value: it should be set to %u.\",\n-\t\t\tdev->data->port_id, RTE_CACHE_LINE_SIZE);\n+\t\t\tport_id, RTE_CACHE_LINE_SIZE);\n \t\trte_errno = EINVAL;\n \t\tgoto error;\n \t}\n@@ -418,19 +411,19 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \trxq_data->cq_uar = cq_info.cq_uar;\n \trxq_data->cqn = cq_info.cqn;\n \t/* Create WQ (RQ) using Verbs API. 
*/\n-\ttmpl->wq = mlx5_rxq_ibv_wq_create(dev, idx);\n+\ttmpl->wq = mlx5_rxq_ibv_wq_create(rxq);\n \tif (!tmpl->wq) {\n \t\tDRV_LOG(ERR, \"Port %u Rx queue %u WQ creation failure.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tport_id, idx);\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n \t/* Change queue state to ready. */\n-\tret = mlx5_ibv_modify_wq(tmpl, IBV_WQS_RDY);\n+\tret = mlx5_ibv_modify_wq(rxq, IBV_WQS_RDY);\n \tif (ret) {\n \t\tDRV_LOG(ERR,\n \t\t\t\"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tport_id, idx);\n \t\trte_errno = ret;\n \t\tgoto error;\n \t}\n@@ -446,7 +439,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \trxq_data->cq_arm_sn = 0;\n \tmlx5_rxq_initialize(rxq_data);\n \trxq_data->cq_ci = 0;\n-\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;\n+\tpriv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;\n \trxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;\n \treturn 0;\n error:\n@@ -464,12 +457,14 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n /**\n  * Release an Rx verbs queue object.\n  *\n- * @param rxq_obj\n- *   Verbs Rx queue object.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  */\n static void\n-mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)\n+mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)\n {\n+\tstruct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;\n+\n \tMLX5_ASSERT(rxq_obj);\n \tMLX5_ASSERT(rxq_obj->wq);\n \tMLX5_ASSERT(rxq_obj->ibv_cq);\n@@ -692,12 +687,24 @@ static void\n mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_obj *rxq_obj;\n \n-\tif (rxq->wq)\n-\t\tclaim_zero(mlx5_glue->destroy_wq(rxq->wq));\n-\tif (rxq->ibv_cq)\n-\t\tclaim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));\n+\tif (rxq == NULL)\n+\t\treturn;\n+\tif (rxq->ctrl == NULL)\n+\t\tgoto free_priv;\n+\trxq_obj = rxq->ctrl->obj;\n+\tif (rxq_obj == NULL)\n+\t\tgoto free_ctrl;\n+\tif (rxq_obj->wq)\n+\t\tclaim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));\n+\tif (rxq_obj->ibv_cq)\n+\t\tclaim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));\n+\tmlx5_free(rxq_obj);\n+free_ctrl:\n+\tmlx5_free(rxq->ctrl);\n+free_priv:\n \tmlx5_free(rxq);\n \tpriv->drop_queue.rxq = NULL;\n }\n@@ -716,39 +723,58 @@ mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct ibv_context *ctx = priv->sh->ctx;\n-\tstruct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = NULL;\n+\tstruct mlx5_rxq_obj *rxq_obj = NULL;\n \n-\tif (rxq)\n+\tif (rxq != NULL)\n \t\treturn 0;\n \trxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);\n-\tif (!rxq) {\n+\tif (rxq == NULL) {\n \t\tDRV_LOG(DEBUG, \"Port %u cannot allocate drop Rx queue memory.\",\n \t\t      dev->data->port_id);\n \t\trte_errno = ENOMEM;\n \t\treturn -rte_errno;\n \t}\n \tpriv->drop_queue.rxq = rxq;\n-\trxq->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);\n-\tif (!rxq->ibv_cq) {\n+\trxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0,\n+\t\t\t       SOCKET_ID_ANY);\n+\tif (rxq_ctrl == NULL) {\n+\t\tDRV_LOG(DEBUG, \"Port %u cannot allocate drop Rx queue control memory.\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\trxq->ctrl = rxq_ctrl;\n+\trxq_obj = 
mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0,\n+\t\t\t      SOCKET_ID_ANY);\n+\tif (rxq_obj == NULL) {\n+\t\tDRV_LOG(DEBUG, \"Port %u cannot allocate drop Rx queue memory.\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\trxq_ctrl->obj = rxq_obj;\n+\trxq_obj->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);\n+\tif (!rxq_obj->ibv_cq) {\n \t\tDRV_LOG(DEBUG, \"Port %u cannot allocate CQ for drop queue.\",\n \t\t      dev->data->port_id);\n \t\trte_errno = errno;\n \t\tgoto error;\n \t}\n-\trxq->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){\n+\trxq_obj->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){\n \t\t\t\t\t\t    .wq_type = IBV_WQT_RQ,\n \t\t\t\t\t\t    .max_wr = 1,\n \t\t\t\t\t\t    .max_sge = 1,\n \t\t\t\t\t\t    .pd = priv->sh->pd,\n-\t\t\t\t\t\t    .cq = rxq->ibv_cq,\n+\t\t\t\t\t\t    .cq = rxq_obj->ibv_cq,\n \t\t\t\t\t      });\n-\tif (!rxq->wq) {\n+\tif (!rxq_obj->wq) {\n \t\tDRV_LOG(DEBUG, \"Port %u cannot allocate WQ for drop queue.\",\n \t\t      dev->data->port_id);\n \t\trte_errno = errno;\n \t\tgoto error;\n \t}\n-\tpriv->drop_queue.rxq = rxq;\n \treturn 0;\n error:\n \tmlx5_rxq_ibv_obj_drop_release(dev);\n@@ -777,7 +803,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)\n \tret = mlx5_rxq_ibv_obj_drop_create(dev);\n \tif (ret < 0)\n \t\tgoto error;\n-\trxq = priv->drop_queue.rxq;\n+\trxq = priv->drop_queue.rxq->ctrl->obj;\n \tind_tbl = mlx5_glue->create_rwq_ind_table\n \t\t\t\t(priv->sh->ctx,\n \t\t\t\t &(struct ibv_rwq_ind_table_init_attr){\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex d06f828ed33..c674f5ba9c4 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -310,7 +310,7 @@ struct mlx5_vf_vlan {\n /* Flow drop context necessary due to Verbs API. */\n struct mlx5_drop {\n \tstruct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */\n-\tstruct mlx5_rxq_obj *rxq; /* Rx queue object. */\n+\tstruct mlx5_rxq_priv *rxq; /* Rx queue. */\n };\n \n /* Loopback dummy queue resources required due to Verbs API. */\n@@ -1257,7 +1257,6 @@ struct mlx5_rxq_obj {\n \t\t};\n \t\tstruct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */\n \t\tstruct {\n-\t\t\tstruct mlx5_devx_rq rq_obj; /* DevX RQ object. */\n \t\t\tstruct mlx5_devx_cq cq_obj; /* DevX CQ object. */\n \t\t\tvoid *devx_channel;\n \t\t};\n@@ -1339,11 +1338,11 @@ struct mlx5_rxq_priv;\n \n /* HW objects operations structure. 
*/\n struct mlx5_obj_ops {\n-\tint (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);\n-\tint (*rxq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);\n+\tint (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_priv *rxq, int on);\n+\tint (*rxq_obj_new)(struct mlx5_rxq_priv *rxq);\n \tint (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);\n-\tint (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, uint8_t type);\n-\tvoid (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);\n+\tint (*rxq_obj_modify)(struct mlx5_rxq_priv *rxq, uint8_t type);\n+\tvoid (*rxq_obj_release)(struct mlx5_rxq_priv *rxq);\n \tint (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,\n \t\t\t     struct mlx5_ind_table_obj *ind_tbl);\n \tint (*ind_table_modify)(struct rte_eth_dev *dev,\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex 71e4bce1588..d219e255f0a 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -30,14 +30,16 @@\n /**\n  * Modify RQ vlan stripping offload\n  *\n- * @param rxq_obj\n- *   Rx queue object.\n+ * @param rxq\n+ *   Rx queue.\n+ * @param on\n+ *   Enable/disable VLAN stripping.\n  *\n  * @return\n  *   0 on success, non-0 otherwise\n  */\n static int\n-mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n+mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)\n {\n \tstruct mlx5_devx_modify_rq_attr rq_attr;\n \n@@ -46,14 +48,14 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n \trq_attr.state = MLX5_RQC_STATE_RDY;\n \trq_attr.vsd = (on ? 0 : 1);\n \trq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;\n-\treturn mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);\n+\treturn mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);\n }\n \n /**\n  * Modify RQ using DevX API.\n  *\n- * @param rxq_obj\n- *   DevX Rx queue object.\n+ * @param rxq\n+ *   DevX rx queue.\n  * @param type\n  *   Type of change queue state.\n  *\n@@ -61,7 +63,7 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)\n+mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)\n {\n \tstruct mlx5_devx_modify_rq_attr rq_attr;\n \n@@ -86,7 +88,7 @@ mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)\n \tdefault:\n \t\tbreak;\n \t}\n-\treturn mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);\n+\treturn mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);\n }\n \n /**\n@@ -145,42 +147,34 @@ mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,\n \treturn 0;\n }\n \n-/**\n- * Destroy the Rx queue DevX object.\n- *\n- * @param rxq_obj\n- *   Rxq object to destroy.\n- */\n-static void\n-mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)\n-{\n-\tmlx5_devx_rq_destroy(&rxq_obj->rq_obj);\n-\tmemset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));\n-\tmlx5_devx_cq_destroy(&rxq_obj->cq_obj);\n-\tmemset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));\n-}\n-\n /**\n  * Release an Rx DevX queue object.\n  *\n- * @param rxq_obj\n- *   DevX Rx queue object.\n+ * @param rxq\n+ *   DevX Rx queue.\n  */\n static void\n-mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)\n+mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)\n {\n-\tMLX5_ASSERT(rxq_obj);\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;\n+\n+\tMLX5_ASSERT(rxq 
!= NULL);\n+\tMLX5_ASSERT(rxq_ctrl != NULL);\n \tif (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {\n \t\tMLX5_ASSERT(rxq_obj->rq);\n-\t\tmlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);\n+\t\tmlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);\n \t\tclaim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));\n \t} else {\n-\t\tMLX5_ASSERT(rxq_obj->cq_obj.cq);\n-\t\tMLX5_ASSERT(rxq_obj->rq_obj.rq);\n-\t\tmlx5_rxq_release_devx_resources(rxq_obj);\n-\t\tif (rxq_obj->devx_channel)\n+\t\tmlx5_devx_rq_destroy(&rxq->devx_rq);\n+\t\tmemset(&rxq->devx_rq, 0, sizeof(rxq->devx_rq));\n+\t\tmlx5_devx_cq_destroy(&rxq_obj->cq_obj);\n+\t\tmemset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));\n+\t\tif (rxq_obj->devx_channel) {\n \t\t\tmlx5_os_devx_destroy_event_channel\n \t\t\t\t\t\t\t(rxq_obj->devx_channel);\n+\t\t\trxq_obj->devx_channel = NULL;\n+\t\t}\n \t}\n }\n \n@@ -224,21 +218,18 @@ mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)\n /**\n  * Create a RQ object using DevX.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param rxq_data\n- *   RX queue data.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,\n-\t\t\t\t  struct mlx5_rxq_data *rxq_data)\n+mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;\n \tstruct mlx5_devx_create_rq_attr rq_attr = { 0 };\n \tuint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;\n \tuint32_t wqe_size, log_wqe_size;\n@@ -279,7 +270,7 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,\n \trq_attr.wq_attr.pd = priv->sh->pdn;\n \trq_attr.counter_set_id = priv->counter_set_id;\n \t/* Create RQ using DevX API. 
*/\n-\treturn mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,\n+\treturn mlx5_devx_rq_create(priv->sh->ctx, &rxq->devx_rq,\n \t\t\t\t   wqe_size, log_desc_n, &rq_attr,\n \t\t\t\t   rxq_ctrl->socket);\n }\n@@ -287,24 +278,22 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,\n /**\n  * Create a DevX CQ object for an Rx queue.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param rxq_data\n- *   RX queue data.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,\n-\t\t\t\t  struct mlx5_rxq_data *rxq_data)\n+mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)\n {\n \tstruct mlx5_devx_cq *cq_obj = 0;\n \tstruct mlx5_devx_cq_attr cq_attr = { 0 };\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_priv *priv = rxq->priv;\n \tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tuint16_t port_id = priv->dev_data->port_id;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tunsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);\n \tuint32_t log_cqe_n;\n \tuint16_t event_nums[1] = { 0 };\n@@ -345,7 +334,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,\n \t\t}\n \t\tDRV_LOG(DEBUG,\n \t\t\t\"Port %u Rx CQE compression is enabled, format %d.\",\n-\t\t\tdev->data->port_id, priv->config.cqe_comp_fmt);\n+\t\t\tport_id, priv->config.cqe_comp_fmt);\n \t\t/*\n \t\t * For vectorized Rx, it must not be doubled in order to\n \t\t * make cq_ci and rq_ci aligned.\n@@ -354,13 +343,12 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,\n \t\t\tcqe_n *= 2;\n \t} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {\n \t\tDRV_LOG(DEBUG,\n-\t\t\t\"Port %u Rx CQE compression is disabled for HW\"\n-\t\t\t\" timestamp.\",\n-\t\t\tdev->data->port_id);\n+\t\t\t\"Port %u Rx CQE compression is disabled for HW timestamp.\",\n+\t\t\tport_id);\n \t} else if (priv->config.cqe_comp && rxq_data->lro) {\n \t\tDRV_LOG(DEBUG,\n \t\t\t\"Port %u Rx CQE compression is disabled for LRO.\",\n-\t\t\tdev->data->port_id);\n+\t\t\tport_id);\n \t}\n \tcq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);\n \tlog_cqe_n = log2above(cqe_n);\n@@ -398,27 +386,23 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,\n /**\n  * Create the Rx hairpin queue object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tuint16_t idx = rxq->idx;\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n \tstruct mlx5_devx_create_rq_attr attr = { 0 };\n \tstruct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;\n \tuint32_t max_wq_data;\n \n-\tMLX5_ASSERT(rxq_data);\n-\tMLX5_ASSERT(tmpl);\n+\tMLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);\n \ttmpl->rxq_ctrl = rxq_ctrl;\n 
\tattr.hairpin = 1;\n \tmax_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;\n@@ -447,39 +431,36 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)\n \tif (!tmpl->rq) {\n \t\tDRV_LOG(ERR,\n \t\t\t\"Port %u Rx hairpin queue %u can't create rq object.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tpriv->dev_data->port_id, idx);\n \t\trte_errno = errno;\n \t\treturn -rte_errno;\n \t}\n-\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;\n+\tpriv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;\n \treturn 0;\n }\n \n /**\n  * Create the Rx queue DevX object.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- * @param idx\n- *   Queue index in DPDK Rx queue array.\n+ * @param rxq\n+ *   Pointer to Rx queue.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_priv *priv = rxq->priv;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;\n \tstruct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;\n \tint ret = 0;\n \n \tMLX5_ASSERT(rxq_data);\n \tMLX5_ASSERT(tmpl);\n \tif (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)\n-\t\treturn mlx5_rxq_obj_hairpin_new(dev, idx);\n+\t\treturn mlx5_rxq_obj_hairpin_new(rxq);\n \ttmpl->rxq_ctrl = rxq_ctrl;\n \tif (rxq_ctrl->irq) {\n \t\tint devx_ev_flag =\n@@ -497,34 +478,32 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n \t\ttmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);\n \t}\n \t/* Create CQ using DevX API. */\n-\tret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);\n+\tret = mlx5_rxq_create_devx_cq_resources(rxq);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Failed to create CQ.\");\n \t\tgoto error;\n \t}\n \t/* Create RQ using DevX API. */\n-\tret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);\n+\tret = mlx5_rxq_create_devx_rq_resources(rxq);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Port %u Rx queue %u RQ creation failure.\",\n-\t\t\tdev->data->port_id, idx);\n+\t\t\tpriv->dev_data->port_id, rxq->idx);\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n \t/* Change queue state to ready. */\n-\tret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);\n+\tret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);\n \tif (ret)\n \t\tgoto error;\n-\trxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.wq.umem_buf;\n-\trxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.db_rec;\n-\trxq_data->cq_arm_sn = 0;\n-\trxq_data->cq_ci = 0;\n+\trxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;\n+\trxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.db_rec;\n \tmlx5_rxq_initialize(rxq_data);\n-\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;\n-\trxq_ctrl->wqn = tmpl->rq_obj.rq->id;\n+\tpriv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;\n+\trxq_ctrl->wqn = rxq->devx_rq.rq->id;\n \treturn 0;\n error:\n \tret = rte_errno; /* Save rte_errno before cleanup. */\n-\tmlx5_rxq_devx_obj_release(tmpl);\n+\tmlx5_rxq_devx_obj_release(rxq);\n \trte_errno = ret; /* Restore rte_errno. 
*/\n \treturn -rte_errno;\n }\n@@ -570,15 +549,15 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,\n \trqt_attr->rqt_actual_size = rqt_n;\n \tif (queues == NULL) {\n \t\tfor (i = 0; i < rqt_n; i++)\n-\t\t\trqt_attr->rq_list[i] = priv->drop_queue.rxq->rq->id;\n+\t\t\trqt_attr->rq_list[i] =\n+\t\t\t\t\tpriv->drop_queue.rxq->devx_rq.rq->id;\n \t\treturn rqt_attr;\n \t}\n \tfor (i = 0; i != queues_n; ++i) {\n-\t\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);\n \n-\t\trqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;\n+\t\tMLX5_ASSERT(rxq != NULL);\n+\t\trqt_attr->rq_list[i] = rxq->devx_rq.rq->id;\n \t}\n \tMLX5_ASSERT(i > 0);\n \tfor (j = 0; i != rqt_n; ++j, ++i)\n@@ -717,7 +696,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,\n \t\t\t}\n \t\t}\n \t} else {\n-\t\trxq_obj_type = priv->drop_queue.rxq->rxq_ctrl->type;\n+\t\trxq_obj_type = priv->drop_queue.rxq->ctrl->type;\n \t}\n \tmemset(tir_attr, 0, sizeof(*tir_attr));\n \ttir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;\n@@ -889,16 +868,23 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tint socket_id = dev->device->numa_node;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n-\tstruct mlx5_rxq_data *rxq_data;\n-\tstruct mlx5_rxq_obj *rxq = NULL;\n+\tstruct mlx5_rxq_priv *rxq;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = NULL;\n+\tstruct mlx5_rxq_obj *rxq_obj = NULL;\n \tint ret;\n \n \t/*\n-\t * Initialize dummy control structures.\n+\t * Initialize dummy Rx queue structures.\n \t * They are required to hold pointers for cleanup\n \t * and are only accessible via drop queue DevX objects.\n \t */\n+\trxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);\n+\tif (rxq == NULL) {\n+\t\tDRV_LOG(ERR, \"Port %u could not allocate drop queue\",\n+\t\t\tdev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n \trxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),\n \t\t\t       0, socket_id);\n \tif (rxq_ctrl == NULL) {\n@@ -907,27 +893,29 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\trxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);\n-\tif (rxq == NULL) {\n+\trxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);\n+\tif (rxq_obj == NULL) {\n \t\tDRV_LOG(ERR, \"Port %u could not allocate drop queue object\",\n \t\t\tdev->data->port_id);\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\trxq->rxq_ctrl = rxq_ctrl;\n+\trxq->priv = priv;\n+\trxq->ctrl = rxq_ctrl;\n+\tLIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);\n+\trxq_obj->rxq_ctrl = rxq_ctrl;\n \trxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;\n \trxq_ctrl->sh = priv->sh;\n-\trxq_ctrl->obj = rxq;\n-\trxq_data = &rxq_ctrl->rxq;\n+\trxq_ctrl->obj = rxq_obj;\n \t/* Create CQ using DevX API. */\n-\tret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);\n+\tret = mlx5_rxq_create_devx_cq_resources(rxq);\n \tif (ret != 0) {\n \t\tDRV_LOG(ERR, \"Port %u drop queue CQ creation failed.\",\n \t\t\tdev->data->port_id);\n \t\tgoto error;\n \t}\n \t/* Create RQ using DevX API. 
*/\n-\tret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);\n+\tret = mlx5_rxq_create_devx_rq_resources(rxq);\n \tif (ret != 0) {\n \t\tDRV_LOG(ERR, \"Port %u drop queue RQ creation failed.\",\n \t\t\tdev->data->port_id);\n@@ -944,15 +932,18 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)\n error:\n \tret = rte_errno; /* Save rte_errno before cleanup. */\n \tif (rxq != NULL) {\n-\t\tif (rxq->rq_obj.rq != NULL)\n-\t\t\tmlx5_devx_rq_destroy(&rxq->rq_obj);\n-\t\tif (rxq->cq_obj.cq != NULL)\n-\t\t\tmlx5_devx_cq_destroy(&rxq->cq_obj);\n-\t\tif (rxq->devx_channel)\n-\t\t\tmlx5_os_devx_destroy_event_channel\n-\t\t\t\t\t\t\t(rxq->devx_channel);\n+\t\tif (rxq->devx_rq.rq != NULL)\n+\t\t\tclaim_zero(mlx5_devx_rq_destroy(&rxq->devx_rq));\n \t\tmlx5_free(rxq);\n \t}\n+\tif (rxq_obj != NULL) {\n+\t\tif (rxq_obj->cq_obj.cq != NULL)\n+\t\t\tmlx5_devx_cq_destroy(&rxq_obj->cq_obj);\n+\t\tif (rxq_obj->devx_channel)\n+\t\t\tmlx5_os_devx_destroy_event_channel\n+\t\t\t\t\t\t\t(rxq_obj->devx_channel);\n+\t\tmlx5_free(rxq_obj);\n+\t}\n \tif (rxq_ctrl != NULL)\n \t\tmlx5_free(rxq_ctrl);\n \trte_errno = ret; /* Restore rte_errno. */\n@@ -969,12 +960,14 @@ static void\n mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->rxq_ctrl;\n+\tstruct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;\n \n \tmlx5_rxq_devx_obj_release(rxq);\n \tmlx5_free(rxq);\n \tmlx5_free(rxq_ctrl);\n+\tmlx5_free(rxq_obj);\n \tpriv->drop_queue.rxq = NULL;\n }\n \n@@ -994,7 +987,7 @@ mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)\n \t\tmlx5_devx_tir_destroy(hrxq);\n \tif (hrxq->ind_table->ind_table != NULL)\n \t\tmlx5_devx_ind_table_destroy(hrxq->ind_table);\n-\tif (priv->drop_queue.rxq->rq != NULL)\n+\tif (priv->drop_queue.rxq != NULL)\n \t\tmlx5_rxq_devx_obj_drop_release(dev);\n }\n \ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 4eed4176324..25f7fc2071a 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -183,6 +183,7 @@ struct mlx5_rxq_priv {\n \tstruct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */\n \tLIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */\n \tstruct mlx5_priv *priv; /* Back pointer to private data. */\n+\tstruct mlx5_devx_rq devx_rq;\n \tstruct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */\n \tuint32_t hairpin_status; /* Hairpin binding status. 
*/\n };\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 3aac7cc20ba..98408da3c8e 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -452,13 +452,13 @@ int\n mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n \tint ret;\n \n+\tMLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);\n \tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n-\tret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);\n+\tret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Cannot change Rx WQ state to RESET:  %s\",\n \t\t\tstrerror(errno));\n@@ -466,7 +466,7 @@ mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)\n \t\treturn ret;\n \t}\n \t/* Remove all processes CQEs. */\n-\trxq_sync_cq(rxq);\n+\trxq_sync_cq(&rxq_ctrl->rxq);\n \t/* Free all involved mbufs. */\n \trxq_free_elts(rxq_ctrl);\n \t/* Set the actual queue state. */\n@@ -538,26 +538,26 @@ int\n mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n+\tstruct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;\n \tint ret;\n \n-\tMLX5_ASSERT(rte_eal_process_type() ==  RTE_PROC_PRIMARY);\n+\tMLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);\n+\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n \t/* Allocate needed buffers. */\n-\tret = rxq_alloc_elts(rxq_ctrl);\n+\tret = rxq_alloc_elts(rxq->ctrl);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Cannot reallocate buffers for Rx WQ\");\n \t\trte_errno = errno;\n \t\treturn ret;\n \t}\n \trte_io_wmb();\n-\t*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);\n+\t*rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);\n \trte_io_wmb();\n \t/* Reset RQ consumer before moving queue to READY state. */\n-\t*rxq->rq_db = rte_cpu_to_be_32(0);\n+\t*rxq_data->rq_db = rte_cpu_to_be_32(0);\n \trte_io_wmb();\n-\tret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);\n+\tret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Cannot change Rx WQ state to READY:  %s\",\n \t\t\tstrerror(errno));\n@@ -565,8 +565,8 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)\n \t\treturn ret;\n \t}\n \t/* Reinitialize RQ - set WQEs. */\n-\tmlx5_rxq_initialize(rxq);\n-\trxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;\n+\tmlx5_rxq_initialize(rxq_data);\n+\trxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;\n \t/* Set actual queue state. 
*/\n \tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;\n \treturn 0;\n@@ -1770,15 +1770,19 @@ int\n mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;\n+\tstruct mlx5_rxq_priv *rxq;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \n-\tif (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)\n+\tif (priv->rxq_privs == NULL)\n+\t\treturn 0;\n+\trxq = mlx5_rxq_get(dev, idx);\n+\tif (rxq == NULL)\n \t\treturn 0;\n \tif (mlx5_rxq_deref(dev, idx) > 1)\n \t\treturn 1;\n-\tif (rxq_ctrl->obj) {\n-\t\tpriv->obj_ops.rxq_obj_release(rxq_ctrl->obj);\n+\trxq_ctrl = rxq->ctrl;\n+\tif (rxq_ctrl->obj != NULL) {\n+\t\tpriv->obj_ops.rxq_obj_release(rxq);\n \t\tLIST_REMOVE(rxq_ctrl->obj, next);\n \t\tmlx5_free(rxq_ctrl->obj);\n \t\trxq_ctrl->obj = NULL;\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex 7b984eff35f..d44d6d8e4c3 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -374,11 +374,9 @@ mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \n \tif (sm->is_wq) {\n-\t\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];\n-\t\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\t\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, sm->queue_id);\n \n-\t\tret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);\n+\t\tret = priv->obj_ops.rxq_obj_modify(rxq, sm->state);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Cannot change Rx WQ state to %u  - %s\",\n \t\t\t\t\tsm->state, strerror(errno));\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex f376f4d6fc4..b3188f510fb 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -180,7 +180,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)\n \t\t\trte_errno = ENOMEM;\n \t\t\tgoto error;\n \t\t}\n-\t\tret = priv->obj_ops.rxq_obj_new(dev, i);\n+\t\tret = priv->obj_ops.rxq_obj_new(rxq);\n \t\tif (ret) {\n \t\t\tmlx5_free(rxq_ctrl->obj);\n \t\t\tgoto error;\ndiff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c\nindex 60f97f2d2d1..586ba7166cb 100644\n--- a/drivers/net/mlx5/mlx5_vlan.c\n+++ b/drivers/net/mlx5/mlx5_vlan.c\n@@ -91,11 +91,11 @@ void\n mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n-\t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queue);\n+\tstruct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;\n \tint ret = 0;\n \n+\tMLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);\n \t/* Validate hw support */\n \tif (!priv->config.hw_vlan_strip) {\n \t\tDRV_LOG(ERR, \"port %u VLAN stripping is not supported\",\n@@ -109,20 +109,20 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)\n \t\treturn;\n \t}\n \tDRV_LOG(DEBUG, \"port %u set VLAN stripping offloads %d for port %uqueue %d\",\n-\t\tdev->data->port_id, on, rxq->port_id, queue);\n-\tif (!rxq_ctrl->obj) {\n+\t\tdev->data->port_id, on, rxq_data->port_id, queue);\n+\tif (rxq->ctrl->obj == NULL) {\n \t\t/* Update related bits in RX queue. 
*/\n-\t\trxq->vlan_strip = !!on;\n+\t\trxq_data->vlan_strip = !!on;\n \t\treturn;\n \t}\n-\tret = priv->obj_ops.rxq_obj_modify_vlan_strip(rxq_ctrl->obj, on);\n+\tret = priv->obj_ops.rxq_obj_modify_vlan_strip(rxq, on);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Port %u failed to modify object stripping mode:\"\n \t\t\t\" %s\", dev->data->port_id, strerror(rte_errno));\n \t\treturn;\n \t}\n \t/* Update related bits in RX queue. */\n-\trxq->vlan_strip = !!on;\n+\trxq_data->vlan_strip = !!on;\n }\n \n /**\n",
    "prefixes": [
        "09/11"
    ]
}