get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.

GET /api/patches/96801/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96801,
    "url": "http://patches.dpdk.org/api/patches/96801/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210811134523.376022-1-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210811134523.376022-1-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210811134523.376022-1-xuemingl@nvidia.com",
    "date": "2021-08-11T13:45:22",
    "name": "[v1] ethdev: change queue release callback",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "526ef5b9a3b80ff7e55d40667605b45998086eae",
    "submitter": {
        "id": 1904,
        "url": "http://patches.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210811134523.376022-1-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 18255,
            "url": "http://patches.dpdk.org/api/series/18255/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=18255",
            "date": "2021-08-11T13:45:22",
            "name": "[v1] ethdev: change queue release callback",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/18255/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/96801/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/96801/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 0649EA0C43;\n\tWed, 11 Aug 2021 15:46:15 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 6354140C35;\n\tWed, 11 Aug 2021 15:46:15 +0200 (CEST)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2064.outbound.protection.outlook.com [40.107.93.64])\n by mails.dpdk.org (Postfix) with ESMTP id C79A440042\n for <dev@dpdk.org>; Wed, 11 Aug 2021 15:46:13 +0200 (CEST)",
            "from BN1PR13CA0025.namprd13.prod.outlook.com (2603:10b6:408:e2::30)\n by DM6PR12MB3564.namprd12.prod.outlook.com (2603:10b6:5:11d::14) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4394.15; Wed, 11 Aug\n 2021 13:46:07 +0000",
            "from BN8NAM11FT012.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:e2:cafe::d9) by BN1PR13CA0025.outlook.office365.com\n (2603:10b6:408:e2::30) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4415.5 via Frontend\n Transport; Wed, 11 Aug 2021 13:46:07 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT012.mail.protection.outlook.com (10.13.177.55) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4415.16 via Frontend Transport; Wed, 11 Aug 2021 13:46:05 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 11 Aug\n 2021 13:45:50 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=KsXbXNWyKI1Ng4X+QC+JJ6phG02Qs69NnsV+sVX6SSLzrvR1W09Oh1iYdv/ECBAMnpuKckIulbErvBGI5qtiriE4GB3fV3UbqhbLhFENiHRhkBNYk1cr6kbI6XbPn9F6DQ8fLXahWkFW5PMul0/XtihmlvbDtlz9DHRSR3gmGCHegqFjjpbPYtv7fa66H4WvwHF8jFvPKxP1yjeEKVWGPA1UIea13JnglkTBkIdFHx9fwbosekYjKqvTYilNOOhp0PRLZtGxgHftm4REwbHU7egEvDCju4EuU1v0AtRGbxfLkdDO5Th85zFK8AlDNAH9/O4QB7yf6qMG+pO/vYF3BA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=aebt92pEm3dcFkiChNEVA+N7AHPKf0Xbw8GcMGzoCXM=;\n b=GhIV7vOABTMdo5mD6tthPcTWhQkhpgd52xfAAyMySXzYG+51xoJDMZP+oHhDCX9j7r5A9BR3/Zmq7ijSBVCbDgGQjo4ILlpOpWuQ9KP9IkK/e9z//ug1u6VAUdeOuwvgvRPP0gjUjTUoX/VTdKbdiM43CwQT2XKPkqxnTgP4QzdYmh6nuMtG2pUQDVhWzgqO4rGuml8/XYROFbv3FdgJCpwcgT6Ev0T83uWZnHIMc2gKDh7ZN4A2BuWOoOZjjiVmG5UkDMhmJizZf8Eyd9CaJegZ7EornvczDKKs8ZtJIxMu2KcEuCgHHDUSDFZeF3tyVJGlqWU/qyBijd/kz2ch1Q==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=semihalf.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=aebt92pEm3dcFkiChNEVA+N7AHPKf0Xbw8GcMGzoCXM=;\n b=twVKWzp4/NM/PsweaaThyFuNM7uzi9ClRQfhYe6AH6hwlcPVXE25nnQsa0mQPBiu743z3H3esnpoXrveIk3hHqcTL2Gb24MoBhiSZGPieV7OzFtevE00+tNO+nTQ8KLsvSHynblXLZoo5AX4oDEtADuyVYdVOjoqnkt7PhkG8r4jNfaXkE96/Pg80Tn6s67YG52njvpYQzIQNrkMUoplrLkgTbHCrOy49a+yAsUpIEd6uHPAbC4bdNfpyuN1DufyBe4a3dqO04cOpGh/NJuI856lvVt6UPLenItf555p3J3ctc1BbAfwHJY3bJwFVUBdc+aHYnDE2r7NcXO4Izn9Vw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; semihalf.com; dkim=none (message not signed)\n header.d=none;semihalf.com; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "",
        "CC": "<dev@dpdk.org>, <xuemingl@nvidia.com>, Ferruh Yigit\n <ferruh.yigit@intel.com>, \"John W. Linville\" <linville@tuxdriver.com>, \"Ciara\n Loftus\" <ciara.loftus@intel.com>, Qi Zhang <qi.z.zhang@intel.com>, \"Igor\n Russkikh\" <irusskikh@marvell.com>, Steven Webster\n <steven.webster@windriver.com>, Matt Peters <matt.peters@windriver.com>,\n Somalapuram Amaranath <asomalap@amd.com>, Rasesh Mody <rmody@marvell.com>,\n Shahed Shaikh <shshaikh@marvell.com>, Ajit Khaparde\n <ajit.khaparde@broadcom.com>, Somnath Kotur <somnath.kotur@broadcom.com>,\n Chas Williams <chas3@att.com>, \"Min Hu (Connor)\" <humin29@huawei.com>,\n \"Nithin Dabilpuram\" <ndabilpuram@marvell.com>, Kiran Kumar K\n <kirankumark@marvell.com>, Sunil Kumar Kori <skori@marvell.com>, Satha Rao\n <skoteshwar@marvell.com>, Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>,\n Hemant Agrawal <hemant.agrawal@nxp.com>, Sachin Saxena\n <sachin.saxena@oss.nxp.com>, Haiyue Wang <haiyue.wang@intel.com>, \"Marcin\n Wojtas\" <mw@semihalf.com>, Michal Krawczyk <mk@semihalf.com>, Shai Brandes\n <shaibran@amazon.com>, Evgeny Schemeilin <evgenys@amazon.com>, Igor Chauskin\n <igorch@amazon.com>, Gagandeep Singh <g.singh@nxp.com>, John Daley\n <johndale@cisco.com>, Hyong Youb Kim <hyonkim@cisco.com>, Gaetan Rivet\n <grive@u256.net>, Xiao Wang <xiao.w.wang@intel.com>, Ziyang Xuan\n <xuanziyang2@huawei.com>, Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>,\n Guoyang Zhou <zhouguoyang@huawei.com>, Yisen Zhuang\n <yisen.zhuang@huawei.com>, Lijun Ou <oulijun@huawei.com>, Beilei Xing\n <beilei.xing@intel.com>, Jingjing Wu <jingjing.wu@intel.com>, Qiming Yang\n <qiming.yang@intel.com>, Andrew Boyer <aboyer@pensando.io>, Rosen Xu\n <rosen.xu@intel.com>, Shijith Thotton <sthotton@marvell.com>,\n Srisivasubramanian Srinivasan <srinivasan@marvell.com>, Jakub Grajciar\n <jgrajcia@cisco.com>, Matan Azrad <matan@nvidia.com>, Shahaf Shuler\n <shahafs@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>, \"Zyta\n Szpak\" 
<zr@semihalf.com>, Liron Himi <lironh@marvell.com>, Stephen Hemminger\n <sthemmin@microsoft.com>, Long Li <longli@microsoft.com>, Martin Spinler\n <spinler@cesnet.cz>, Heinrich Kuhn <heinrich.kuhn@netronome.com>, Jiawen Wu\n <jiawenwu@trustnetic.com>, Tetsuya Mukawa <mtetsuyah@gmail.com>, Harman Kalra\n <hkalra@marvell.com>, Jerin Jacob <jerinj@marvell.com>, Nalla Pradeep\n <pnalla@marvell.com>, Radha Mohan Chintakuntla <radhac@marvell.com>,\n Veerasenareddy Burru <vburru@marvell.com>, Devendra Singh Rawat\n <dsinghrawat@marvell.com>, Bruce Richardson <bruce.richardson@intel.com>,\n Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>, Keith Wiles\n <keith.wiles@intel.com>, Maciej Czekaj <mczekaj@marvell.com>, Jian Wang\n <jianwang@trustnetic.com>, Maxime Coquelin <maxime.coquelin@redhat.com>,\n Chenbo Xia <chenbo.xia@intel.com>, Yong Wang <yongwang@vmware.com>, \"Thomas\n Monjalon\" <thomas@monjalon.net>",
        "Date": "Wed, 11 Aug 2021 16:45:22 +0300",
        "Message-ID": "<20210811134523.376022-1-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.27.0",
        "In-Reply-To": "<20210727034134.20556-1-xuemingl@nvidia.com>",
        "References": "<20210727034134.20556-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "b6f0c135-28e7-47a9-42d7-08d95cce5dd9",
        "X-MS-TrafficTypeDiagnostic": "DM6PR12MB3564:",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <DM6PR12MB356493F50B51A17B4433D177A1F89@DM6PR12MB3564.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:48;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n VlX8wX4BwpcIWFj7aPLe5OEyFkSwXEkd2DpyAyGru0LeLHgXZ/z9gBFBE0aYwz8c3v4z44Mb7RJdYyvz+3JfBMJepoIsRe3lOLpXAuZ1Lvw3RbSvUQ3Njgwdohq6yHHkZot8eLxs3WNxhI0tI/s9gKEcuUQro4jY2CfF3rG9+azgoYbI7RYtjpAWCM+NjcOGs3m7cbeoIh9jUk+rY3TyoZ7b2a110bYkvv6t+Sq7eisZetuzvc0QEValh35m1IByqUjLnEoXrqqM0nvRnEoY6BNgG0tuXVWErgsmrLK+j8017/7tXzjSKe1mBmYoPsOdItdPTlDdEsuEtpXxnq9HD6B5m+FZRDKc54hi2bSB1+fx0IbWHrlxD+zJmnEG/eEWSlP/ko1D/u/N1LnRsA1PAn+uBUpnpttqG+dRqfl6m8v71Nd/MZN/b1Z/DjnGhL6y1u851i7ttVw4tVOhBcOMuNk9SNdadAVW75W7eIgZEDZlQ7hW+dRN+a/zdH5xjEBPKd85y0rw1K5fPHIk6UoZtoRZzdT2lo47+2i4gxu4SUCPf7vxdsNV5zg5OIDpNQzJh19Lbpl+51A4MXfgGY7uYTGXmeKDt/PKGcwb4bsPQWb4VGmdUa5tUihQjfQlhVtk5bDHWtHau9/Mg7m6t7jgEw0irPDSpQZvocnb1dGyuymL2RjwWK95FABJ3vWQYIqJvpje09Cn65bdSzT7nnhqLI16FCp6wnwx79SHJTehATvtr6jejf/A244cYVvRH7e4fnse4OQ+knsUO/VBh4OK5A9QbkaI66Yy8tYVFwv3Wk+B0UFgPS0fnwU2qL/fgtTgm+3nKAlKTSV/BcoyW/EpVw==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(376002)(396003)(39860400002)(346002)(136003)(46966006)(36840700001)(82740400003)(7636003)(82310400003)(7696005)(26005)(109986005)(6286002)(86362001)(70206006)(1076003)(70586007)(83380400001)(186003)(8676002)(47076005)(16526019)(966005)(36906005)(356005)(30864003)(2616005)(4326008)(316002)(54906003)(55016002)(36860700001)(8936002)(336012)(36756003)(2906002)(426003)(7406005)(5660300002)(478600001)(7416002)(7366002)(6666004)(266003)(579004)(559001)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "11 Aug 2021 13:46:05.5364 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n b6f0c135-28e7-47a9-42d7-08d95cce5dd9",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT012.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM6PR12MB3564",
        "Subject": "[dpdk-dev] [PATCH v1] ethdev: change queue release callback",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "To align with other eth device queue configuration callbacks, change RX\nand TX queue release callback API parameter from queue object to device\nand queue index.\n\nThis patch allows NULL callback to avoid defining empty release\ncallbacks.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\nCc: Ferruh Yigit <ferruh.yigit@intel.com>\n\n---\nThis patch is part of shared Rx queue feature:\n  https://mails.dpdk.org/archives/dev/2021-July/215575.html\n---\n app/test/virtual_pmd.c                    | 12 -----\n drivers/net/af_packet/rte_eth_af_packet.c |  7 ---\n drivers/net/af_xdp/rte_eth_af_xdp.c       |  7 ---\n drivers/net/atlantic/atl_ethdev.h         |  4 +-\n drivers/net/atlantic/atl_rxtx.c           | 24 ++++-----\n drivers/net/avp/avp_ethdev.c              | 34 ++++---------\n drivers/net/axgbe/axgbe_dev.c             |  2 +-\n drivers/net/axgbe/axgbe_rxtx.c            |  8 +--\n drivers/net/axgbe/axgbe_rxtx.h            |  4 +-\n drivers/net/bnx2x/bnx2x_rxtx.c            |  8 +--\n drivers/net/bnx2x/bnx2x_rxtx.h            |  4 +-\n drivers/net/bnxt/bnxt_reps.c              | 20 ++++----\n drivers/net/bnxt/bnxt_reps.h              |  4 +-\n drivers/net/bnxt/bnxt_ring.c              |  2 +-\n drivers/net/bnxt/bnxt_rxq.c               | 12 +++--\n drivers/net/bnxt/bnxt_rxq.h               |  2 +-\n drivers/net/bnxt/bnxt_txq.c               | 15 +++---\n drivers/net/bnxt/bnxt_txq.h               |  2 +-\n drivers/net/bonding/rte_eth_bond_pmd.c    |  8 ++-\n drivers/net/cnxk/cnxk_ethdev.c            | 33 ++++++-------\n drivers/net/cxgbe/cxgbe_ethdev.c          | 12 ++---\n drivers/net/cxgbe/cxgbe_pfvf.h            |  4 +-\n drivers/net/dpaa/dpaa_ethdev.c            | 13 -----\n drivers/net/dpaa2/dpaa2_ethdev.c          | 11 +----\n drivers/net/e1000/e1000_ethdev.h          |  8 +--\n drivers/net/e1000/em_rxtx.c               | 12 ++---\n drivers/net/e1000/igb_rxtx.c              | 12 ++---\n drivers/net/ena/ena_ethdev.c              | 18 
+++----\n drivers/net/enetc/enetc_ethdev.c          | 12 +++--\n drivers/net/enic/enic_ethdev.c            |  8 ++-\n drivers/net/enic/enic_vf_representor.c    |  8 ++-\n drivers/net/failsafe/failsafe_ops.c       | 42 ++++++----------\n drivers/net/fm10k/fm10k_ethdev.c          | 14 +++---\n drivers/net/hinic/hinic_pmd_ethdev.c      | 20 +++++---\n drivers/net/hns3/hns3_rxtx.c              | 25 +++++-----\n drivers/net/hns3/hns3_rxtx.h              |  4 +-\n drivers/net/i40e/i40e_fdir.c              |  8 +--\n drivers/net/i40e/i40e_rxtx.c              | 40 +++++++++------\n drivers/net/i40e/i40e_rxtx.h              |  6 ++-\n drivers/net/iavf/iavf_rxtx.c              | 12 ++---\n drivers/net/iavf/iavf_rxtx.h              |  4 +-\n drivers/net/ice/ice_dcf_ethdev.c          |  4 +-\n drivers/net/ice/ice_ethdev.c              |  4 +-\n drivers/net/ice/ice_rxtx.c                | 12 +++++\n drivers/net/ice/ice_rxtx.h                |  2 +\n drivers/net/igc/igc_ethdev.c              |  4 +-\n drivers/net/igc/igc_txrx.c                | 12 ++---\n drivers/net/igc/igc_txrx.h                |  4 +-\n drivers/net/ionic/ionic_lif.c             |  4 +-\n drivers/net/ionic/ionic_rxtx.c            | 14 +++---\n drivers/net/ionic/ionic_rxtx.h            |  4 +-\n drivers/net/ipn3ke/ipn3ke_representor.c   | 12 -----\n drivers/net/ixgbe/ixgbe_ethdev.h          |  4 +-\n drivers/net/ixgbe/ixgbe_rxtx.c            | 12 ++---\n drivers/net/kni/rte_eth_kni.c             |  7 ---\n drivers/net/liquidio/lio_ethdev.c         | 24 +++++----\n drivers/net/liquidio/lio_ethdev.h         |  4 +-\n drivers/net/liquidio/lio_rxtx.c           |  4 +-\n drivers/net/memif/rte_eth_memif.c         | 23 ++++++---\n drivers/net/mlx4/mlx4.c                   |  4 +-\n drivers/net/mlx4/mlx4_rxq.c               | 27 ++++-------\n drivers/net/mlx4/mlx4_rxtx.h              |  4 +-\n drivers/net/mlx4/mlx4_txq.c               | 27 ++++-------\n drivers/net/mlx5/mlx5_rx.h                |  2 +-\n 
drivers/net/mlx5/mlx5_rxq.c               | 21 ++++----\n drivers/net/mlx5/mlx5_tx.h                |  2 +-\n drivers/net/mlx5/mlx5_txq.c               | 25 ++++------\n drivers/net/mvneta/mvneta_ethdev.c        |  4 +-\n drivers/net/mvneta/mvneta_rxtx.c          | 22 +++++----\n drivers/net/mvneta/mvneta_rxtx.h          |  4 +-\n drivers/net/mvpp2/mrvl_ethdev.c           | 20 +++++---\n drivers/net/netvsc/hn_rxtx.c              | 10 ++--\n drivers/net/netvsc/hn_var.h               |  4 +-\n drivers/net/netvsc/hn_vf.c                | 14 ++----\n drivers/net/nfb/nfb_ethdev.c              |  4 +-\n drivers/net/nfb/nfb_rx.c                  |  5 +-\n drivers/net/nfb/nfb_rx.h                  |  8 +--\n drivers/net/nfb/nfb_tx.c                  |  5 +-\n drivers/net/nfb/nfb_tx.h                  |  8 +--\n drivers/net/nfp/nfp_net.c                 | 43 ++++++++---------\n drivers/net/ngbe/ngbe_ethdev.h            |  4 +-\n drivers/net/ngbe/ngbe_rxtx.c              | 12 ++---\n drivers/net/null/rte_eth_null.c           | 22 ++++++---\n drivers/net/octeontx/octeontx_ethdev.c    | 18 +++----\n drivers/net/octeontx2/otx2_ethdev.c       | 59 +++++++++--------------\n drivers/net/octeontx_ep/otx_ep_ethdev.c   | 20 +++++---\n drivers/net/pcap/pcap_ethdev.c            |  7 ---\n drivers/net/pfe/pfe_ethdev.c              | 14 ------\n drivers/net/qede/qede_ethdev.c            | 20 ++++++--\n drivers/net/ring/rte_eth_ring.c           |  4 --\n drivers/net/sfc/sfc_ethdev.c              |  8 +--\n drivers/net/szedata2/rte_eth_szedata2.c   | 50 +++++++++----------\n drivers/net/tap/rte_eth_tap.c             |  8 +--\n drivers/net/thunderx/nicvf_ethdev.c       | 28 +++++------\n drivers/net/txgbe/txgbe_ethdev.h          |  4 +-\n drivers/net/txgbe/txgbe_rxtx.c            | 12 ++---\n drivers/net/vhost/rte_eth_vhost.c         | 14 ++++--\n drivers/net/virtio/virtio_ethdev.c        |  8 ---\n drivers/net/vmxnet3/vmxnet3_ethdev.c      | 14 ++----\n drivers/net/vmxnet3/vmxnet3_ethdev.h 
     |  4 +-\n drivers/net/vmxnet3/vmxnet3_rxtx.c        |  8 +--\n lib/ethdev/ethdev_driver.h                |  3 +-\n lib/ethdev/rte_ethdev.c                   | 50 ++++++++-----------\n 103 files changed, 616 insertions(+), 697 deletions(-)",
    "diff": "diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c\nindex 7036f401ed..7e15b47eb0 100644\n--- a/app/test/virtual_pmd.c\n+++ b/app/test/virtual_pmd.c\n@@ -163,16 +163,6 @@ virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,\n \treturn -1;\n }\n \n-static void\n-virtual_ethdev_rx_queue_release(void *q __rte_unused)\n-{\n-}\n-\n-static void\n-virtual_ethdev_tx_queue_release(void *q __rte_unused)\n-{\n-}\n-\n static int\n virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,\n \t\tint wait_to_complete __rte_unused)\n@@ -243,8 +233,6 @@ static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {\n \t.dev_infos_get = virtual_ethdev_info_get,\n \t.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,\n \t.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,\n-\t.rx_queue_release = virtual_ethdev_rx_queue_release,\n-\t.tx_queue_release = virtual_ethdev_tx_queue_release,\n \t.link_update = virtual_ethdev_link_update_success,\n \t.mac_addr_set = virtual_ethdev_mac_address_set,\n \t.stats_get = virtual_ethdev_stats_get,\ndiff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c\nindex b73b211fd2..480d6d3333 100644\n--- a/drivers/net/af_packet/rte_eth_af_packet.c\n+++ b/drivers/net/af_packet/rte_eth_af_packet.c\n@@ -407,11 +407,6 @@ eth_dev_close(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n-static void\n-eth_queue_release(void *q __rte_unused)\n-{\n-}\n-\n static int\n eth_link_update(struct rte_eth_dev *dev __rte_unused,\n                 int wait_to_complete __rte_unused)\n@@ -574,8 +569,6 @@ static const struct eth_dev_ops ops = {\n \t.promiscuous_disable = eth_dev_promiscuous_disable,\n \t.rx_queue_setup = eth_rx_queue_setup,\n \t.tx_queue_setup = eth_tx_queue_setup,\n-\t.rx_queue_release = eth_queue_release,\n-\t.tx_queue_release = eth_queue_release,\n \t.link_update = eth_link_update,\n \t.stats_get = eth_stats_get,\n \t.stats_reset = eth_stats_reset,\ndiff 
--git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c\nindex 74ffa45112..bad8cc5759 100644\n--- a/drivers/net/af_xdp/rte_eth_af_xdp.c\n+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c\n@@ -991,11 +991,6 @@ eth_dev_close(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n-static void\n-eth_queue_release(void *q __rte_unused)\n-{\n-}\n-\n static int\n eth_link_update(struct rte_eth_dev *dev __rte_unused,\n \t\tint wait_to_complete __rte_unused)\n@@ -1476,8 +1471,6 @@ static const struct eth_dev_ops ops = {\n \t.promiscuous_disable = eth_dev_promiscuous_disable,\n \t.rx_queue_setup = eth_rx_queue_setup,\n \t.tx_queue_setup = eth_tx_queue_setup,\n-\t.rx_queue_release = eth_queue_release,\n-\t.tx_queue_release = eth_queue_release,\n \t.link_update = eth_link_update,\n \t.stats_get = eth_stats_get,\n \t.stats_reset = eth_stats_reset,\ndiff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h\nindex f547571b5c..a2d1d4397c 100644\n--- a/drivers/net/atlantic/atl_ethdev.h\n+++ b/drivers/net/atlantic/atl_ethdev.h\n@@ -54,8 +54,8 @@ struct atl_adapter {\n /*\n  * RX/TX function prototypes\n  */\n-void atl_rx_queue_release(void *rxq);\n-void atl_tx_queue_release(void *txq);\n+void atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+void atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n \n int atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t\tuint16_t nb_rx_desc, unsigned int socket_id,\ndiff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c\nindex 7d367c9306..fca682d8b0 100644\n--- a/drivers/net/atlantic/atl_rxtx.c\n+++ b/drivers/net/atlantic/atl_rxtx.c\n@@ -125,7 +125,7 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t * different socket than was previously used.\n \t */\n \tif (dev->data->rx_queues[rx_queue_id] != NULL) {\n-\t\tatl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);\n+\t\tatl_rx_queue_release(dev, 
rx_queue_id);\n \t\tdev->data->rx_queues[rx_queue_id] = NULL;\n \t}\n \n@@ -247,7 +247,7 @@ atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \t * different socket than was previously used.\n \t */\n \tif (dev->data->tx_queues[tx_queue_id] != NULL) {\n-\t\tatl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);\n+\t\tatl_tx_queue_release(dev, tx_queue_id);\n \t\tdev->data->tx_queues[tx_queue_id] = NULL;\n \t}\n \n@@ -498,13 +498,13 @@ atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n }\n \n void\n-atl_rx_queue_release(void *rx_queue)\n+atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n-\tPMD_INIT_FUNC_TRACE();\n+\tstruct atl_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];\n \n-\tif (rx_queue != NULL) {\n-\t\tstruct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;\n+\tPMD_INIT_FUNC_TRACE();\n \n+\tif (rxq != NULL) {\n \t\tatl_rx_queue_release_mbufs(rxq);\n \t\trte_free(rxq->sw_ring);\n \t\trte_free(rxq);\n@@ -569,13 +569,13 @@ atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n }\n \n void\n-atl_tx_queue_release(void *tx_queue)\n+atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n-\tPMD_INIT_FUNC_TRACE();\n+\tstruct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];\n \n-\tif (tx_queue != NULL) {\n-\t\tstruct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;\n+\tPMD_INIT_FUNC_TRACE();\n \n+\tif (txq != NULL) {\n \t\tatl_tx_queue_release_mbufs(txq);\n \t\trte_free(txq->sw_ring);\n \t\trte_free(txq);\n@@ -590,13 +590,13 @@ atl_free_queues(struct rte_eth_dev *dev)\n \tPMD_INIT_FUNC_TRACE();\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tatl_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tatl_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = 0;\n \t}\n \tdev->data->nb_rx_queues = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tatl_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tatl_tx_queue_release(dev, i);\n 
\t\tdev->data->tx_queues[i] = 0;\n \t}\n \tdev->data->nb_tx_queues = 0;\ndiff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c\nindex 623fa5e5ff..bb0842fb24 100644\n--- a/drivers/net/avp/avp_ethdev.c\n+++ b/drivers/net/avp/avp_ethdev.c\n@@ -75,8 +75,8 @@ static uint16_t avp_xmit_pkts(void *tx_queue,\n \t\t\t      struct rte_mbuf **tx_pkts,\n \t\t\t      uint16_t nb_pkts);\n \n-static void avp_dev_rx_queue_release(void *rxq);\n-static void avp_dev_tx_queue_release(void *txq);\n+static void avp_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+static void avp_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n static int avp_dev_stats_get(struct rte_eth_dev *dev,\n \t\t\t      struct rte_eth_stats *stats);\n@@ -1926,18 +1926,11 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n }\n \n static void\n-avp_dev_rx_queue_release(void *rx_queue)\n+avp_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n {\n-\tstruct avp_queue *rxq = (struct avp_queue *)rx_queue;\n-\tstruct avp_dev *avp = rxq->avp;\n-\tstruct rte_eth_dev_data *data = avp->dev_data;\n-\tunsigned int i;\n-\n-\tfor (i = 0; i < avp->num_rx_queues; i++) {\n-\t\tif (data->rx_queues[i] == rxq) {\n-\t\t\trte_free(data->rx_queues[i]);\n-\t\t\tdata->rx_queues[i] = NULL;\n-\t\t}\n+\tif (eth_dev->data->rx_queues[rx_queue_id] != NULL) {\n+\t\trte_free(eth_dev->data->rx_queues[rx_queue_id]);\n+\t\teth_dev->data->rx_queues[rx_queue_id] = NULL;\n \t}\n }\n \n@@ -1957,18 +1950,11 @@ avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev)\n }\n \n static void\n-avp_dev_tx_queue_release(void *tx_queue)\n+avp_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)\n {\n-\tstruct avp_queue *txq = (struct avp_queue *)tx_queue;\n-\tstruct avp_dev *avp = txq->avp;\n-\tstruct rte_eth_dev_data *data = avp->dev_data;\n-\tunsigned int i;\n-\n-\tfor (i = 0; i < avp->num_tx_queues; i++) {\n-\t\tif (data->tx_queues[i] == txq) 
{\n-\t\t\trte_free(data->tx_queues[i]);\n-\t\t\tdata->tx_queues[i] = NULL;\n-\t\t}\n+\tif (eth_dev->data->tx_queues[tx_queue_id] != NULL) {\n+\t\trte_free(eth_dev->data->tx_queues[tx_queue_id]);\n+\t\teth_dev->data->tx_queues[tx_queue_id] = NULL;\n \t}\n }\n \ndiff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c\nindex 786288a7b0..ca32ad6418 100644\n--- a/drivers/net/axgbe/axgbe_dev.c\n+++ b/drivers/net/axgbe/axgbe_dev.c\n@@ -950,7 +950,7 @@ static int wrapper_rx_desc_init(struct axgbe_port *pdata)\n \t\t\tif (mbuf == NULL) {\n \t\t\t\tPMD_DRV_LOG(ERR, \"RX mbuf alloc failed queue_id = %u, idx = %d\\n\",\n \t\t\t\t\t    (unsigned int)rxq->queue_id, j);\n-\t\t\t\taxgbe_dev_rx_queue_release(rxq);\n+\t\t\t\taxgbe_dev_rx_queue_release(pdata->eth_dev, i);\n \t\t\t\treturn -ENOMEM;\n \t\t\t}\n \t\t\trxq->sw_ring[j] = mbuf;\ndiff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c\nindex 33f709a6bb..c8618d2d6d 100644\n--- a/drivers/net/axgbe/axgbe_rxtx.c\n+++ b/drivers/net/axgbe/axgbe_rxtx.c\n@@ -31,9 +31,9 @@ axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)\n \t}\n }\n \n-void axgbe_dev_rx_queue_release(void *rxq)\n+void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)\n {\n-\taxgbe_rx_queue_release(rxq);\n+\taxgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);\n }\n \n int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n@@ -517,9 +517,9 @@ static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)\n \t}\n }\n \n-void axgbe_dev_tx_queue_release(void *txq)\n+void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)\n {\n-\taxgbe_tx_queue_release(txq);\n+\taxgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);\n }\n \n int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\ndiff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h\nindex c2b11bb0e6..2a330339cd 100644\n--- a/drivers/net/axgbe/axgbe_rxtx.h\n+++ 
b/drivers/net/axgbe/axgbe_rxtx.h\n@@ -153,7 +153,7 @@ struct axgbe_tx_queue {\n  */\n \n \n-void axgbe_dev_tx_queue_release(void *txq);\n+void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);\n int  axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \t\t\t      uint16_t nb_tx_desc, unsigned int socket_id,\n \t\t\t      const struct rte_eth_txconf *tx_conf);\n@@ -171,7 +171,7 @@ uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t uint16_t nb_pkts);\n \n \n-void axgbe_dev_rx_queue_release(void *rxq);\n+void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);\n int  axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t\t\t      uint16_t nb_rx_desc, unsigned int socket_id,\n \t\t\t      const struct rte_eth_rxconf *rx_conf,\ndiff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c\nindex 2b17602290..fea7a34e7d 100644\n--- a/drivers/net/bnx2x/bnx2x_rxtx.c\n+++ b/drivers/net/bnx2x/bnx2x_rxtx.c\n@@ -37,9 +37,9 @@ bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)\n }\n \n void\n-bnx2x_dev_rx_queue_release(void *rxq)\n+bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)\n {\n-\tbnx2x_rx_queue_release(rxq);\n+\tbnx2x_rx_queue_release(dev->data->rx_queues[queue_idx]);\n }\n \n int\n@@ -182,9 +182,9 @@ bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)\n }\n \n void\n-bnx2x_dev_tx_queue_release(void *txq)\n+bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)\n {\n-\tbnx2x_tx_queue_release(txq);\n+\tbnx2x_tx_queue_release(dev->data->tx_queues[queue_idx]);\n }\n \n static uint16_t\ndiff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h\nindex 3f4692b47d..247a72230b 100644\n--- a/drivers/net/bnx2x/bnx2x_rxtx.h\n+++ b/drivers/net/bnx2x/bnx2x_rxtx.h\n@@ -72,8 +72,8 @@ int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \t\t\t      uint16_t 
nb_tx_desc, unsigned int socket_id,\n \t\t\t      const struct rte_eth_txconf *tx_conf);\n \n-void bnx2x_dev_rx_queue_release(void *rxq);\n-void bnx2x_dev_tx_queue_release(void *txq);\n+void bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);\n+void bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);\n void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev);\n void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev);\n void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);\ndiff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c\nindex bdbad53b7d..df05619c3f 100644\n--- a/drivers/net/bnxt/bnxt_reps.c\n+++ b/drivers/net/bnxt/bnxt_reps.c\n@@ -630,7 +630,7 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \tif (eth_dev->data->rx_queues) {\n \t\trxq = eth_dev->data->rx_queues[queue_idx];\n \t\tif (rxq)\n-\t\t\tbnxt_rx_queue_release_op(rxq);\n+\t\t\tbnxt_rx_queue_release_op(eth_dev, queue_idx);\n \t}\n \n \trxq = rte_zmalloc_socket(\"bnxt_vfr_rx_queue\",\n@@ -641,6 +641,8 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \t\treturn -ENOMEM;\n \t}\n \n+\teth_dev->data->rx_queues[queue_idx] = rxq;\n+\n \trxq->nb_rx_desc = nb_desc;\n \n \trc = bnxt_init_rep_rx_ring(rxq, socket_id);\n@@ -660,20 +662,19 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \trxq->rx_ring->rx_buf_ring = buf_ring;\n \trxq->queue_id = queue_idx;\n \trxq->port_id = eth_dev->data->port_id;\n-\teth_dev->data->rx_queues[queue_idx] = rxq;\n \n \treturn 0;\n \n out:\n \tif (rxq)\n-\t\tbnxt_rep_rx_queue_release_op(rxq);\n+\t\tbnxt_rep_rx_queue_release_op(eth_dev, queue_idx);\n \n \treturn rc;\n }\n \n-void bnxt_rep_rx_queue_release_op(void *rx_queue)\n+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)\n {\n-\tstruct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;\n+\tstruct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];\n \n \tif (!rxq)\n \t\treturn;\n@@ -728,8 +729,8 
@@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \n \tif (eth_dev->data->tx_queues) {\n \t\tvfr_txq = eth_dev->data->tx_queues[queue_idx];\n-\t\tbnxt_rep_tx_queue_release_op(vfr_txq);\n-\t\tvfr_txq = NULL;\n+\t\tif (vfr_txq != NULL)\n+\t\t\tbnxt_rep_tx_queue_release_op(eth_dev, queue_idx);\n \t}\n \n \tvfr_txq = rte_zmalloc_socket(\"bnxt_vfr_tx_queue\",\n@@ -758,15 +759,16 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \treturn 0;\n }\n \n-void bnxt_rep_tx_queue_release_op(void *tx_queue)\n+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)\n {\n-\tstruct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;\n+\tstruct bnxt_vf_rep_tx_queue *vfr_txq = dev->data->tx_queues[queue_idx];\n \n \tif (!vfr_txq)\n \t\treturn;\n \n \trte_free(vfr_txq->txq);\n \trte_free(vfr_txq);\n+\tdev->data->tx_queues[queue_idx] = NULL;\n }\n \n int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,\ndiff --git a/drivers/net/bnxt/bnxt_reps.h b/drivers/net/bnxt/bnxt_reps.h\nindex 8d6139f2b7..01e57ee5b5 100644\n--- a/drivers/net/bnxt/bnxt_reps.h\n+++ b/drivers/net/bnxt/bnxt_reps.h\n@@ -42,8 +42,8 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \t\t\t\t  __rte_unused unsigned int socket_id,\n \t\t\t\t  __rte_unused const struct rte_eth_txconf *\n \t\t\t\t  tx_conf);\n-void bnxt_rep_rx_queue_release_op(void *rx_queue);\n-void bnxt_rep_tx_queue_release_op(void *tx_queue);\n+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);\n+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);\n int  bnxt_rep_dev_stop_op(struct rte_eth_dev *eth_dev);\n int bnxt_rep_dev_close_op(struct rte_eth_dev *eth_dev);\n int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,\ndiff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c\nindex b05c470766..176636933b 100644\n--- a/drivers/net/bnxt/bnxt_ring.c\n+++ b/drivers/net/bnxt/bnxt_ring.c\n@@ -634,7 +634,7 @@ int 
bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)\n \tif (rxq->rx_started) {\n \t\tif (bnxt_init_one_rx_ring(rxq)) {\n \t\t\tPMD_DRV_LOG(ERR, \"bnxt_init_one_rx_ring failed!\\n\");\n-\t\t\tbnxt_rx_queue_release_op(rxq);\n+\t\t\tbnxt_rx_queue_release_op(bp->eth_dev, queue_index);\n \t\t\trc = -ENOMEM;\n \t\t\tgoto err_out;\n \t\t}\ndiff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c\nindex bbcb3b06e7..2eb7a3cb29 100644\n--- a/drivers/net/bnxt/bnxt_rxq.c\n+++ b/drivers/net/bnxt/bnxt_rxq.c\n@@ -240,9 +240,9 @@ void bnxt_free_rx_mbufs(struct bnxt *bp)\n \t}\n }\n \n-void bnxt_rx_queue_release_op(void *rx_queue)\n+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)\n {\n-\tstruct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;\n+\tstruct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];\n \n \tif (rxq) {\n \t\tif (is_bnxt_in_error(rxq->bp))\n@@ -273,6 +273,7 @@ void bnxt_rx_queue_release_op(void *rx_queue)\n \t\trxq->mz = NULL;\n \n \t\trte_free(rxq);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n \t}\n }\n \n@@ -307,7 +308,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \tif (eth_dev->data->rx_queues) {\n \t\trxq = eth_dev->data->rx_queues[queue_idx];\n \t\tif (rxq)\n-\t\t\tbnxt_rx_queue_release_op(rxq);\n+\t\t\tbnxt_rx_queue_release_op(eth_dev, queue_idx);\n \t}\n \trxq = rte_zmalloc_socket(\"bnxt_rx_queue\", sizeof(struct bnxt_rx_queue),\n \t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n@@ -328,6 +329,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \n \tPMD_DRV_LOG(DEBUG, \"RX Buf MTU %d\\n\", eth_dev->data->mtu);\n \n+\teth_dev->data->rx_queues[queue_idx] = rxq;\n+\n \trc = bnxt_init_rx_ring_struct(rxq, socket_id);\n \tif (rc) {\n \t\tPMD_DRV_LOG(ERR,\n@@ -343,7 +346,6 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \telse\n \t\trxq->crc_len = 0;\n \n-\teth_dev->data->rx_queues[queue_idx] = rxq;\n \t/* Allocate RX ring hardware descriptors */\n \trc = 
bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,\n \t\t\t      NULL, \"rxr\");\n@@ -369,7 +371,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \n \treturn 0;\n err:\n-\tbnxt_rx_queue_release_op(rxq);\n+\tbnxt_rx_queue_release_op(eth_dev, queue_idx);\n \treturn rc;\n }\n \ndiff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h\nindex 42bd8e7ab7..9bb9352feb 100644\n--- a/drivers/net/bnxt/bnxt_rxq.h\n+++ b/drivers/net/bnxt/bnxt_rxq.h\n@@ -46,7 +46,7 @@ struct bnxt_rx_queue {\n \n void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);\n int bnxt_mq_rx_configure(struct bnxt *bp);\n-void bnxt_rx_queue_release_op(void *rx_queue);\n+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);\n int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \t\t\t       uint16_t queue_idx,\n \t\t\t       uint16_t nb_desc,\ndiff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c\nindex 830416af3d..d4d2c20362 100644\n--- a/drivers/net/bnxt/bnxt_txq.c\n+++ b/drivers/net/bnxt/bnxt_txq.c\n@@ -53,9 +53,9 @@ void bnxt_free_tx_mbufs(struct bnxt *bp)\n \t}\n }\n \n-void bnxt_tx_queue_release_op(void *tx_queue)\n+void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)\n {\n-\tstruct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;\n+\tstruct bnxt_tx_queue *txq = dev->data->tx_queues[queue_idx];\n \n \tif (txq) {\n \t\tif (is_bnxt_in_error(txq->bp))\n@@ -83,6 +83,7 @@ void bnxt_tx_queue_release_op(void *tx_queue)\n \n \t\trte_free(txq->free);\n \t\trte_free(txq);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n \t}\n }\n \n@@ -115,7 +116,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \tif (eth_dev->data->tx_queues) {\n \t\ttxq = eth_dev->data->tx_queues[queue_idx];\n \t\tif (txq) {\n-\t\t\tbnxt_tx_queue_release_op(txq);\n+\t\t\tbnxt_tx_queue_release_op(eth_dev, queue_idx);\n \t\t\ttxq = NULL;\n \t\t}\n \t}\n@@ -126,6 +127,9 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev 
*eth_dev,\n \t\treturn -ENOMEM;\n \t}\n \n+\ttxq->bp = bp;\n+\teth_dev->data->tx_queues[queue_idx] = txq;\n+\n \ttxq->free = rte_zmalloc_socket(NULL,\n \t\t\t\t       sizeof(struct rte_mbuf *) * nb_desc,\n \t\t\t\t       RTE_CACHE_LINE_SIZE, socket_id);\n@@ -134,7 +138,6 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \t\trc = -ENOMEM;\n \t\tgoto err;\n \t}\n-\ttxq->bp = bp;\n \ttxq->nb_tx_desc = nb_desc;\n \ttxq->tx_free_thresh =\n \t\tRTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_TX_BURST);\n@@ -164,8 +167,6 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \t\tgoto err;\n \t}\n \n-\teth_dev->data->tx_queues[queue_idx] = txq;\n-\n \tif (txq->tx_deferred_start)\n \t\ttxq->tx_started = false;\n \telse\n@@ -173,6 +174,6 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \n \treturn 0;\n err:\n-\tbnxt_tx_queue_release_op(txq);\n+\tbnxt_tx_queue_release_op(eth_dev, queue_idx);\n \treturn rc;\n }\ndiff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h\nindex e0e142df3e..67fd4cbebb 100644\n--- a/drivers/net/bnxt/bnxt_txq.h\n+++ b/drivers/net/bnxt/bnxt_txq.h\n@@ -37,7 +37,7 @@ struct bnxt_tx_queue {\n \n void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);\n void bnxt_free_tx_mbufs(struct bnxt *bp);\n-void bnxt_tx_queue_release_op(void *tx_queue);\n+void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);\n int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,\n \t\t\t       uint16_t queue_idx,\n \t\t\t       uint16_t nb_desc,\ndiff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c\nindex a6755661c4..154db0dba2 100644\n--- a/drivers/net/bonding/rte_eth_bond_pmd.c\n+++ b/drivers/net/bonding/rte_eth_bond_pmd.c\n@@ -2333,8 +2333,10 @@ bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n }\n \n static void\n-bond_ethdev_rx_queue_release(void *queue)\n+bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)\n {\n+\tvoid 
*queue = dev->data->rx_queues[queue_id];\n+\n \tif (queue == NULL)\n \t\treturn;\n \n@@ -2342,8 +2344,10 @@ bond_ethdev_rx_queue_release(void *queue)\n }\n \n static void\n-bond_ethdev_tx_queue_release(void *queue)\n+bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)\n {\n+\tvoid *queue = dev->data->rx_queues[queue_id];\n+\n \tif (queue == NULL)\n \t\treturn;\n \ndiff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c\nindex 0e3652ed51..2037b8a65e 100644\n--- a/drivers/net/cnxk/cnxk_ethdev.c\n+++ b/drivers/net/cnxk/cnxk_ethdev.c\n@@ -190,7 +190,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,\n \t/* Free memory prior to re-allocation if needed. */\n \tif (eth_dev->data->tx_queues[qid] != NULL) {\n \t\tplt_nix_dbg(\"Freeing memory prior to re-allocation %d\", qid);\n-\t\tdev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);\n+\t\tdev_ops->tx_queue_release(eth_dev, qid);\n \t\teth_dev->data->tx_queues[qid] = NULL;\n \t}\n \n@@ -232,20 +232,20 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,\n }\n \n static void\n-cnxk_nix_tx_queue_release(void *txq)\n+cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)\n {\n+\tvoid *txq = eth_dev->data->tx_queues[qid];\n \tstruct cnxk_eth_txq_sp *txq_sp;\n \tstruct cnxk_eth_dev *dev;\n \tstruct roc_nix_sq *sq;\n-\tuint16_t qid;\n \tint rc;\n \n \tif (!txq)\n \t\treturn;\n \n \ttxq_sp = cnxk_eth_txq_to_sp(txq);\n+\n \tdev = txq_sp->dev;\n-\tqid = txq_sp->qid;\n \n \tplt_nix_dbg(\"Releasing txq %u\", qid);\n \n@@ -299,7 +299,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,\n \t\tconst struct eth_dev_ops *dev_ops = eth_dev->dev_ops;\n \n \t\tplt_nix_dbg(\"Freeing memory prior to re-allocation %d\", qid);\n-\t\tdev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);\n+\t\tdev_ops->rx_queue_release(eth_dev, qid);\n \t\teth_dev->data->rx_queues[qid] = NULL;\n \t}\n \n@@ -379,13 +379,13 @@ 
cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,\n }\n \n static void\n-cnxk_nix_rx_queue_release(void *rxq)\n+cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)\n {\n+\tvoid *rxq = eth_dev->data->rx_queues[qid];\n \tstruct cnxk_eth_rxq_sp *rxq_sp;\n \tstruct cnxk_eth_dev *dev;\n \tstruct roc_nix_rq *rq;\n \tstruct roc_nix_cq *cq;\n-\tuint16_t qid;\n \tint rc;\n \n \tif (!rxq)\n@@ -393,7 +393,6 @@ cnxk_nix_rx_queue_release(void *rxq)\n \n \trxq_sp = cnxk_eth_rxq_to_sp(rxq);\n \tdev = rxq_sp->dev;\n-\tqid = rxq_sp->qid;\n \n \tplt_nix_dbg(\"Releasing rxq %u\", qid);\n \n@@ -558,7 +557,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)\n \t\ttxq_sp = cnxk_eth_txq_to_sp(txq[i]);\n \t\tmemcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));\n \t\ttx_qconf[i].valid = true;\n-\t\tdev_ops->tx_queue_release(txq[i]);\n+\t\tdev_ops->tx_queue_release(eth_dev, i);\n \t\teth_dev->data->tx_queues[i] = NULL;\n \t}\n \n@@ -572,7 +571,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)\n \t\trxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);\n \t\tmemcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));\n \t\trx_qconf[i].valid = true;\n-\t\tdev_ops->rx_queue_release(rxq[i]);\n+\t\tdev_ops->rx_queue_release(eth_dev, i);\n \t\teth_dev->data->rx_queues[i] = NULL;\n \t}\n \n@@ -594,7 +593,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)\n \tstruct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;\n \tstruct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;\n \tint rc, i, nb_rxq, nb_txq;\n-\tvoid **txq, **rxq;\n \n \tnb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);\n \tnb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);\n@@ -629,9 +627,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)\n \t\t\t\t\t     &tx_qconf[i].conf.tx);\n \t\tif (rc) {\n \t\t\tplt_err(\"Failed to setup tx queue rc=%d\", rc);\n-\t\t\ttxq = eth_dev->data->tx_queues;\n \t\t\tfor (i -= 1; i >= 0; 
i--)\n-\t\t\t\tdev_ops->tx_queue_release(txq[i]);\n+\t\t\t\tdev_ops->tx_queue_release(eth_dev, i);\n \t\t\tgoto fail;\n \t\t}\n \t}\n@@ -647,9 +644,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)\n \t\t\t\t\t     rx_qconf[i].mp);\n \t\tif (rc) {\n \t\t\tplt_err(\"Failed to setup rx queue rc=%d\", rc);\n-\t\t\trxq = eth_dev->data->rx_queues;\n \t\t\tfor (i -= 1; i >= 0; i--)\n-\t\t\t\tdev_ops->rx_queue_release(rxq[i]);\n+\t\t\t\tdev_ops->rx_queue_release(eth_dev, i);\n \t\t\tgoto tx_queue_release;\n \t\t}\n \t}\n@@ -660,9 +656,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)\n \treturn 0;\n \n tx_queue_release:\n-\ttxq = eth_dev->data->tx_queues;\n \tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++)\n-\t\tdev_ops->tx_queue_release(txq[i]);\n+\t\tdev_ops->tx_queue_release(eth_dev, i);\n fail:\n \tif (tx_qconf)\n \t\tfree(tx_qconf);\n@@ -1417,14 +1412,14 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)\n \n \t/* Free up SQs */\n \tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++) {\n-\t\tdev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);\n+\t\tdev_ops->tx_queue_release(eth_dev, i);\n \t\teth_dev->data->tx_queues[i] = NULL;\n \t}\n \teth_dev->data->nb_tx_queues = 0;\n \n \t/* Free up RQ's and CQ's */\n \tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n-\t\tdev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);\n+\t\tdev_ops->rx_queue_release(eth_dev, i);\n \t\teth_dev->data->rx_queues[i] = NULL;\n \t}\n \teth_dev->data->nb_rx_queues = 0;\ndiff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c\nindex 177eca3976..33fa80213f 100644\n--- a/drivers/net/cxgbe/cxgbe_ethdev.c\n+++ b/drivers/net/cxgbe/cxgbe_ethdev.c\n@@ -532,7 +532,7 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,\n \n \t/*  Free up the existing queue  */\n \tif (eth_dev->data->tx_queues[queue_idx]) {\n-\t\tcxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);\n+\t\tcxgbe_dev_tx_queue_release(eth_dev, queue_idx);\n 
\t\teth_dev->data->tx_queues[queue_idx] = NULL;\n \t}\n \n@@ -565,9 +565,9 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,\n \treturn err;\n }\n \n-void cxgbe_dev_tx_queue_release(void *q)\n+void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)\n {\n-\tstruct sge_eth_txq *txq = (struct sge_eth_txq *)q;\n+\tstruct sge_eth_txq *txq = eth_dev->data->tx_queues[qid];\n \n \tif (txq) {\n \t\tstruct port_info *pi = (struct port_info *)\n@@ -655,7 +655,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n \n \t/*  Free up the existing queue  */\n \tif (eth_dev->data->rx_queues[queue_idx]) {\n-\t\tcxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);\n+\t\tcxgbe_dev_rx_queue_release(eth_dev, queue_idx);\n \t\teth_dev->data->rx_queues[queue_idx] = NULL;\n \t}\n \n@@ -702,9 +702,9 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n \treturn err;\n }\n \n-void cxgbe_dev_rx_queue_release(void *q)\n+void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)\n {\n-\tstruct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;\n+\tstruct sge_eth_rxq *rxq = eth_dev->data->rx_queues[qid];\n \n \tif (rxq) {\n \t\tstruct port_info *pi = (struct port_info *)\ndiff --git a/drivers/net/cxgbe/cxgbe_pfvf.h b/drivers/net/cxgbe/cxgbe_pfvf.h\nindex 801d6995d1..4a49665905 100644\n--- a/drivers/net/cxgbe/cxgbe_pfvf.h\n+++ b/drivers/net/cxgbe/cxgbe_pfvf.h\n@@ -16,8 +16,8 @@\n \t V_FW_PARAMS_PARAM_Y(0) | \\\n \t V_FW_PARAMS_PARAM_Z(0))\n \n-void cxgbe_dev_rx_queue_release(void *q);\n-void cxgbe_dev_tx_queue_release(void *q);\n+void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid);\n+void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid);\n int cxgbe_dev_stop(struct rte_eth_dev *eth_dev);\n int cxgbe_dev_close(struct rte_eth_dev *eth_dev);\n int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,\ndiff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c\nindex 
27d670f843..97681ff6c4 100644\n--- a/drivers/net/dpaa/dpaa_ethdev.c\n+++ b/drivers/net/dpaa/dpaa_ethdev.c\n@@ -1233,12 +1233,6 @@ dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-static\n-void dpaa_eth_rx_queue_release(void *rxq __rte_unused)\n-{\n-\tPMD_INIT_FUNC_TRACE();\n-}\n-\n static\n int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t    uint16_t nb_desc __rte_unused,\n@@ -1272,11 +1266,6 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \treturn 0;\n }\n \n-static void dpaa_eth_tx_queue_release(void *txq __rte_unused)\n-{\n-\tPMD_INIT_FUNC_TRACE();\n-}\n-\n static uint32_t\n dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n@@ -1571,8 +1560,6 @@ static struct eth_dev_ops dpaa_devops = {\n \n \t.rx_queue_setup\t\t  = dpaa_eth_rx_queue_setup,\n \t.tx_queue_setup\t\t  = dpaa_eth_tx_queue_setup,\n-\t.rx_queue_release\t  = dpaa_eth_rx_queue_release,\n-\t.tx_queue_release\t  = dpaa_eth_tx_queue_release,\n \t.rx_burst_mode_get\t  = dpaa_dev_rx_burst_mode_get,\n \t.tx_burst_mode_get\t  = dpaa_dev_tx_burst_mode_get,\n \t.rxq_info_get\t\t  = dpaa_rxq_info_get,\ndiff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c\nindex c12169578e..5b9381cf40 100644\n--- a/drivers/net/dpaa2/dpaa2_ethdev.c\n+++ b/drivers/net/dpaa2/dpaa2_ethdev.c\n@@ -976,9 +976,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,\n }\n \n static void\n-dpaa2_dev_rx_queue_release(void *q __rte_unused)\n+dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n-\tstruct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;\n+\tstruct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];\n \tstruct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;\n \tstruct fsl_mc_io *dpni =\n \t\t(struct fsl_mc_io *)priv->eth_dev->process_private;\n@@ -1004,12 +1004,6 @@ dpaa2_dev_rx_queue_release(void *q __rte_unused)\n \t}\n }\n \n-static 
void\n-dpaa2_dev_tx_queue_release(void *q __rte_unused)\n-{\n-\tPMD_INIT_FUNC_TRACE();\n-}\n-\n static uint32_t\n dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n@@ -2427,7 +2421,6 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {\n \t.rx_queue_setup    = dpaa2_dev_rx_queue_setup,\n \t.rx_queue_release  = dpaa2_dev_rx_queue_release,\n \t.tx_queue_setup    = dpaa2_dev_tx_queue_setup,\n-\t.tx_queue_release  = dpaa2_dev_tx_queue_release,\n \t.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,\n \t.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,\n \t.flow_ctrl_get\t      = dpaa2_flow_ctrl_get,\ndiff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h\nindex 3b4d9c3ee6..8e10e2777e 100644\n--- a/drivers/net/e1000/e1000_ethdev.h\n+++ b/drivers/net/e1000/e1000_ethdev.h\n@@ -386,8 +386,8 @@ extern const struct rte_flow_ops igb_flow_ops;\n /*\n  * RX/TX IGB function prototypes\n  */\n-void eth_igb_tx_queue_release(void *txq);\n-void eth_igb_rx_queue_release(void *rxq);\n+void eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+void eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n void igb_dev_clear_queues(struct rte_eth_dev *dev);\n void igb_dev_free_queues(struct rte_eth_dev *dev);\n \n@@ -462,8 +462,8 @@ uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);\n /*\n  * RX/TX EM function prototypes\n  */\n-void eth_em_tx_queue_release(void *txq);\n-void eth_em_rx_queue_release(void *rxq);\n+void eth_em_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+void eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n void em_dev_clear_queues(struct rte_eth_dev *dev);\n void em_dev_free_queues(struct rte_eth_dev *dev);\ndiff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c\nindex dfd8f2fd00..00a8af6d39 100644\n--- a/drivers/net/e1000/em_rxtx.c\n+++ b/drivers/net/e1000/em_rxtx.c\n@@ -1121,9 +1121,9 @@ em_tx_queue_release(struct em_tx_queue *txq)\n }\n \n 
void\n-eth_em_tx_queue_release(void *txq)\n+eth_em_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tem_tx_queue_release(txq);\n+\tem_tx_queue_release(dev->data->tx_queues[qid]);\n }\n \n /* (Re)set dynamic em_tx_queue fields to defaults */\n@@ -1343,9 +1343,9 @@ em_rx_queue_release(struct em_rx_queue *rxq)\n }\n \n void\n-eth_em_rx_queue_release(void *rxq)\n+eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tem_rx_queue_release(rxq);\n+\tem_rx_queue_release(dev->data->rx_queues[qid]);\n }\n \n /* Reset dynamic em_rx_queue fields back to defaults */\n@@ -1609,14 +1609,14 @@ em_dev_free_queues(struct rte_eth_dev *dev)\n \tuint16_t i;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\teth_em_rx_queue_release(dev->data->rx_queues[i]);\n+\t\teth_em_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t\trte_eth_dma_zone_free(dev, \"rx_ring\", i);\n \t}\n \tdev->data->nb_rx_queues = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\teth_em_tx_queue_release(dev->data->tx_queues[i]);\n+\t\teth_em_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t\trte_eth_dma_zone_free(dev, \"tx_ring\", i);\n \t}\ndiff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c\nindex 278d5d2712..d97ca1a011 100644\n--- a/drivers/net/e1000/igb_rxtx.c\n+++ b/drivers/net/e1000/igb_rxtx.c\n@@ -1281,9 +1281,9 @@ igb_tx_queue_release(struct igb_tx_queue *txq)\n }\n \n void\n-eth_igb_tx_queue_release(void *txq)\n+eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tigb_tx_queue_release(txq);\n+\tigb_tx_queue_release(dev->data->tx_queues[qid]);\n }\n \n static int\n@@ -1606,9 +1606,9 @@ igb_rx_queue_release(struct igb_rx_queue *rxq)\n }\n \n void\n-eth_igb_rx_queue_release(void *rxq)\n+eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tigb_rx_queue_release(rxq);\n+\tigb_rx_queue_release(dev->data->rx_queues[qid]);\n }\n \n static void\n@@ -1883,14 +1883,14 @@ 
igb_dev_free_queues(struct rte_eth_dev *dev)\n \tuint16_t i;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\teth_igb_rx_queue_release(dev->data->rx_queues[i]);\n+\t\teth_igb_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t\trte_eth_dma_zone_free(dev, \"rx_ring\", i);\n \t}\n \tdev->data->nb_rx_queues = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\teth_igb_tx_queue_release(dev->data->tx_queues[i]);\n+\t\teth_igb_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t\trte_eth_dma_zone_free(dev, \"tx_ring\", i);\n \t}\ndiff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c\nindex 4cebf60a68..a82d4b6287 100644\n--- a/drivers/net/ena/ena_ethdev.c\n+++ b/drivers/net/ena/ena_ethdev.c\n@@ -192,8 +192,8 @@ static int ena_dev_reset(struct rte_eth_dev *dev);\n static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);\n static void ena_rx_queue_release_all(struct rte_eth_dev *dev);\n static void ena_tx_queue_release_all(struct rte_eth_dev *dev);\n-static void ena_rx_queue_release(void *queue);\n-static void ena_tx_queue_release(void *queue);\n+static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n static void ena_rx_queue_release_bufs(struct ena_ring *ring);\n static void ena_tx_queue_release_bufs(struct ena_ring *ring);\n static int ena_link_update(struct rte_eth_dev *dev,\n@@ -525,27 +525,25 @@ ena_dev_reset(struct rte_eth_dev *dev)\n \n static void ena_rx_queue_release_all(struct rte_eth_dev *dev)\n {\n-\tstruct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;\n \tint nb_queues = dev->data->nb_rx_queues;\n \tint i;\n \n \tfor (i = 0; i < nb_queues; i++)\n-\t\tena_rx_queue_release(queues[i]);\n+\t\tena_rx_queue_release(dev, i);\n }\n \n static void ena_tx_queue_release_all(struct rte_eth_dev *dev)\n {\n-\tstruct ena_ring **queues = (struct ena_ring 
**)dev->data->tx_queues;\n \tint nb_queues = dev->data->nb_tx_queues;\n \tint i;\n \n \tfor (i = 0; i < nb_queues; i++)\n-\t\tena_tx_queue_release(queues[i]);\n+\t\tena_tx_queue_release(dev, i);\n }\n \n-static void ena_rx_queue_release(void *queue)\n+static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct ena_ring *ring = (struct ena_ring *)queue;\n+\tstruct ena_ring *ring = dev->data->rx_queues[qid];\n \n \t/* Free ring resources */\n \tif (ring->rx_buffer_info)\n@@ -566,9 +564,9 @@ static void ena_rx_queue_release(void *queue)\n \t\tring->port_id, ring->id);\n }\n \n-static void ena_tx_queue_release(void *queue)\n+static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct ena_ring *ring = (struct ena_ring *)queue;\n+\tstruct ena_ring *ring = dev->data->tx_queues[qid];\n \n \t/* Free ring resources */\n \tif (ring->push_buf_intermediate_buf)\ndiff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c\nindex b496cd4700..246aff4672 100644\n--- a/drivers/net/enetc/enetc_ethdev.c\n+++ b/drivers/net/enetc/enetc_ethdev.c\n@@ -325,8 +325,10 @@ enetc_tx_queue_setup(struct rte_eth_dev *dev,\n }\n \n static void\n-enetc_tx_queue_release(void *txq)\n+enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n+\tvoid *txq = dev->data->tx_queues[qid];\n+\n \tif (txq == NULL)\n \t\treturn;\n \n@@ -473,8 +475,10 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,\n }\n \n static void\n-enetc_rx_queue_release(void *rxq)\n+enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n+\tvoid *rxq = dev->data->rx_queues[qid];\n+\n \tif (rxq == NULL)\n \t\treturn;\n \n@@ -561,13 +565,13 @@ enetc_dev_close(struct rte_eth_dev *dev)\n \tret = enetc_dev_stop(dev);\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tenetc_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tenetc_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t}\n \tdev->data->nb_rx_queues = 0;\n \n \tfor 
(i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tenetc_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tenetc_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t}\n \tdev->data->nb_tx_queues = 0;\ndiff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c\nindex 8d5797523b..b03e56bc25 100644\n--- a/drivers/net/enic/enic_ethdev.c\n+++ b/drivers/net/enic/enic_ethdev.c\n@@ -88,8 +88,10 @@ enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-static void enicpmd_dev_tx_queue_release(void *txq)\n+static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n+\tvoid *txq = dev->data->tx_queues[qid];\n+\n \tENICPMD_FUNC_TRACE();\n \n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n@@ -223,8 +225,10 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,\n \treturn ret;\n }\n \n-static void enicpmd_dev_rx_queue_release(void *rxq)\n+static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n+\tvoid *rxq = dev->data->rx_queues[qid];\n+\n \tENICPMD_FUNC_TRACE();\n \n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\ndiff --git a/drivers/net/enic/enic_vf_representor.c b/drivers/net/enic/enic_vf_representor.c\nindex 79dd6e5640..cfd02c03cc 100644\n--- a/drivers/net/enic/enic_vf_representor.c\n+++ b/drivers/net/enic/enic_vf_representor.c\n@@ -70,8 +70,10 @@ static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,\n \treturn 0;\n }\n \n-static void enic_vf_dev_tx_queue_release(void *txq)\n+static void enic_vf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n+\tvoid *txq = dev->data->tx_queues[qid];\n+\n \tENICPMD_FUNC_TRACE();\n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n \t\treturn;\n@@ -108,8 +110,10 @@ static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n \treturn 0;\n }\n \n-static void enic_vf_dev_rx_queue_release(void *rxq)\n+static void enic_vf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n 
{\n+\tvoid *rxq = dev->data->rx_queues[qid];\n+\n \tENICPMD_FUNC_TRACE();\n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n \t\treturn;\ndiff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c\nindex 5ff33e03e0..d0030af061 100644\n--- a/drivers/net/failsafe/failsafe_ops.c\n+++ b/drivers/net/failsafe/failsafe_ops.c\n@@ -358,26 +358,21 @@ fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n }\n \n static void\n-fs_rx_queue_release(void *queue)\n+fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct rte_eth_dev *dev;\n \tstruct sub_device *sdev;\n \tuint8_t i;\n-\tstruct rxq *rxq;\n+\tstruct rxq *rxq = dev->data->rx_queues[qid];\n \n-\tif (queue == NULL)\n+\tif (rxq == NULL)\n \t\treturn;\n-\trxq = queue;\n-\tdev = &rte_eth_devices[rxq->priv->data->port_id];\n \tfs_lock(dev, 0);\n \tif (rxq->event_fd >= 0)\n \t\tclose(rxq->event_fd);\n \tFOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {\n \t\tif (ETH(sdev)->data->rx_queues != NULL &&\n-\t\t    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {\n-\t\t\tSUBOPS(sdev, rx_queue_release)\n-\t\t\t\t(ETH(sdev)->data->rx_queues[rxq->qid]);\n-\t\t}\n+\t\t    ETH(sdev)->data->rx_queues[rxq->qid] != NULL)\n+\t\t\tSUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid);\n \t}\n \tdev->data->rx_queues[rxq->qid] = NULL;\n \trte_free(rxq);\n@@ -420,7 +415,7 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \trxq = dev->data->rx_queues[rx_queue_id];\n \tif (rxq != NULL) {\n-\t\tfs_rx_queue_release(rxq);\n+\t\tfs_rx_queue_release(dev, rx_queue_id);\n \t\tdev->data->rx_queues[rx_queue_id] = NULL;\n \t}\n \trxq = rte_zmalloc(NULL,\n@@ -460,7 +455,7 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,\n \tfs_unlock(dev, 0);\n \treturn 0;\n free_rxq:\n-\tfs_rx_queue_release(rxq);\n+\tfs_rx_queue_release(dev, rx_queue_id);\n \tfs_unlock(dev, 0);\n \treturn ret;\n }\n@@ -542,24 +537,19 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)\n }\n \n static 
void\n-fs_tx_queue_release(void *queue)\n+fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct rte_eth_dev *dev;\n \tstruct sub_device *sdev;\n \tuint8_t i;\n-\tstruct txq *txq;\n+\tstruct txq *txq = dev->data->tx_queues[qid];\n \n-\tif (queue == NULL)\n+\tif (txq == NULL)\n \t\treturn;\n-\ttxq = queue;\n-\tdev = &rte_eth_devices[txq->priv->data->port_id];\n \tfs_lock(dev, 0);\n \tFOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {\n \t\tif (ETH(sdev)->data->tx_queues != NULL &&\n-\t\t    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {\n-\t\t\tSUBOPS(sdev, tx_queue_release)\n-\t\t\t\t(ETH(sdev)->data->tx_queues[txq->qid]);\n-\t\t}\n+\t\t    ETH(sdev)->data->tx_queues[txq->qid] != NULL)\n+\t\t\tSUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);\n \t}\n \tdev->data->tx_queues[txq->qid] = NULL;\n \trte_free(txq);\n@@ -591,7 +581,7 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \ttxq = dev->data->tx_queues[tx_queue_id];\n \tif (txq != NULL) {\n-\t\tfs_tx_queue_release(txq);\n+\t\tfs_tx_queue_release(dev, tx_queue_id);\n \t\tdev->data->tx_queues[tx_queue_id] = NULL;\n \t}\n \ttxq = rte_zmalloc(\"ethdev TX queue\",\n@@ -623,7 +613,7 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,\n \tfs_unlock(dev, 0);\n \treturn 0;\n free_txq:\n-\tfs_tx_queue_release(txq);\n+\tfs_tx_queue_release(dev, tx_queue_id);\n \tfs_unlock(dev, 0);\n \treturn ret;\n }\n@@ -634,12 +624,12 @@ fs_dev_free_queues(struct rte_eth_dev *dev)\n \tuint16_t i;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tfs_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tfs_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t}\n \tdev->data->nb_rx_queues = 0;\n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tfs_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tfs_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t}\n \tdev->data->nb_tx_queues = 0;\ndiff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c\nindex 
3236290e40..7075d69022 100644\n--- a/drivers/net/fm10k/fm10k_ethdev.c\n+++ b/drivers/net/fm10k/fm10k_ethdev.c\n@@ -51,8 +51,8 @@ static int\n fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);\n static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,\n \tconst u8 *mac, bool add, uint32_t pool);\n-static void fm10k_tx_queue_release(void *queue);\n-static void fm10k_rx_queue_release(void *queue);\n+static void fm10k_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+static void fm10k_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n static void fm10k_set_rx_function(struct rte_eth_dev *dev);\n static void fm10k_set_tx_function(struct rte_eth_dev *dev);\n static int fm10k_check_ftag(struct rte_devargs *devargs);\n@@ -1210,7 +1210,7 @@ fm10k_dev_queue_release(struct rte_eth_dev *dev)\n \n \tif (dev->data->rx_queues) {\n \t\tfor (i = 0; i < dev->data->nb_rx_queues; i++)\n-\t\t\tfm10k_rx_queue_release(dev->data->rx_queues[i]);\n+\t\t\tfm10k_rx_queue_release(dev, i);\n \t}\n }\n \n@@ -1891,11 +1891,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,\n }\n \n static void\n-fm10k_rx_queue_release(void *queue)\n+fm10k_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n \tPMD_INIT_FUNC_TRACE();\n \n-\trx_queue_free(queue);\n+\trx_queue_free(dev->data->rx_queues[qid]);\n }\n \n static inline int\n@@ -2080,9 +2080,9 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,\n }\n \n static void\n-fm10k_tx_queue_release(void *queue)\n+fm10k_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct fm10k_tx_queue *q = queue;\n+\tstruct fm10k_tx_queue *q = dev->data->tx_queues[qid];\n \tPMD_INIT_FUNC_TRACE();\n \n \ttx_queue_free(q);\ndiff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c\nindex 1a72401546..f8686f34c6 100644\n--- a/drivers/net/hinic/hinic_pmd_ethdev.c\n+++ b/drivers/net/hinic/hinic_pmd_ethdev.c\n@@ -1075,12 +1075,14 @@ static int 
hinic_dev_start(struct rte_eth_dev *dev)\n /**\n  * DPDK callback to release the receive queue.\n  *\n- * @param queue\n- *   Generic receive queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   RX queue index.\n  */\n-static void hinic_rx_queue_release(void *queue)\n+static void hinic_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct hinic_rxq *rxq = queue;\n+\tstruct hinic_rxq *rxq = dev->data->rx_queues[qid];\n \tstruct hinic_nic_dev *nic_dev;\n \n \tif (!rxq) {\n@@ -1107,12 +1109,14 @@ static void hinic_rx_queue_release(void *queue)\n /**\n  * DPDK callback to release the transmit queue.\n  *\n- * @param queue\n- *   Generic transmit queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   TX queue index.\n  */\n-static void hinic_tx_queue_release(void *queue)\n+static void hinic_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct hinic_txq *txq = queue;\n+\tstruct hinic_txq *txq = dev->data->tx_queues[qid];\n \tstruct hinic_nic_dev *nic_dev;\n \n \tif (!txq) {\ndiff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c\nindex d3fbe082e6..e917406ed6 100644\n--- a/drivers/net/hns3/hns3_rxtx.c\n+++ b/drivers/net/hns3/hns3_rxtx.c\n@@ -108,9 +108,9 @@ hns3_tx_queue_release(void *queue)\n }\n \n void\n-hns3_dev_rx_queue_release(void *queue)\n+hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct hns3_rx_queue *rxq = queue;\n+\tstruct hns3_rx_queue *rxq = dev->data->rx_queues[qid];\n \tstruct hns3_adapter *hns;\n \n \tif (rxq == NULL)\n@@ -118,14 +118,14 @@ hns3_dev_rx_queue_release(void *queue)\n \n \thns = rxq->hns;\n \trte_spinlock_lock(&hns->hw.lock);\n-\thns3_rx_queue_release(queue);\n+\thns3_rx_queue_release(rxq);\n \trte_spinlock_unlock(&hns->hw.lock);\n }\n \n void\n-hns3_dev_tx_queue_release(void *queue)\n+hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct 
hns3_tx_queue *txq = queue;\n+\tstruct hns3_tx_queue *txq = dev->data->tx_queues[qid];\n \tstruct hns3_adapter *hns;\n \n \tif (txq == NULL)\n@@ -133,7 +133,7 @@ hns3_dev_tx_queue_release(void *queue)\n \n \thns = txq->hns;\n \trte_spinlock_lock(&hns->hw.lock);\n-\thns3_tx_queue_release(queue);\n+\thns3_tx_queue_release(txq);\n \trte_spinlock_unlock(&hns->hw.lock);\n }\n \n@@ -1535,7 +1535,8 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)\n \t\t/* re-configure */\n \t\trxq = hw->fkq_data.rx_queues;\n \t\tfor (i = nb_queues; i < old_nb_queues; i++)\n-\t\t\thns3_dev_rx_queue_release(rxq[i]);\n+\t\t\thns3_dev_rx_queue_release\n+\t\t\t\t(&rte_eth_devices[hw->data->port_id], i);\n \n \t\trxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,\n \t\t\t\t  RTE_CACHE_LINE_SIZE);\n@@ -1548,9 +1549,9 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)\n \n \t\thw->fkq_data.rx_queues = rxq;\n \t} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {\n-\t\trxq = hw->fkq_data.rx_queues;\n \t\tfor (i = nb_queues; i < old_nb_queues; i++)\n-\t\t\thns3_dev_rx_queue_release(rxq[i]);\n+\t\t\thns3_dev_rx_queue_release\n+\t\t\t\t(&rte_eth_devices[hw->data->port_id], i);\n \n \t\trte_free(hw->fkq_data.rx_queues);\n \t\thw->fkq_data.rx_queues = NULL;\n@@ -1582,7 +1583,8 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)\n \t\t/* re-configure */\n \t\ttxq = hw->fkq_data.tx_queues;\n \t\tfor (i = nb_queues; i < old_nb_queues; i++)\n-\t\t\thns3_dev_tx_queue_release(txq[i]);\n+\t\t\thns3_dev_tx_queue_release\n+\t\t\t\t(&rte_eth_devices[hw->data->port_id], i);\n \t\ttxq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,\n \t\t\t\t  RTE_CACHE_LINE_SIZE);\n \t\tif (txq == NULL)\n@@ -1596,7 +1598,8 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)\n \t} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {\n \t\ttxq = hw->fkq_data.tx_queues;\n \t\tfor (i = nb_queues; i < old_nb_queues; 
i++)\n-\t\t\thns3_dev_tx_queue_release(txq[i]);\n+\t\t\thns3_dev_tx_queue_release\n+\t\t\t\t(&rte_eth_devices[hw->data->port_id], i);\n \n \t\trte_free(hw->fkq_data.tx_queues);\n \t\thw->fkq_data.tx_queues = NULL;\ndiff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h\nindex 56c1b80049..422ba15212 100644\n--- a/drivers/net/hns3/hns3_rxtx.h\n+++ b/drivers/net/hns3/hns3_rxtx.h\n@@ -677,8 +677,8 @@ hns3_write_txq_tail_reg(struct hns3_tx_queue *txq, uint32_t value)\n \t\trte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);\n }\n \n-void hns3_dev_rx_queue_release(void *queue);\n-void hns3_dev_tx_queue_release(void *queue);\n+void hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+void hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n void hns3_free_all_queues(struct rte_eth_dev *dev);\n int hns3_reset_all_tqps(struct hns3_adapter *hns);\n void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);\ndiff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c\nindex af075fda2a..105a6a657f 100644\n--- a/drivers/net/i40e/i40e_fdir.c\n+++ b/drivers/net/i40e/i40e_fdir.c\n@@ -264,10 +264,10 @@ i40e_fdir_setup(struct i40e_pf *pf)\n \treturn I40E_SUCCESS;\n \n fail_mem:\n-\ti40e_dev_rx_queue_release(pf->fdir.rxq);\n+\ti40e_rx_queue_release(pf->fdir.rxq);\n \tpf->fdir.rxq = NULL;\n fail_setup_rx:\n-\ti40e_dev_tx_queue_release(pf->fdir.txq);\n+\ti40e_tx_queue_release(pf->fdir.txq);\n \tpf->fdir.txq = NULL;\n fail_setup_tx:\n \ti40e_vsi_release(vsi);\n@@ -302,10 +302,10 @@ i40e_fdir_teardown(struct i40e_pf *pf)\n \t\tPMD_DRV_LOG(DEBUG, \"Failed to do FDIR RX switch off\");\n \n \trte_eth_dma_zone_free(dev, \"fdir_rx_ring\", pf->fdir.rxq->queue_id);\n-\ti40e_dev_rx_queue_release(pf->fdir.rxq);\n+\ti40e_rx_queue_release(pf->fdir.rxq);\n \tpf->fdir.rxq = NULL;\n \trte_eth_dma_zone_free(dev, \"fdir_tx_ring\", 
pf->fdir.txq->queue_id);\n-\ti40e_dev_tx_queue_release(pf->fdir.txq);\n+\ti40e_tx_queue_release(pf->fdir.txq);\n \tpf->fdir.txq = NULL;\n \ti40e_vsi_release(vsi);\n \tpf->fdir.fdir_vsi = NULL;\ndiff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c\nindex 026cda948c..0a58460132 100644\n--- a/drivers/net/i40e/i40e_rxtx.c\n+++ b/drivers/net/i40e/i40e_rxtx.c\n@@ -1985,7 +1985,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \n \t/* Free memory if needed */\n \tif (dev->data->rx_queues[queue_idx]) {\n-\t\ti40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\ti40e_rx_queue_release(dev->data->rx_queues[queue_idx]);\n \t\tdev->data->rx_queues[queue_idx] = NULL;\n \t}\n \n@@ -2029,7 +2029,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \trz = rte_eth_dma_zone_reserve(dev, \"rx_ring\", queue_idx,\n \t\t\t      ring_size, I40E_RING_BASE_ALIGN, socket_id);\n \tif (!rz) {\n-\t\ti40e_dev_rx_queue_release(rxq);\n+\t\ti40e_rx_queue_release(rxq);\n \t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for RX\");\n \t\treturn -ENOMEM;\n \t}\n@@ -2049,7 +2049,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t\t   RTE_CACHE_LINE_SIZE,\n \t\t\t\t   socket_id);\n \tif (!rxq->sw_ring) {\n-\t\ti40e_dev_rx_queue_release(rxq);\n+\t\ti40e_rx_queue_release(rxq);\n \t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for SW ring\");\n \t\treturn -ENOMEM;\n \t}\n@@ -2072,7 +2072,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \n \tif (dev->data->dev_started) {\n \t\tif (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {\n-\t\t\ti40e_dev_rx_queue_release(rxq);\n+\t\t\ti40e_rx_queue_release(rxq);\n \t\t\treturn -EINVAL;\n \t\t}\n \t} else {\n@@ -2102,7 +2102,19 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n }\n \n void\n-i40e_dev_rx_queue_release(void *rxq)\n+i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\ti40e_rx_queue_release(dev->data->rx_queues[qid]);\n+}\n+\n+void\n+i40e_dev_tx_queue_release(struct 
rte_eth_dev *dev, uint16_t qid)\n+{\n+\ti40e_tx_queue_release(dev->data->tx_queues[qid]);\n+}\n+\n+void\n+i40e_rx_queue_release(void *rxq)\n {\n \tstruct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;\n \n@@ -2407,7 +2419,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \n \t/* Free memory if needed. */\n \tif (dev->data->tx_queues[queue_idx]) {\n-\t\ti40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\ti40e_tx_queue_release(dev->data->tx_queues[queue_idx]);\n \t\tdev->data->tx_queues[queue_idx] = NULL;\n \t}\n \n@@ -2428,7 +2440,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \ttz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", queue_idx,\n \t\t\t      ring_size, I40E_RING_BASE_ALIGN, socket_id);\n \tif (!tz) {\n-\t\ti40e_dev_tx_queue_release(txq);\n+\t\ti40e_tx_queue_release(txq);\n \t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n \t\treturn -ENOMEM;\n \t}\n@@ -2456,7 +2468,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t\t   RTE_CACHE_LINE_SIZE,\n \t\t\t\t   socket_id);\n \tif (!txq->sw_ring) {\n-\t\ti40e_dev_tx_queue_release(txq);\n+\t\ti40e_tx_queue_release(txq);\n \t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n \t\treturn -ENOMEM;\n \t}\n@@ -2479,7 +2491,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \n \tif (dev->data->dev_started) {\n \t\tif (i40e_dev_tx_queue_setup_runtime(dev, txq)) {\n-\t\t\ti40e_dev_tx_queue_release(txq);\n+\t\t\ti40e_tx_queue_release(txq);\n \t\t\treturn -EINVAL;\n \t\t}\n \t} else {\n@@ -2495,7 +2507,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n }\n \n void\n-i40e_dev_tx_queue_release(void *txq)\n+i40e_tx_queue_release(void *txq)\n {\n \tstruct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;\n \n@@ -3056,7 +3068,7 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)\n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n \t\tif (!dev->data->rx_queues[i])\n 
\t\t\tcontinue;\n-\t\ti40e_dev_rx_queue_release(dev->data->rx_queues[i]);\n+\t\ti40e_rx_queue_release(dev->data->rx_queues[i]);\n \t\tdev->data->rx_queues[i] = NULL;\n \t\trte_eth_dma_zone_free(dev, \"rx_ring\", i);\n \t}\n@@ -3064,7 +3076,7 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)\n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n \t\tif (!dev->data->tx_queues[i])\n \t\t\tcontinue;\n-\t\ti40e_dev_tx_queue_release(dev->data->tx_queues[i]);\n+\t\ti40e_tx_queue_release(dev->data->tx_queues[i]);\n \t\tdev->data->tx_queues[i] = NULL;\n \t\trte_eth_dma_zone_free(dev, \"tx_ring\", i);\n \t}\n@@ -3104,7 +3116,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)\n \t\t\t\t      I40E_FDIR_QUEUE_ID, ring_size,\n \t\t\t\t      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);\n \tif (!tz) {\n-\t\ti40e_dev_tx_queue_release(txq);\n+\t\ti40e_tx_queue_release(txq);\n \t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for TX.\");\n \t\treturn I40E_ERR_NO_MEMORY;\n \t}\n@@ -3162,7 +3174,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)\n \t\t\t\t      I40E_FDIR_QUEUE_ID, ring_size,\n \t\t\t\t      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);\n \tif (!rz) {\n-\t\ti40e_dev_rx_queue_release(rxq);\n+\t\ti40e_rx_queue_release(rxq);\n \t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for RX.\");\n \t\treturn I40E_ERR_NO_MEMORY;\n \t}\ndiff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h\nindex 5ccf5773e8..8d6ab16b4f 100644\n--- a/drivers/net/i40e/i40e_rxtx.h\n+++ b/drivers/net/i40e/i40e_rxtx.h\n@@ -197,8 +197,10 @@ int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t    uint16_t nb_desc,\n \t\t\t    unsigned int socket_id,\n \t\t\t    const struct rte_eth_txconf *tx_conf);\n-void i40e_dev_rx_queue_release(void *rxq);\n-void i40e_dev_tx_queue_release(void *txq);\n+void i40e_rx_queue_release(void *rxq);\n+void i40e_tx_queue_release(void *txq);\n+void i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+void i40e_dev_tx_queue_release(struct 
rte_eth_dev *dev, uint16_t qid);\n uint16_t i40e_recv_pkts(void *rx_queue,\n \t\t\tstruct rte_mbuf **rx_pkts,\n \t\t\tuint16_t nb_pkts);\ndiff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c\nindex e33fe4576b..eaa2217abd 100644\n--- a/drivers/net/iavf/iavf_rxtx.c\n+++ b/drivers/net/iavf/iavf_rxtx.c\n@@ -554,7 +554,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \n \t/* Free memory if needed */\n \tif (dev->data->rx_queues[queue_idx]) {\n-\t\tiavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tiavf_dev_rx_queue_release(dev, queue_idx);\n \t\tdev->data->rx_queues[queue_idx] = NULL;\n \t}\n \n@@ -713,7 +713,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \n \t/* Free memory if needed. */\n \tif (dev->data->tx_queues[queue_idx]) {\n-\t\tiavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tiavf_dev_tx_queue_release(dev, queue_idx);\n \t\tdev->data->tx_queues[queue_idx] = NULL;\n \t}\n \n@@ -952,9 +952,9 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n }\n \n void\n-iavf_dev_rx_queue_release(void *rxq)\n+iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;\n+\tstruct iavf_rx_queue *q = dev->data->rx_queues[qid];\n \n \tif (!q)\n \t\treturn;\n@@ -966,9 +966,9 @@ iavf_dev_rx_queue_release(void *rxq)\n }\n \n void\n-iavf_dev_tx_queue_release(void *txq)\n+iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;\n+\tstruct iavf_tx_queue *q = dev->data->tx_queues[qid];\n \n \tif (!q)\n \t\treturn;\ndiff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h\nindex e210b913d6..c7a868cf1d 100644\n--- a/drivers/net/iavf/iavf_rxtx.h\n+++ b/drivers/net/iavf/iavf_rxtx.h\n@@ -420,7 +420,7 @@ int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \n int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t 
rx_queue_id);\n int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n-void iavf_dev_rx_queue_release(void *rxq);\n+void iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t   uint16_t queue_idx,\n@@ -430,7 +430,7 @@ int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,\n int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);\n-void iavf_dev_tx_queue_release(void *txq);\n+void iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n void iavf_stop_queues(struct rte_eth_dev *dev);\n uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t       uint16_t nb_pkts);\ndiff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c\nindex cab7c4da87..27d4f25699 100644\n--- a/drivers/net/ice/ice_dcf_ethdev.c\n+++ b/drivers/net/ice/ice_dcf_ethdev.c\n@@ -1014,8 +1014,8 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {\n \t.dev_infos_get           = ice_dcf_dev_info_get,\n \t.rx_queue_setup          = ice_rx_queue_setup,\n \t.tx_queue_setup          = ice_tx_queue_setup,\n-\t.rx_queue_release        = ice_rx_queue_release,\n-\t.tx_queue_release        = ice_tx_queue_release,\n+\t.rx_queue_release        = ice_dev_rx_queue_release,\n+\t.tx_queue_release        = ice_dev_tx_queue_release,\n \t.rx_queue_start          = ice_dcf_rx_queue_start,\n \t.tx_queue_start          = ice_dcf_tx_queue_start,\n \t.rx_queue_stop           = ice_dcf_rx_queue_stop,\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex a4cd39c954..c483f09c13 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -184,9 +184,9 @@ static const struct eth_dev_ops ice_eth_dev_ops = {\n \t.tx_queue_start               = ice_tx_queue_start,\n 
\t.tx_queue_stop                = ice_tx_queue_stop,\n \t.rx_queue_setup               = ice_rx_queue_setup,\n-\t.rx_queue_release             = ice_rx_queue_release,\n+\t.rx_queue_release             = ice_dev_rx_queue_release,\n \t.tx_queue_setup               = ice_tx_queue_setup,\n-\t.tx_queue_release             = ice_tx_queue_release,\n+\t.tx_queue_release             = ice_dev_tx_queue_release,\n \t.dev_infos_get                = ice_dev_info_get,\n \t.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,\n \t.link_update                  = ice_link_update,\ndiff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c\nindex 5d7ab4f047..7f86cebe93 100644\n--- a/drivers/net/ice/ice_rxtx.c\n+++ b/drivers/net/ice/ice_rxtx.c\n@@ -1374,6 +1374,18 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+void\n+ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tice_rx_queue_release(dev->data->rx_queues[qid]);\n+}\n+\n+void\n+ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tice_tx_queue_release(dev->data->tx_queues[qid]);\n+}\n+\n void\n ice_tx_queue_release(void *txq)\n {\ndiff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h\nindex b10db0874d..7f9838b58a 100644\n--- a/drivers/net/ice/ice_rxtx.h\n+++ b/drivers/net/ice/ice_rxtx.h\n@@ -209,6 +209,8 @@ int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n void ice_rx_queue_release(void *rxq);\n void ice_tx_queue_release(void *txq);\n+void ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+void ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n void ice_free_queues(struct rte_eth_dev *dev);\n int ice_fdir_setup_tx_resources(struct ice_pf *pf);\n int ice_fdir_setup_rx_resources(struct ice_pf *pf);\ndiff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c\nindex 224a095483..e634306249 
100644\n--- a/drivers/net/igc/igc_ethdev.c\n+++ b/drivers/net/igc/igc_ethdev.c\n@@ -1153,13 +1153,13 @@ igc_dev_free_queues(struct rte_eth_dev *dev)\n \tuint16_t i;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\teth_igc_rx_queue_release(dev->data->rx_queues[i]);\n+\t\teth_igc_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t}\n \tdev->data->nb_rx_queues = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\teth_igc_tx_queue_release(dev->data->tx_queues[i]);\n+\t\teth_igc_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t}\n \tdev->data->nb_tx_queues = 0;\ndiff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c\nindex b5489eedd2..7dee1bb0fa 100644\n--- a/drivers/net/igc/igc_txrx.c\n+++ b/drivers/net/igc/igc_txrx.c\n@@ -716,10 +716,10 @@ igc_rx_queue_release(struct igc_rx_queue *rxq)\n \trte_free(rxq);\n }\n \n-void eth_igc_rx_queue_release(void *rxq)\n+void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tif (rxq)\n-\t\tigc_rx_queue_release(rxq);\n+\tif (dev->data->rx_queues[qid])\n+\t\tigc_rx_queue_release(dev->data->rx_queues[qid]);\n }\n \n uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,\n@@ -1899,10 +1899,10 @@ igc_tx_queue_release(struct igc_tx_queue *txq)\n \trte_free(txq);\n }\n \n-void eth_igc_tx_queue_release(void *txq)\n+void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tif (txq)\n-\t\tigc_tx_queue_release(txq);\n+\tif (dev->data->tx_queues[qid])\n+\t\tigc_tx_queue_release(dev->data->tx_queues[qid]);\n }\n \n static void\ndiff --git a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h\nindex f2b2d75bbc..57bb87b3e4 100644\n--- a/drivers/net/igc/igc_txrx.h\n+++ b/drivers/net/igc/igc_txrx.h\n@@ -14,8 +14,8 @@ extern \"C\" {\n /*\n  * RX/TX function prototypes\n  */\n-void eth_igc_tx_queue_release(void *txq);\n-void eth_igc_rx_queue_release(void *rxq);\n+void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t 
qid);\n+void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n void igc_dev_clear_queues(struct rte_eth_dev *dev);\n int eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t\tuint16_t nb_rx_desc, unsigned int socket_id,\ndiff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c\nindex 431eda777b..a1f9ce2d81 100644\n--- a/drivers/net/ionic/ionic_lif.c\n+++ b/drivers/net/ionic/ionic_lif.c\n@@ -1056,11 +1056,11 @@ ionic_lif_free_queues(struct ionic_lif *lif)\n \tuint32_t i;\n \n \tfor (i = 0; i < lif->ntxqcqs; i++) {\n-\t\tionic_dev_tx_queue_release(lif->eth_dev->data->tx_queues[i]);\n+\t\tionic_dev_tx_queue_release(lif->eth_dev, i);\n \t\tlif->eth_dev->data->tx_queues[i] = NULL;\n \t}\n \tfor (i = 0; i < lif->nrxqcqs; i++) {\n-\t\tionic_dev_rx_queue_release(lif->eth_dev->data->rx_queues[i]);\n+\t\tionic_dev_rx_queue_release(lif->eth_dev, i);\n \t\tlif->eth_dev->data->rx_queues[i] = NULL;\n \t}\n }\ndiff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c\nindex b83ea1bcaa..67631a5813 100644\n--- a/drivers/net/ionic/ionic_rxtx.c\n+++ b/drivers/net/ionic/ionic_rxtx.c\n@@ -118,9 +118,9 @@ ionic_tx_flush(struct ionic_tx_qcq *txq)\n }\n \n void __rte_cold\n-ionic_dev_tx_queue_release(void *tx_queue)\n+ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct ionic_tx_qcq *txq = tx_queue;\n+\tstruct ionic_tx_qcq *txq = dev->data->tx_queues[qid];\n \tstruct ionic_tx_stats *stats = &txq->stats;\n \n \tIONIC_PRINT_CALL();\n@@ -185,8 +185,7 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,\n \n \t/* Free memory prior to re-allocation if needed... 
*/\n \tif (eth_dev->data->tx_queues[tx_queue_id] != NULL) {\n-\t\tvoid *tx_queue = eth_dev->data->tx_queues[tx_queue_id];\n-\t\tionic_dev_tx_queue_release(tx_queue);\n+\t\tionic_dev_tx_queue_release(eth_dev, tx_queue_id);\n \t\teth_dev->data->tx_queues[tx_queue_id] = NULL;\n \t}\n \n@@ -664,9 +663,9 @@ ionic_rx_empty(struct ionic_rx_qcq *rxq)\n }\n \n void __rte_cold\n-ionic_dev_rx_queue_release(void *rx_queue)\n+ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct ionic_rx_qcq *rxq = rx_queue;\n+\tstruct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];\n \tstruct ionic_rx_stats *stats;\n \n \tif (!rxq)\n@@ -726,8 +725,7 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n \n \t/* Free memory prior to re-allocation if needed... */\n \tif (eth_dev->data->rx_queues[rx_queue_id] != NULL) {\n-\t\tvoid *rx_queue = eth_dev->data->rx_queues[rx_queue_id];\n-\t\tionic_dev_rx_queue_release(rx_queue);\n+\t\tionic_dev_rx_queue_release(eth_dev, rx_queue_id);\n \t\teth_dev->data->rx_queues[rx_queue_id] = NULL;\n \t}\n \ndiff --git a/drivers/net/ionic/ionic_rxtx.h b/drivers/net/ionic/ionic_rxtx.h\nindex 5c85b9c493..befbe61cef 100644\n--- a/drivers/net/ionic/ionic_rxtx.h\n+++ b/drivers/net/ionic/ionic_rxtx.h\n@@ -25,14 +25,14 @@ uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \tuint16_t nb_desc, uint32_t socket_id,\n \tconst struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);\n-void ionic_dev_rx_queue_release(void *rxq);\n+void ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n int ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);\n \n int ionic_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n \tuint16_t nb_desc,  uint32_t socket_id,\n \tconst struct rte_eth_txconf *tx_conf);\n-void 
ionic_dev_tx_queue_release(void *tx_queue);\n+void ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n int ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);\n int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n \ndiff --git a/drivers/net/ipn3ke/ipn3ke_representor.c b/drivers/net/ipn3ke/ipn3ke_representor.c\nindex 589d9fa587..694435a4ae 100644\n--- a/drivers/net/ipn3ke/ipn3ke_representor.c\n+++ b/drivers/net/ipn3ke/ipn3ke_representor.c\n@@ -288,11 +288,6 @@ ipn3ke_rpst_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-static void\n-ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)\n-{\n-}\n-\n static int\n ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,\n \t__rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,\n@@ -302,11 +297,6 @@ ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-static void\n-ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)\n-{\n-}\n-\n /* Statistics collected by each port, VSI, VEB, and S-channel */\n struct ipn3ke_rpst_eth_stats {\n \tuint64_t tx_bytes;               /* gotc */\n@@ -2865,9 +2855,7 @@ static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {\n \t.tx_queue_start       = ipn3ke_rpst_tx_queue_start,\n \t.tx_queue_stop        = ipn3ke_rpst_tx_queue_stop,\n \t.rx_queue_setup       = ipn3ke_rpst_rx_queue_setup,\n-\t.rx_queue_release     = ipn3ke_rpst_rx_queue_release,\n \t.tx_queue_setup       = ipn3ke_rpst_tx_queue_setup,\n-\t.tx_queue_release     = ipn3ke_rpst_tx_queue_release,\n \n \t.dev_set_link_up      = ipn3ke_rpst_dev_set_link_up,\n \t.dev_set_link_down    = ipn3ke_rpst_dev_set_link_down,\ndiff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h\nindex a0ce18ca24..25ad14d084 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.h\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.h\n@@ -589,9 +589,9 @@ void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);\n \n 
void ixgbe_dev_free_queues(struct rte_eth_dev *dev);\n \n-void ixgbe_dev_rx_queue_release(void *rxq);\n+void ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n-void ixgbe_dev_tx_queue_release(void *txq);\n+void ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n int  ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t\tuint16_t nb_rx_desc, unsigned int socket_id,\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c\nindex c814a28cb4..aa2a444bbe 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.c\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.c\n@@ -2487,9 +2487,9 @@ ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)\n }\n \n void __rte_cold\n-ixgbe_dev_tx_queue_release(void *txq)\n+ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tixgbe_tx_queue_release(txq);\n+\tixgbe_tx_queue_release(dev->data->tx_queues[qid]);\n }\n \n /* (Re)set dynamic ixgbe_tx_queue fields to defaults */\n@@ -2892,9 +2892,9 @@ ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)\n }\n \n void __rte_cold\n-ixgbe_dev_rx_queue_release(void *rxq)\n+ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tixgbe_rx_queue_release(rxq);\n+\tixgbe_rx_queue_release(dev->data->rx_queues[qid]);\n }\n \n /*\n@@ -3427,14 +3427,14 @@ ixgbe_dev_free_queues(struct rte_eth_dev *dev)\n \tPMD_INIT_FUNC_TRACE();\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tixgbe_dev_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t\trte_eth_dma_zone_free(dev, \"rx_ring\", i);\n \t}\n \tdev->data->nb_rx_queues = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tixgbe_dev_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t\trte_eth_dma_zone_free(dev, \"tx_ring\", i);\n \t}\ndiff --git a/drivers/net/kni/rte_eth_kni.c 
b/drivers/net/kni/rte_eth_kni.c\nindex 871d11c413..cb9f7c8e82 100644\n--- a/drivers/net/kni/rte_eth_kni.c\n+++ b/drivers/net/kni/rte_eth_kni.c\n@@ -284,11 +284,6 @@ eth_kni_tx_queue_setup(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-static void\n-eth_kni_queue_release(void *q __rte_unused)\n-{\n-}\n-\n static int\n eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,\n \t\tint wait_to_complete __rte_unused)\n@@ -362,8 +357,6 @@ static const struct eth_dev_ops eth_kni_ops = {\n \t.dev_infos_get = eth_kni_dev_info,\n \t.rx_queue_setup = eth_kni_rx_queue_setup,\n \t.tx_queue_setup = eth_kni_tx_queue_setup,\n-\t.rx_queue_release = eth_kni_queue_release,\n-\t.tx_queue_release = eth_kni_queue_release,\n \t.link_update = eth_kni_link_update,\n \t.stats_get = eth_kni_stats_get,\n \t.stats_reset = eth_kni_stats_reset,\ndiff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c\nindex b72060a449..b2d83396f9 100644\n--- a/drivers/net/liquidio/lio_ethdev.c\n+++ b/drivers/net/liquidio/lio_ethdev.c\n@@ -1182,7 +1182,7 @@ lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,\n \n \t/* Free previous allocation if any */\n \tif (eth_dev->data->rx_queues[q_no] != NULL) {\n-\t\tlio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);\n+\t\tlio_dev_rx_queue_release(eth_dev, q_no);\n \t\teth_dev->data->rx_queues[q_no] = NULL;\n \t}\n \n@@ -1204,16 +1204,18 @@ lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,\n  * Release the receive queue/ringbuffer. 
Called by\n  * the upper layers.\n  *\n- * @param rxq\n- *    Opaque pointer to the receive queue to release\n+ * @param eth_dev\n+ *    Pointer to the structure rte_eth_dev\n+ * @param q_no\n+ *    Queue number\n  *\n  * @return\n  *    - nothing\n  */\n void\n-lio_dev_rx_queue_release(void *rxq)\n+lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)\n {\n-\tstruct lio_droq *droq = rxq;\n+\tstruct lio_droq *droq = dev->data->rx_queues[q_no];\n \tint oq_no;\n \n \tif (droq) {\n@@ -1262,7 +1264,7 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,\n \n \t/* Free previous allocation if any */\n \tif (eth_dev->data->tx_queues[q_no] != NULL) {\n-\t\tlio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);\n+\t\tlio_dev_tx_queue_release(eth_dev, q_no);\n \t\teth_dev->data->tx_queues[q_no] = NULL;\n \t}\n \n@@ -1292,16 +1294,18 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,\n  * Release the transmit queue/ringbuffer. Called by\n  * the upper layers.\n  *\n- * @param txq\n- *    Opaque pointer to the transmit queue to release\n+ * @param eth_dev\n+ *    Pointer to the structure rte_eth_dev\n+ * @param q_no\n+ *    Queue number\n  *\n  * @return\n  *    - nothing\n  */\n void\n-lio_dev_tx_queue_release(void *txq)\n+lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)\n {\n-\tstruct lio_instr_queue *tq = txq;\n+\tstruct lio_instr_queue *tq = dev->data->tx_queues[q_no];\n \tuint32_t fw_mapped_iq_no;\n \n \ndiff --git a/drivers/net/liquidio/lio_ethdev.h b/drivers/net/liquidio/lio_ethdev.h\nindex d33be1c44d..ece2b03858 100644\n--- a/drivers/net/liquidio/lio_ethdev.h\n+++ b/drivers/net/liquidio/lio_ethdev.h\n@@ -172,8 +172,8 @@ struct lio_rss_set {\n \tuint8_t key[LIO_RSS_MAX_KEY_SZ];\n };\n \n-void lio_dev_rx_queue_release(void *rxq);\n+void lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no);\n \n-void lio_dev_tx_queue_release(void *txq);\n+void lio_dev_tx_queue_release(struct rte_eth_dev *dev, 
uint16_t q_no);\n \n #endif\t/* _LIO_ETHDEV_H_ */\ndiff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c\nindex a067b60e47..616abec070 100644\n--- a/drivers/net/liquidio/lio_rxtx.c\n+++ b/drivers/net/liquidio/lio_rxtx.c\n@@ -1791,7 +1791,7 @@ lio_dev_clear_queues(struct rte_eth_dev *eth_dev)\n \tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++) {\n \t\ttxq = eth_dev->data->tx_queues[i];\n \t\tif (txq != NULL) {\n-\t\t\tlio_dev_tx_queue_release(txq);\n+\t\t\tlio_dev_tx_queue_release(eth_dev, i);\n \t\t\teth_dev->data->tx_queues[i] = NULL;\n \t\t}\n \t}\n@@ -1799,7 +1799,7 @@ lio_dev_clear_queues(struct rte_eth_dev *eth_dev)\n \tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n \t\trxq = eth_dev->data->rx_queues[i];\n \t\tif (rxq != NULL) {\n-\t\t\tlio_dev_rx_queue_release(rxq);\n+\t\t\tlio_dev_rx_queue_release(eth_dev, i);\n \t\t\teth_dev->data->rx_queues[i] = NULL;\n \t\t}\n \t}\ndiff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c\nindex de6becd45e..7e093e65d8 100644\n--- a/drivers/net/memif/rte_eth_memif.c\n+++ b/drivers/net/memif/rte_eth_memif.c\n@@ -1255,9 +1255,9 @@ memif_dev_close(struct rte_eth_dev *dev)\n \t\tmemif_disconnect(dev);\n \n \t\tfor (i = 0; i < dev->data->nb_rx_queues; i++)\n-\t\t\t(*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]);\n+\t\t\t(*dev->dev_ops->rx_queue_release)(dev, i);\n \t\tfor (i = 0; i < dev->data->nb_tx_queues; i++)\n-\t\t\t(*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]);\n+\t\t\t(*dev->dev_ops->tx_queue_release)(dev, i);\n \n \t\tmemif_socket_remove_device(dev);\n \t} else {\n@@ -1349,9 +1349,20 @@ memif_rx_queue_setup(struct rte_eth_dev *dev,\n }\n \n static void\n-memif_queue_release(void *queue)\n+memif_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct memif_queue *mq = (struct memif_queue *)queue;\n+\tstruct memif_queue *mq = dev->data->rx_queues[qid];\n+\n+\tif (!mq)\n+\t\treturn;\n+\n+\trte_free(mq);\n+}\n+\n+static 
void\n+memif_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tstruct memif_queue *mq = dev->data->tx_queues[qid];\n \n \tif (!mq)\n \t\treturn;\n@@ -1468,8 +1479,8 @@ static const struct eth_dev_ops ops = {\n \t.dev_configure = memif_dev_configure,\n \t.tx_queue_setup = memif_tx_queue_setup,\n \t.rx_queue_setup = memif_rx_queue_setup,\n-\t.rx_queue_release = memif_queue_release,\n-\t.tx_queue_release = memif_queue_release,\n+\t.rx_queue_release = memif_rx_queue_release,\n+\t.tx_queue_release = memif_tx_queue_release,\n \t.rx_queue_intr_enable = memif_rx_queue_intr_enable,\n \t.rx_queue_intr_disable = memif_rx_queue_intr_disable,\n \t.link_update = memif_link_update,\ndiff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c\nindex c522157a0a..46a22441c2 100644\n--- a/drivers/net/mlx4/mlx4.c\n+++ b/drivers/net/mlx4/mlx4.c\n@@ -391,9 +391,9 @@ mlx4_dev_close(struct rte_eth_dev *dev)\n \tmlx4_flow_clean(priv);\n \tmlx4_rss_deinit(priv);\n \tfor (i = 0; i != dev->data->nb_rx_queues; ++i)\n-\t\tmlx4_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tmlx4_rx_queue_release(dev, i);\n \tfor (i = 0; i != dev->data->nb_tx_queues; ++i)\n-\t\tmlx4_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tmlx4_tx_queue_release(dev, i);\n \tmlx4_proc_priv_uninit(dev);\n \tmlx4_mr_release(dev);\n \tif (priv->pd != NULL) {\ndiff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c\nindex 978cbb8201..37ae707639 100644\n--- a/drivers/net/mlx4/mlx4_rxq.c\n+++ b/drivers/net/mlx4/mlx4_rxq.c\n@@ -826,6 +826,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\t},\n \t\t.socket = socket,\n \t};\n+\tdev->data->rx_queues[idx] = rxq;\n \t/* Enable scattered packets support for this queue if necessary. 
*/\n \tMLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);\n \tif (dev->data->dev_conf.rxmode.max_rx_pkt_len <=\n@@ -896,12 +897,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\t}\n \t}\n \tDEBUG(\"%p: adding Rx queue %p to list\", (void *)dev, (void *)rxq);\n-\tdev->data->rx_queues[idx] = rxq;\n \treturn 0;\n error:\n-\tdev->data->rx_queues[idx] = NULL;\n \tret = rte_errno;\n-\tmlx4_rx_queue_release(rxq);\n+\tmlx4_rx_queue_release(dev, idx);\n \trte_errno = ret;\n \tMLX4_ASSERT(rte_errno > 0);\n \treturn -rte_errno;\n@@ -910,26 +909,20 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n /**\n  * DPDK callback to release a Rx queue.\n  *\n- * @param dpdk_rxq\n- *   Generic Rx queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param idx\n+ *   Rx queue index.\n  */\n void\n-mlx4_rx_queue_release(void *dpdk_rxq)\n+mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx)\n {\n-\tstruct rxq *rxq = (struct rxq *)dpdk_rxq;\n-\tstruct mlx4_priv *priv;\n-\tunsigned int i;\n+\tstruct rxq *rxq = dev->data->rx_queues[idx];\n \n \tif (rxq == NULL)\n \t\treturn;\n-\tpriv = rxq->priv;\n-\tfor (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i)\n-\t\tif (ETH_DEV(priv)->data->rx_queues[i] == rxq) {\n-\t\t\tDEBUG(\"%p: removing Rx queue %p from list\",\n-\t\t\t      (void *)ETH_DEV(priv), (void *)rxq);\n-\t\t\tETH_DEV(priv)->data->rx_queues[i] = NULL;\n-\t\t\tbreak;\n-\t\t}\n+\tdev->data->rx_queues[idx] = NULL;\n+\tDEBUG(\"%p: removing Rx queue %hu from list\", (void *)dev, idx);\n \tMLX4_ASSERT(!rxq->cq);\n \tMLX4_ASSERT(!rxq->wq);\n \tMLX4_ASSERT(!rxq->wqes);\ndiff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h\nindex c838afc242..83e9534cd0 100644\n--- a/drivers/net/mlx4/mlx4_rxtx.h\n+++ b/drivers/net/mlx4/mlx4_rxtx.h\n@@ -141,7 +141,7 @@ int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,\n \t\t\tuint16_t desc, unsigned int socket,\n \t\t\tconst struct 
rte_eth_rxconf *conf,\n \t\t\tstruct rte_mempool *mp);\n-void mlx4_rx_queue_release(void *dpdk_rxq);\n+void mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx);\n \n /* mlx4_rxtx.c */\n \n@@ -162,7 +162,7 @@ uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv);\n int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,\n \t\t\tuint16_t desc, unsigned int socket,\n \t\t\tconst struct rte_eth_txconf *conf);\n-void mlx4_tx_queue_release(void *dpdk_txq);\n+void mlx4_tx_queue_release(struct rte_eth_dev *dev, uint16_t idx);\n \n /* mlx4_mr.c */\n \ndiff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c\nindex 2df26842fb..c8da686ece 100644\n--- a/drivers/net/mlx4/mlx4_txq.c\n+++ b/drivers/net/mlx4/mlx4_txq.c\n@@ -404,6 +404,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\t.lb = !!priv->vf,\n \t\t.bounce_buf = bounce_buf,\n \t};\n+\tdev->data->tx_queues[idx] = txq;\n \tpriv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_TX_QUEUE;\n \tpriv->verbs_alloc_ctx.obj = txq;\n \ttxq->cq = mlx4_glue->create_cq(priv->ctx, desc, NULL, NULL, 0);\n@@ -507,13 +508,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t/* Save pointer of global generation number to check memory event. 
*/\n \ttxq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;\n \tDEBUG(\"%p: adding Tx queue %p to list\", (void *)dev, (void *)txq);\n-\tdev->data->tx_queues[idx] = txq;\n \tpriv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;\n \treturn 0;\n error:\n-\tdev->data->tx_queues[idx] = NULL;\n \tret = rte_errno;\n-\tmlx4_tx_queue_release(txq);\n+\tmlx4_tx_queue_release(dev, idx);\n \trte_errno = ret;\n \tMLX4_ASSERT(rte_errno > 0);\n \tpriv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;\n@@ -523,26 +522,20 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n /**\n  * DPDK callback to release a Tx queue.\n  *\n- * @param dpdk_txq\n- *   Generic Tx queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param idx\n+ *   Tx queue index.\n  */\n void\n-mlx4_tx_queue_release(void *dpdk_txq)\n+mlx4_tx_queue_release(struct rte_eth_dev *dev, uint16_t idx)\n {\n-\tstruct txq *txq = (struct txq *)dpdk_txq;\n-\tstruct mlx4_priv *priv;\n-\tunsigned int i;\n+\tstruct txq *txq = dev->data->tx_queues[idx];\n \n \tif (txq == NULL)\n \t\treturn;\n-\tpriv = txq->priv;\n-\tfor (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i)\n-\t\tif (ETH_DEV(priv)->data->tx_queues[i] == txq) {\n-\t\t\tDEBUG(\"%p: removing Tx queue %p from list\",\n-\t\t\t      (void *)ETH_DEV(priv), (void *)txq);\n-\t\t\tETH_DEV(priv)->data->tx_queues[i] = NULL;\n-\t\t\tbreak;\n-\t\t}\n+\tDEBUG(\"%p: removing Tx queue %hu from list\", (void *)dev, idx);\n+\tdev->data->tx_queues[idx] = NULL;\n \tmlx4_txq_free_elts(txq);\n \tif (txq->qp)\n \t\tclaim_zero(mlx4_glue->destroy_qp(txq->qp));\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 3f2b99fb65..2b7ad3e48b 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -191,7 +191,7 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n int mlx5_rx_hairpin_queue_setup\n \t(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t const struct 
rte_eth_hairpin_conf *hairpin_conf);\n-void mlx5_rx_queue_release(void *dpdk_rxq);\n+void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);\n void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);\n int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex abd8ce7989..d6de159486 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -794,25 +794,22 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,\n /**\n  * DPDK callback to release a RX queue.\n  *\n- * @param dpdk_rxq\n- *   Generic RX queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   RX queue index.\n  */\n void\n-mlx5_rx_queue_release(void *dpdk_rxq)\n+mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n-\tstruct mlx5_priv *priv;\n+\tstruct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];\n \n \tif (rxq == NULL)\n \t\treturn;\n-\trxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);\n-\tpriv = rxq_ctrl->priv;\n-\tif (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))\n+\tif (!mlx5_rxq_releasable(dev, qid))\n \t\trte_panic(\"port %u Rx queue %u is still used by a flow and\"\n-\t\t\t  \" cannot be removed\\n\",\n-\t\t\t  PORT_ID(priv), rxq->idx);\n-\tmlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);\n+\t\t\t  \" cannot be removed\\n\", dev->data->port_id, qid);\n+\tmlx5_rxq_release(dev, qid);\n }\n \n /**\ndiff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h\nindex 1a35919371..73364ed269 100644\n--- a/drivers/net/mlx5/mlx5_tx.h\n+++ b/drivers/net/mlx5/mlx5_tx.h\n@@ -204,7 +204,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n int mlx5_tx_hairpin_queue_setup\n \t(struct rte_eth_dev *dev, 
uint16_t idx, uint16_t desc,\n \t const struct rte_eth_hairpin_conf *hairpin_conf);\n-void mlx5_tx_queue_release(void *dpdk_txq);\n+void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);\n int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);\n void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);\ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex eb4d34ca55..89392dd091 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -470,28 +470,21 @@ mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,\n /**\n  * DPDK callback to release a TX queue.\n  *\n- * @param dpdk_txq\n- *   Generic TX queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   TX queue index.\n  */\n void\n-mlx5_tx_queue_release(void *dpdk_txq)\n+mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;\n-\tstruct mlx5_txq_ctrl *txq_ctrl;\n-\tstruct mlx5_priv *priv;\n-\tunsigned int i;\n+\tstruct mlx5_txq_data *txq = dev->data->tx_queues[qid];\n \n \tif (txq == NULL)\n \t\treturn;\n-\ttxq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);\n-\tpriv = txq_ctrl->priv;\n-\tfor (i = 0; (i != priv->txqs_n); ++i)\n-\t\tif ((*priv->txqs)[i] == txq) {\n-\t\t\tDRV_LOG(DEBUG, \"port %u removing Tx queue %u from list\",\n-\t\t\t\tPORT_ID(priv), txq->idx);\n-\t\t\tmlx5_txq_release(ETH_DEV(priv), i);\n-\t\t\tbreak;\n-\t\t}\n+\tDRV_LOG(DEBUG, \"port %u removing Tx queue %u from list\",\n+\t\tdev->data->port_id, qid);\n+\tmlx5_txq_release(dev, qid);\n }\n \n /**\ndiff --git a/drivers/net/mvneta/mvneta_ethdev.c b/drivers/net/mvneta/mvneta_ethdev.c\nindex a3ee150204..f51bc2258f 100644\n--- a/drivers/net/mvneta/mvneta_ethdev.c\n+++ b/drivers/net/mvneta/mvneta_ethdev.c\n@@ -446,12 +446,12 @@ mvneta_dev_close(struct rte_eth_dev *dev)\n \t\tret = 
mvneta_dev_stop(dev);\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tmvneta_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tmvneta_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t}\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tmvneta_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tmvneta_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t}\n \ndiff --git a/drivers/net/mvneta/mvneta_rxtx.c b/drivers/net/mvneta/mvneta_rxtx.c\nindex dfa7ecc090..2d61930382 100644\n--- a/drivers/net/mvneta/mvneta_rxtx.c\n+++ b/drivers/net/mvneta/mvneta_rxtx.c\n@@ -796,13 +796,15 @@ mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n /**\n  * DPDK callback to release the transmit queue.\n  *\n- * @param txq\n- *   Generic transmit queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   Transmit queue index.\n  */\n void\n-mvneta_tx_queue_release(void *txq)\n+mvneta_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct mvneta_txq *q = txq;\n+\tstruct mvneta_txq *q = dev->data->tx_queues[qid];\n \n \tif (!q)\n \t\treturn;\n@@ -959,13 +961,15 @@ mvneta_flush_queues(struct rte_eth_dev *dev)\n /**\n  * DPDK callback to release the receive queue.\n  *\n- * @param rxq\n- *   Generic receive queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   Receive queue index.\n  */\n void\n-mvneta_rx_queue_release(void *rxq)\n+mvneta_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct mvneta_rxq *q = rxq;\n+\tstruct mvneta_rxq *q = dev->data->rx_queues[qid];\n \n \tif (!q)\n \t\treturn;\n@@ -978,7 +982,7 @@ mvneta_rx_queue_release(void *rxq)\n \tif (q->priv->ppio)\n \t\tmvneta_rx_queue_flush(q);\n \n-\trte_free(rxq);\n+\trte_free(q);\n }\n \n /**\ndiff --git a/drivers/net/mvneta/mvneta_rxtx.h b/drivers/net/mvneta/mvneta_rxtx.h\nindex cc29190177..41b7539a57 100644\n--- 
a/drivers/net/mvneta/mvneta_rxtx.h\n+++ b/drivers/net/mvneta/mvneta_rxtx.h\n@@ -32,7 +32,7 @@ int\n mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\t      unsigned int socket, const struct rte_eth_txconf *conf);\n \n-void mvneta_rx_queue_release(void *rxq);\n-void mvneta_tx_queue_release(void *txq);\n+void mvneta_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+void mvneta_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n #endif /* _MVNETA_RXTX_H_ */\ndiff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c\nindex 078aefbb8d..4bf00c72ce 100644\n--- a/drivers/net/mvpp2/mrvl_ethdev.c\n+++ b/drivers/net/mvpp2/mrvl_ethdev.c\n@@ -2059,13 +2059,15 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n /**\n  * DPDK callback to release the receive queue.\n  *\n- * @param rxq\n- *   Generic receive queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   RX queue index.\n  */\n static void\n-mrvl_rx_queue_release(void *rxq)\n+mrvl_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct mrvl_rxq *q = rxq;\n+\tstruct mrvl_rxq *q = dev->data->rx_queues[qid];\n \tstruct pp2_ppio_tc_params *tc_params;\n \tint i, num, tc, inq;\n \tstruct pp2_hif *hif;\n@@ -2146,13 +2148,15 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n /**\n  * DPDK callback to release the transmit queue.\n  *\n- * @param txq\n- *   Generic transmit queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   TX queue index.\n  */\n static void\n-mrvl_tx_queue_release(void *txq)\n+mrvl_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct mrvl_txq *q = txq;\n+\tstruct mrvl_txq *q = dev->data->tx_queues[qid];\n \n \tif (!q)\n \t\treturn;\ndiff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c\nindex c6bf7cc132..e880dc2bb2 100644\n--- 
a/drivers/net/netvsc/hn_rxtx.c\n+++ b/drivers/net/netvsc/hn_rxtx.c\n@@ -356,9 +356,9 @@ static void hn_txd_put(struct hn_tx_queue *txq, struct hn_txdesc *txd)\n }\n \n void\n-hn_dev_tx_queue_release(void *arg)\n+hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct hn_tx_queue *txq = arg;\n+\tstruct hn_tx_queue *txq = dev->data->tx_queues[qid];\n \n \tPMD_INIT_FUNC_TRACE();\n \n@@ -1004,9 +1004,9 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)\n }\n \n void\n-hn_dev_rx_queue_release(void *arg)\n+hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct hn_rx_queue *rxq = arg;\n+\tstruct hn_rx_queue *rxq = dev->data->rx_queues[qid];\n \n \tPMD_INIT_FUNC_TRACE();\n \n@@ -1648,7 +1648,7 @@ hn_dev_free_queues(struct rte_eth_dev *dev)\n \tdev->data->nb_rx_queues = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\thn_dev_tx_queue_release(dev->data->tx_queues[i]);\n+\t\thn_dev_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t}\n \tdev->data->nb_tx_queues = 0;\ndiff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h\nindex 43642408bc..2cd1f8a881 100644\n--- a/drivers/net/netvsc/hn_var.h\n+++ b/drivers/net/netvsc/hn_var.h\n@@ -198,7 +198,7 @@ int\thn_dev_link_update(struct rte_eth_dev *dev, int wait);\n int\thn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t      uint16_t nb_desc, unsigned int socket_id,\n \t\t\t      const struct rte_eth_txconf *tx_conf);\n-void\thn_dev_tx_queue_release(void *arg);\n+void\thn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n void\thn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t     struct rte_eth_txq_info *qinfo);\n int\thn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);\n@@ -214,7 +214,7 @@ int\thn_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t      struct rte_mempool *mp);\n void\thn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,\n \t\t\t     
struct rte_eth_rxq_info *qinfo);\n-void\thn_dev_rx_queue_release(void *arg);\n+void\thn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);\n int\thn_dev_rx_queue_status(void *rxq, uint16_t offset);\n void\thn_dev_free_queues(struct rte_eth_dev *dev);\ndiff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c\nindex 75192e6319..fead8eba5d 100644\n--- a/drivers/net/netvsc/hn_vf.c\n+++ b/drivers/net/netvsc/hn_vf.c\n@@ -624,11 +624,8 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)\n \n \trte_rwlock_read_lock(&hv->vf_lock);\n \tvf_dev = hn_get_vf_dev(hv);\n-\tif (vf_dev && vf_dev->dev_ops->tx_queue_release) {\n-\t\tvoid *subq = vf_dev->data->tx_queues[queue_id];\n-\n-\t\t(*vf_dev->dev_ops->tx_queue_release)(subq);\n-\t}\n+\tif (vf_dev && vf_dev->dev_ops->tx_queue_release)\n+\t\t(*vf_dev->dev_ops->tx_queue_release)(vf_dev, queue_id);\n \n \trte_rwlock_read_unlock(&hv->vf_lock);\n }\n@@ -659,11 +656,8 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)\n \n \trte_rwlock_read_lock(&hv->vf_lock);\n \tvf_dev = hn_get_vf_dev(hv);\n-\tif (vf_dev && vf_dev->dev_ops->rx_queue_release) {\n-\t\tvoid *subq = vf_dev->data->rx_queues[queue_id];\n-\n-\t\t(*vf_dev->dev_ops->rx_queue_release)(subq);\n-\t}\n+\tif (vf_dev && vf_dev->dev_ops->rx_queue_release)\n+\t\t(*vf_dev->dev_ops->rx_queue_release)(vf_dev, queue_id);\n \trte_rwlock_read_unlock(&hv->vf_lock);\n }\n \ndiff --git a/drivers/net/nfb/nfb_ethdev.c b/drivers/net/nfb/nfb_ethdev.c\nindex 7e91d59847..99d93ebf46 100644\n--- a/drivers/net/nfb/nfb_ethdev.c\n+++ b/drivers/net/nfb/nfb_ethdev.c\n@@ -231,12 +231,12 @@ nfb_eth_dev_close(struct rte_eth_dev *dev)\n \tnfb_nc_txmac_deinit(internals->txmac, internals->max_txmac);\n \n \tfor (i = 0; i < nb_rx; i++) {\n-\t\tnfb_eth_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tnfb_eth_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t}\n 
\tdev->data->nb_rx_queues = 0;\n \tfor (i = 0; i < nb_tx; i++) {\n-\t\tnfb_eth_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tnfb_eth_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t}\n \tdev->data->nb_tx_queues = 0;\ndiff --git a/drivers/net/nfb/nfb_rx.c b/drivers/net/nfb/nfb_rx.c\nindex d6d4ba9663..3ebb332ae4 100644\n--- a/drivers/net/nfb/nfb_rx.c\n+++ b/drivers/net/nfb/nfb_rx.c\n@@ -176,9 +176,10 @@ nfb_eth_rx_queue_init(struct nfb_device *nfb,\n }\n \n void\n-nfb_eth_rx_queue_release(void *q)\n+nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct ndp_rx_queue *rxq = (struct ndp_rx_queue *)q;\n+\tstruct ndp_rx_queue *rxq = dev->data->rx_queues[qid];\n+\n \tif (rxq->queue != NULL) {\n \t\tndp_close_rx_queue(rxq->queue);\n \t\trte_free(rxq);\ndiff --git a/drivers/net/nfb/nfb_rx.h b/drivers/net/nfb/nfb_rx.h\nindex c9708259af..48e8abce1f 100644\n--- a/drivers/net/nfb/nfb_rx.h\n+++ b/drivers/net/nfb/nfb_rx.h\n@@ -94,11 +94,13 @@ nfb_eth_rx_queue_setup(struct rte_eth_dev *dev,\n /**\n  * DPDK callback to release a RX queue.\n  *\n- * @param dpdk_rxq\n- *   Generic RX queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   RX queue index.\n  */\n void\n-nfb_eth_rx_queue_release(void *q);\n+nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n /**\n  * Start traffic on Rx queue.\ndiff --git a/drivers/net/nfb/nfb_tx.c b/drivers/net/nfb/nfb_tx.c\nindex 9b912feb1d..d49fc324e7 100644\n--- a/drivers/net/nfb/nfb_tx.c\n+++ b/drivers/net/nfb/nfb_tx.c\n@@ -102,9 +102,10 @@ nfb_eth_tx_queue_init(struct nfb_device *nfb,\n }\n \n void\n-nfb_eth_tx_queue_release(void *q)\n+nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct ndp_tx_queue *txq = (struct ndp_tx_queue *)q;\n+\tstruct ndp_tx_queue *txq = dev->data->tx_queues[qid];\n+\n \tif (txq->queue != NULL) {\n \t\tndp_close_tx_queue(txq->queue);\n \t\trte_free(txq);\ndiff --git 
a/drivers/net/nfb/nfb_tx.h b/drivers/net/nfb/nfb_tx.h\nindex 28daeae0b8..942f74f33b 100644\n--- a/drivers/net/nfb/nfb_tx.h\n+++ b/drivers/net/nfb/nfb_tx.h\n@@ -70,11 +70,13 @@ nfb_eth_tx_queue_init(struct nfb_device *nfb,\n /**\n  * DPDK callback to release a RX queue.\n  *\n- * @param dpdk_rxq\n- *   Generic RX queue pointer.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param qid\n+ *   TX queue index.\n  */\n void\n-nfb_eth_tx_queue_release(void *q);\n+nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n /**\n  * Start traffic on Tx queue.\ndiff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c\nindex a30e78db16..c83b5f387f 100644\n--- a/drivers/net/nfp/nfp_net.c\n+++ b/drivers/net/nfp/nfp_net.c\n@@ -72,13 +72,13 @@ static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,\n \t\t\t\t       uint16_t queue_idx);\n static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t\t\t  uint16_t nb_pkts);\n-static void nfp_net_rx_queue_release(void *rxq);\n+static void nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n \t\t\t\t  const struct rte_eth_rxconf *rx_conf,\n \t\t\t\t  struct rte_mempool *mp);\n static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);\n-static void nfp_net_tx_queue_release(void *txq);\n+static void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t\t  uint16_t nb_desc, unsigned int socket_id,\n \t\t\t\t  const struct rte_eth_txconf *tx_conf);\n@@ -230,14 +230,15 @@ nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)\n }\n \n static void\n-nfp_net_rx_queue_release(void *rx_queue)\n+nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct nfp_net_rxq *rxq = rx_queue;\n+\tstruct nfp_net_rxq *rxq 
= dev->data->rx_queues[qid];\n \n \tif (rxq) {\n \t\tnfp_net_rx_queue_release_mbufs(rxq);\n \t\trte_free(rxq->rxbufs);\n \t\trte_free(rxq);\n+\t\tdev->data->rx_queues[qid] = NULL;\n \t}\n }\n \n@@ -266,14 +267,15 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)\n }\n \n static void\n-nfp_net_tx_queue_release(void *tx_queue)\n+nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct nfp_net_txq *txq = tx_queue;\n+\tstruct nfp_net_txq *txq = dev->data->tx_queues[qid];\n \n \tif (txq) {\n \t\tnfp_net_tx_queue_release_mbufs(txq);\n \t\trte_free(txq->txbufs);\n \t\trte_free(txq);\n+\t\tdev->data->tx_queues[qid] = NULL;\n \t}\n }\n \n@@ -1598,10 +1600,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \t * Free memory prior to re-allocation if needed. This is the case after\n \t * calling nfp_net_stop\n \t */\n-\tif (dev->data->rx_queues[queue_idx]) {\n-\t\tnfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);\n-\t\tdev->data->rx_queues[queue_idx] = NULL;\n-\t}\n+\tif (dev->data->rx_queues[queue_idx])\n+\t\tnfp_net_rx_queue_release(dev, queue_idx);\n \n \t/* Allocating rx queue data structure */\n \trxq = rte_zmalloc_socket(\"ethdev RX queue\", sizeof(struct nfp_net_rxq),\n@@ -1609,6 +1609,9 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \tif (rxq == NULL)\n \t\treturn -ENOMEM;\n \n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\trxq->hw = hw;\n+\n \t/* Hw queues mapping based on firmware configuration */\n \trxq->qidx = queue_idx;\n \trxq->fl_qcidx = queue_idx * hw->stride_rx;\n@@ -1642,7 +1645,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \n \tif (tz == NULL) {\n \t\tPMD_DRV_LOG(ERR, \"Error allocating rx dma\");\n-\t\tnfp_net_rx_queue_release(rxq);\n+\t\tnfp_net_rx_queue_release(dev, queue_idx);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -1655,7 +1658,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t\t\t\t sizeof(*rxq->rxbufs) * nb_desc,\n \t\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n \tif (rxq->rxbufs == NULL) 
{\n-\t\tnfp_net_rx_queue_release(rxq);\n+\t\tnfp_net_rx_queue_release(dev, queue_idx);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -1664,9 +1667,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \n \tnfp_net_reset_rx_queue(rxq);\n \n-\tdev->data->rx_queues[queue_idx] = rxq;\n-\trxq->hw = hw;\n-\n \t/*\n \t * Telling the HW about the physical address of the RX ring and number\n \t * of descriptors in log2 format\n@@ -1763,8 +1763,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \tif (dev->data->tx_queues[queue_idx]) {\n \t\tPMD_TX_LOG(DEBUG, \"Freeing memory prior to re-allocation %d\",\n \t\t\t   queue_idx);\n-\t\tnfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);\n-\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t\tnfp_net_tx_queue_release(dev, queue_idx);\n \t}\n \n \t/* Allocating tx queue data structure */\n@@ -1775,6 +1774,9 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\treturn -ENOMEM;\n \t}\n \n+\tdev->data->tx_queues[queue_idx] = txq;\n+\ttxq->hw = hw;\n+\n \t/*\n \t * Allocate TX ring hardware descriptors. 
A memzone large enough to\n \t * handle the maximum ring size is allocated in order to allow for\n@@ -1786,7 +1788,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t\t   socket_id);\n \tif (tz == NULL) {\n \t\tPMD_DRV_LOG(ERR, \"Error allocating tx dma\");\n-\t\tnfp_net_tx_queue_release(txq);\n+\t\tnfp_net_tx_queue_release(dev, queue_idx);\n \t\treturn -ENOMEM;\n \t}\n \n@@ -1812,7 +1814,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t\t\t sizeof(*txq->txbufs) * nb_desc,\n \t\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n \tif (txq->txbufs == NULL) {\n-\t\tnfp_net_tx_queue_release(txq);\n+\t\tnfp_net_tx_queue_release(dev, queue_idx);\n \t\treturn -ENOMEM;\n \t}\n \tPMD_TX_LOG(DEBUG, \"txbufs=%p hw_ring=%p dma_addr=0x%\" PRIx64,\n@@ -1820,9 +1822,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \n \tnfp_net_reset_tx_queue(txq);\n \n-\tdev->data->tx_queues[queue_idx] = txq;\n-\ttxq->hw = hw;\n-\n \t/*\n \t * Telling the HW about the physical address of the TX ring and number\n \t * of descriptors in log2 format\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h\nindex 7fb72f3f1f..c039e7dcc3 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.h\n+++ b/drivers/net/ngbe/ngbe_ethdev.h\n@@ -69,9 +69,9 @@ void ngbe_dev_clear_queues(struct rte_eth_dev *dev);\n \n void ngbe_dev_free_queues(struct rte_eth_dev *dev);\n \n-void ngbe_dev_rx_queue_release(void *rxq);\n+void ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n-void ngbe_dev_tx_queue_release(void *txq);\n+void ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n int  ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t\tuint16_t nb_rx_desc, unsigned int socket_id,\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c\nindex 5c06e0d550..d508015bd2 100644\n--- a/drivers/net/ngbe/ngbe_rxtx.c\n+++ b/drivers/net/ngbe/ngbe_rxtx.c\n@@ -453,9 
+453,9 @@ ngbe_tx_queue_release(struct ngbe_tx_queue *txq)\n }\n \n void\n-ngbe_dev_tx_queue_release(void *txq)\n+ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tngbe_tx_queue_release(txq);\n+\tngbe_tx_queue_release(dev->data->tx_queues[qid]);\n }\n \n /* (Re)set dynamic ngbe_tx_queue fields to defaults */\n@@ -673,9 +673,9 @@ ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)\n }\n \n void\n-ngbe_dev_rx_queue_release(void *rxq)\n+ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tngbe_rx_queue_release(rxq);\n+\tngbe_rx_queue_release(dev->data->rx_queues[qid]);\n }\n \n /*\n@@ -916,13 +916,13 @@ ngbe_dev_free_queues(struct rte_eth_dev *dev)\n \tPMD_INIT_FUNC_TRACE();\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tngbe_dev_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tngbe_dev_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t}\n \tdev->data->nb_rx_queues = 0;\n \n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tngbe_dev_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tngbe_dev_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t}\n \tdev->data->nb_tx_queues = 0;\ndiff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c\nindex 508bafc12a..25b9e5b1ce 100644\n--- a/drivers/net/null/rte_eth_null.c\n+++ b/drivers/net/null/rte_eth_null.c\n@@ -353,14 +353,24 @@ eth_stats_reset(struct rte_eth_dev *dev)\n }\n \n static void\n-eth_queue_release(void *q)\n+eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct null_queue *nq;\n+\tstruct null_queue *nq = dev->data->rx_queues[qid];\n \n-\tif (q == NULL)\n+\tif (nq == NULL)\n+\t\treturn;\n+\n+\trte_free(nq->dummy_packet);\n+}\n+\n+static void\n+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tstruct null_queue *nq = dev->data->tx_queues[qid];\n+\n+\tif (nq == NULL)\n \t\treturn;\n \n-\tnq = q;\n \trte_free(nq->dummy_packet);\n }\n \n@@ -483,8 +493,8 @@ static const 
struct eth_dev_ops ops = {\n \t.dev_infos_get = eth_dev_info,\n \t.rx_queue_setup = eth_rx_queue_setup,\n \t.tx_queue_setup = eth_tx_queue_setup,\n-\t.rx_queue_release = eth_queue_release,\n-\t.tx_queue_release = eth_queue_release,\n+\t.rx_queue_release = eth_rx_queue_release,\n+\t.tx_queue_release = eth_tx_queue_release,\n \t.mtu_set = eth_mtu_set,\n \t.link_update = eth_link_update,\n \t.mac_addr_set = eth_mac_address_set,\ndiff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c\nindex 9f4c0503b4..7c91494f0e 100644\n--- a/drivers/net/octeontx/octeontx_ethdev.c\n+++ b/drivers/net/octeontx/octeontx_ethdev.c\n@@ -971,20 +971,18 @@ octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)\n }\n \n static void\n-octeontx_dev_tx_queue_release(void *tx_queue)\n+octeontx_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct octeontx_txq *txq = tx_queue;\n \tint res;\n \n \tPMD_INIT_FUNC_TRACE();\n \n-\tif (txq) {\n-\t\tres = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);\n+\tif (dev->data->tx_queues[qid]) {\n+\t\tres = octeontx_dev_tx_queue_stop(dev, qid);\n \t\tif (res < 0)\n-\t\t\tocteontx_log_err(\"failed stop tx_queue(%d)\\n\",\n-\t\t\t\t   txq->queue_id);\n+\t\t\tocteontx_log_err(\"failed stop tx_queue(%d)\\n\", qid);\n \n-\t\trte_free(txq);\n+\t\trte_free(dev->data->tx_queues[qid]);\n \t}\n }\n \n@@ -1013,7 +1011,7 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,\n \tif (dev->data->tx_queues[qidx] != NULL) {\n \t\tPMD_TX_LOG(DEBUG, \"freeing memory prior to re-allocation %d\",\n \t\t\t\tqidx);\n-\t\tocteontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);\n+\t\tocteontx_dev_tx_queue_release(dev, qidx);\n \t\tdev->data->tx_queues[qidx] = NULL;\n \t}\n \n@@ -1221,9 +1219,9 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,\n }\n \n static void\n-octeontx_dev_rx_queue_release(void *rxq)\n+octeontx_dev_rx_queue_release(struct rte_eth_dev *dev, 
uint16_t qid)\n {\n-\trte_free(rxq);\n+\trte_free(dev->data->rx_queues[qid]);\n }\n \n static const uint32_t *\ndiff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c\nindex 75d4cabf2e..d576bc6989 100644\n--- a/drivers/net/octeontx2/otx2_ethdev.c\n+++ b/drivers/net/octeontx2/otx2_ethdev.c\n@@ -555,16 +555,17 @@ otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)\n }\n \n static void\n-otx2_nix_rx_queue_release(void *rx_queue)\n+otx2_nix_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct otx2_eth_rxq *rxq = rx_queue;\n+\tstruct otx2_eth_rxq *rxq = dev->data->rx_queues[qid];\n \n \tif (!rxq)\n \t\treturn;\n \n \totx2_nix_dbg(\"Releasing rxq %u\", rxq->rq);\n \tnix_cq_rq_uninit(rxq->eth_dev, rxq);\n-\trte_free(rx_queue);\n+\trte_free(rxq);\n+\tdev->data->rx_queues[qid] = NULL;\n }\n \n static int\n@@ -608,9 +609,8 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,\n \t/* Free memory prior to re-allocation if needed */\n \tif (eth_dev->data->rx_queues[rq] != NULL) {\n \t\totx2_nix_dbg(\"Freeing memory prior to re-allocation %d\", rq);\n-\t\totx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);\n+\t\totx2_nix_rx_queue_release(eth_dev, rq);\n \t\trte_eth_dma_zone_free(eth_dev, \"cq\", rq);\n-\t\teth_dev->data->rx_queues[rq] = NULL;\n \t}\n \n \toffloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;\n@@ -641,6 +641,8 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,\n \trxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();\n \trxq->tstamp = &dev->tstamp;\n \n+\teth_dev->data->rx_queues[rq] = rxq;\n+\n \t/* Alloc completion queue */\n \trc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);\n \tif (rc) {\n@@ -657,7 +659,6 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,\n \totx2_nix_dbg(\"rq=%d pool=%s qsize=%d nb_desc=%d->%d\",\n \t\t     rq, mp->name, qsize, nb_desc, rxq->qlen);\n \n-\teth_dev->data->rx_queues[rq] = rxq;\n 
\teth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;\n \n \t/* Calculating delta and freq mult between PTP HI clock and tsc.\n@@ -679,7 +680,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,\n \treturn 0;\n \n free_rxq:\n-\totx2_nix_rx_queue_release(rxq);\n+\totx2_nix_rx_queue_release(eth_dev, rq);\n fail:\n \treturn rc;\n }\n@@ -1217,16 +1218,13 @@ otx2_nix_form_default_desc(struct otx2_eth_txq *txq)\n }\n \n static void\n-otx2_nix_tx_queue_release(void *_txq)\n+otx2_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)\n {\n-\tstruct otx2_eth_txq *txq = _txq;\n-\tstruct rte_eth_dev *eth_dev;\n+\tstruct otx2_eth_txq *txq = eth_dev->data->tx_queues[qid];\n \n \tif (!txq)\n \t\treturn;\n \n-\teth_dev = txq->dev->eth_dev;\n-\n \totx2_nix_dbg(\"Releasing txq %u\", txq->sq);\n \n \t/* Flush and disable tm */\n@@ -1241,6 +1239,7 @@ otx2_nix_tx_queue_release(void *_txq)\n \t}\n \totx2_nix_sq_flush_post(txq);\n \trte_free(txq);\n+\teth_dev->data->tx_queues[qid] = NULL;\n }\n \n \n@@ -1268,8 +1267,7 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,\n \t/* Free memory prior to re-allocation if needed. 
*/\n \tif (eth_dev->data->tx_queues[sq] != NULL) {\n \t\totx2_nix_dbg(\"Freeing memory prior to re-allocation %d\", sq);\n-\t\totx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);\n-\t\teth_dev->data->tx_queues[sq] = NULL;\n+\t\totx2_nix_tx_queue_release(eth_dev, sq);\n \t}\n \n \t/* Find the expected offloads for this queue */\n@@ -1288,6 +1286,7 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,\n \ttxq->sqb_pool = NULL;\n \ttxq->offloads = offloads;\n \tdev->tx_offloads |= offloads;\n+\teth_dev->data->tx_queues[sq] = txq;\n \n \t/*\n \t * Allocate memory for flow control updates from HW.\n@@ -1334,12 +1333,11 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,\n \t\t     \" lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d\", sq,\n \t\t     fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,\n \t\t     txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);\n-\teth_dev->data->tx_queues[sq] = txq;\n \teth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;\n \treturn 0;\n \n free_txq:\n-\totx2_nix_tx_queue_release(txq);\n+\totx2_nix_tx_queue_release(eth_dev, sq);\n fail:\n \treturn rc;\n }\n@@ -1378,8 +1376,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)\n \t\t}\n \t\tmemcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));\n \t\ttx_qconf[i].valid = true;\n-\t\totx2_nix_tx_queue_release(txq[i]);\n-\t\teth_dev->data->tx_queues[i] = NULL;\n+\t\totx2_nix_tx_queue_release(eth_dev, i);\n \t}\n \n \trxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;\n@@ -1391,8 +1388,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)\n \t\t}\n \t\tmemcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));\n \t\trx_qconf[i].valid = true;\n-\t\totx2_nix_rx_queue_release(rxq[i]);\n-\t\teth_dev->data->rx_queues[i] = NULL;\n+\t\totx2_nix_rx_queue_release(eth_dev, i);\n \t}\n \n \tdev->tx_qconf = tx_qconf;\n@@ -1412,8 +1408,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)\n \tstruct otx2_eth_dev *dev 
= otx2_eth_pmd_priv(eth_dev);\n \tstruct otx2_eth_qconf *tx_qconf = dev->tx_qconf;\n \tstruct otx2_eth_qconf *rx_qconf = dev->rx_qconf;\n-\tstruct otx2_eth_txq **txq;\n-\tstruct otx2_eth_rxq **rxq;\n \tint rc, i, nb_rxq, nb_txq;\n \n \tnb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);\n@@ -1450,9 +1444,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)\n \t\t\t\t\t     &tx_qconf[i].conf.tx);\n \t\tif (rc) {\n \t\t\totx2_err(\"Failed to setup tx queue rc=%d\", rc);\n-\t\t\ttxq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;\n \t\t\tfor (i -= 1; i >= 0; i--)\n-\t\t\t\totx2_nix_tx_queue_release(txq[i]);\n+\t\t\t\totx2_nix_tx_queue_release(eth_dev, i);\n \t\t\tgoto fail;\n \t\t}\n \t}\n@@ -1468,9 +1461,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)\n \t\t\t\t\t     rx_qconf[i].mempool);\n \t\tif (rc) {\n \t\t\totx2_err(\"Failed to setup rx queue rc=%d\", rc);\n-\t\t\trxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;\n \t\t\tfor (i -= 1; i >= 0; i--)\n-\t\t\t\totx2_nix_rx_queue_release(rxq[i]);\n+\t\t\t\totx2_nix_rx_queue_release(eth_dev, i);\n \t\t\tgoto release_tx_queues;\n \t\t}\n \t}\n@@ -1480,9 +1472,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)\n \treturn 0;\n \n release_tx_queues:\n-\ttxq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;\n \tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++)\n-\t\totx2_nix_tx_queue_release(txq[i]);\n+\t\totx2_nix_tx_queue_release(eth_dev, i);\n fail:\n \tif (tx_qconf)\n \t\tfree(tx_qconf);\n@@ -2647,17 +2638,13 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)\n \tdev->ops = NULL;\n \n \t/* Free up SQs */\n-\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++) {\n-\t\totx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);\n-\t\teth_dev->data->tx_queues[i] = NULL;\n-\t}\n+\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++)\n+\t\totx2_nix_tx_queue_release(eth_dev, i);\n \teth_dev->data->nb_tx_queues = 0;\n \n \t/* Free up RQ's and CQ's */\n-\tfor (i = 0; i 
< eth_dev->data->nb_rx_queues; i++) {\n-\t\totx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);\n-\t\teth_dev->data->rx_queues[i] = NULL;\n-\t}\n+\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++)\n+\t\totx2_nix_rx_queue_release(eth_dev, i);\n \teth_dev->data->nb_rx_queues = 0;\n \n \t/* Free tm resources */\ndiff --git a/drivers/net/octeontx_ep/otx_ep_ethdev.c b/drivers/net/octeontx_ep/otx_ep_ethdev.c\nindex a243683d61..316927f28c 100644\n--- a/drivers/net/octeontx_ep/otx_ep_ethdev.c\n+++ b/drivers/net/octeontx_ep/otx_ep_ethdev.c\n@@ -248,16 +248,18 @@ otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,\n  * Release the receive queue/ringbuffer. Called by\n  * the upper layers.\n  *\n- * @param rxq\n- *    Opaque pointer to the receive queue to release\n+ * @param dev\n+ *    Pointer to the structure rte_eth_dev\n+ * @param q_no\n+ *    Queue number\n  *\n  * @return\n  *    - nothing\n  */\n static void\n-otx_ep_rx_queue_release(void *rxq)\n+otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)\n {\n-\tstruct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;\n+\tstruct otx_ep_droq *rq = dev->data->rx_queues[q_no];\n \tstruct otx_ep_device *otx_epvf = rq->otx_ep_dev;\n \tint q_id = rq->q_no;\n \n@@ -321,16 +323,18 @@ otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,\n  * Release the transmit queue/ringbuffer. 
Called by\n  * the upper layers.\n  *\n- * @param txq\n- *    Opaque pointer to the transmit queue to release\n+ * @param dev\n+ *    Pointer to the structure rte_eth_dev\n+ * @param q_no\n+ *    Queue number\n  *\n  * @return\n  *    - nothing\n  */\n static void\n-otx_ep_tx_queue_release(void *txq)\n+otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)\n {\n-\tstruct otx_ep_instr_queue *tq = (struct otx_ep_instr_queue *)txq;\n+\tstruct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];\n \n \totx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);\n }\ndiff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c\nindex a8774b7a43..815bf03e5a 100644\n--- a/drivers/net/pcap/pcap_ethdev.c\n+++ b/drivers/net/pcap/pcap_ethdev.c\n@@ -846,11 +846,6 @@ eth_dev_close(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n-static void\n-eth_queue_release(void *q __rte_unused)\n-{\n-}\n-\n static int\n eth_link_update(struct rte_eth_dev *dev __rte_unused,\n \t\tint wait_to_complete __rte_unused)\n@@ -995,8 +990,6 @@ static const struct eth_dev_ops ops = {\n \t.tx_queue_start = eth_tx_queue_start,\n \t.rx_queue_stop = eth_rx_queue_stop,\n \t.tx_queue_stop = eth_tx_queue_stop,\n-\t.rx_queue_release = eth_queue_release,\n-\t.tx_queue_release = eth_queue_release,\n \t.link_update = eth_link_update,\n \t.stats_get = eth_stats_get,\n \t.stats_reset = eth_stats_reset,\ndiff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c\nindex feec4d10a2..4c7f568bf4 100644\n--- a/drivers/net/pfe/pfe_ethdev.c\n+++ b/drivers/net/pfe/pfe_ethdev.c\n@@ -494,18 +494,6 @@ pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \treturn 0;\n }\n \n-static void\n-pfe_rx_queue_release(void *q __rte_unused)\n-{\n-\tPMD_INIT_FUNC_TRACE();\n-}\n-\n-static void\n-pfe_tx_queue_release(void *q __rte_unused)\n-{\n-\tPMD_INIT_FUNC_TRACE();\n-}\n-\n static int\n pfe_tx_queue_setup(struct rte_eth_dev *dev,\n \t\t   uint16_t queue_idx,\n@@ -759,9 +747,7 @@ static const struct 
eth_dev_ops ops = {\n \t.dev_configure = pfe_eth_configure,\n \t.dev_infos_get = pfe_eth_info,\n \t.rx_queue_setup = pfe_rx_queue_setup,\n-\t.rx_queue_release  = pfe_rx_queue_release,\n \t.tx_queue_setup = pfe_tx_queue_setup,\n-\t.tx_queue_release  = pfe_tx_queue_release,\n \t.dev_supported_ptypes_get = pfe_supported_ptypes_get,\n \t.link_update  = pfe_eth_link_update,\n \t.promiscuous_enable   = pfe_promiscuous_enable,\ndiff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c\nindex 323d46e6eb..b538a7d02e 100644\n--- a/drivers/net/qede/qede_ethdev.c\n+++ b/drivers/net/qede/qede_ethdev.c\n@@ -2396,13 +2396,25 @@ qede_dev_reset(struct rte_eth_dev *dev)\n \treturn qede_eth_dev_init(dev);\n }\n \n+static void\n+qede_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tqede_rx_queue_release(dev->data->rx_queues[qid]);\n+}\n+\n+static void\n+qede_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\tqede_tx_queue_release(dev->data->tx_queues[qid]);\n+}\n+\n static const struct eth_dev_ops qede_eth_dev_ops = {\n \t.dev_configure = qede_dev_configure,\n \t.dev_infos_get = qede_dev_info_get,\n \t.rx_queue_setup = qede_rx_queue_setup,\n-\t.rx_queue_release = qede_rx_queue_release,\n+\t.rx_queue_release = qede_dev_rx_queue_release,\n \t.tx_queue_setup = qede_tx_queue_setup,\n-\t.tx_queue_release = qede_tx_queue_release,\n+\t.tx_queue_release = qede_dev_tx_queue_release,\n \t.dev_start = qede_dev_start,\n \t.dev_reset = qede_dev_reset,\n \t.dev_set_link_up = qede_dev_set_link_up,\n@@ -2444,9 +2456,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {\n \t.dev_configure = qede_dev_configure,\n \t.dev_infos_get = qede_dev_info_get,\n \t.rx_queue_setup = qede_rx_queue_setup,\n-\t.rx_queue_release = qede_rx_queue_release,\n+\t.rx_queue_release = qede_dev_rx_queue_release,\n \t.tx_queue_setup = qede_tx_queue_setup,\n-\t.tx_queue_release = qede_tx_queue_release,\n+\t.tx_queue_release = qede_dev_tx_queue_release,\n \t.dev_start 
= qede_dev_start,\n \t.dev_reset = qede_dev_reset,\n \t.dev_set_link_up = qede_dev_set_link_up,\ndiff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c\nindex 1faf38a714..0440019e07 100644\n--- a/drivers/net/ring/rte_eth_ring.c\n+++ b/drivers/net/ring/rte_eth_ring.c\n@@ -225,8 +225,6 @@ eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,\n \treturn 0;\n }\n \n-static void\n-eth_queue_release(void *q __rte_unused) { ; }\n static int\n eth_link_update(struct rte_eth_dev *dev __rte_unused,\n \t\tint wait_to_complete __rte_unused) { return 0; }\n@@ -272,8 +270,6 @@ static const struct eth_dev_ops ops = {\n \t.dev_infos_get = eth_dev_info,\n \t.rx_queue_setup = eth_rx_queue_setup,\n \t.tx_queue_setup = eth_tx_queue_setup,\n-\t.rx_queue_release = eth_queue_release,\n-\t.tx_queue_release = eth_queue_release,\n \t.link_update = eth_link_update,\n \t.stats_get = eth_stats_get,\n \t.stats_reset = eth_stats_reset,\ndiff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c\nindex 2db0d000c3..2f85925b7b 100644\n--- a/drivers/net/sfc/sfc_ethdev.c\n+++ b/drivers/net/sfc/sfc_ethdev.c\n@@ -504,9 +504,9 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,\n }\n \n static void\n-sfc_rx_queue_release(void *queue)\n+sfc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct sfc_dp_rxq *dp_rxq = queue;\n+\tstruct sfc_dp_rxq *dp_rxq = dev->data->rx_queues[qid];\n \tstruct sfc_rxq *rxq;\n \tstruct sfc_adapter *sa;\n \tsfc_sw_index_t sw_index;\n@@ -561,9 +561,9 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,\n }\n \n static void\n-sfc_tx_queue_release(void *queue)\n+sfc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct sfc_dp_txq *dp_txq = queue;\n+\tstruct sfc_dp_txq *dp_txq = dev->data->tx_queues[qid];\n \tstruct sfc_txq *txq;\n \tsfc_sw_index_t sw_index;\n \tstruct sfc_adapter *sa;\ndiff --git a/drivers/net/szedata2/rte_eth_szedata2.c 
b/drivers/net/szedata2/rte_eth_szedata2.c\nindex 7416a6b1b8..76977f3757 100644\n--- a/drivers/net/szedata2/rte_eth_szedata2.c\n+++ b/drivers/net/szedata2/rte_eth_szedata2.c\n@@ -1143,26 +1143,28 @@ eth_stats_reset(struct rte_eth_dev *dev)\n }\n \n static void\n-eth_rx_queue_release(void *q)\n+eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;\n+\tstruct szedata2_rx_queue *rxq = dev->data->rx_queues[qid];\n \n \tif (rxq != NULL) {\n \t\tif (rxq->sze != NULL)\n \t\t\tszedata_close(rxq->sze);\n \t\trte_free(rxq);\n+\t\tdev->data->rx_queues[qid] = NULL;\n \t}\n }\n \n static void\n-eth_tx_queue_release(void *q)\n+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;\n+\tstruct szedata2_tx_queue *txq = dev->data->tx_queues[qid];\n \n \tif (txq != NULL) {\n \t\tif (txq->sze != NULL)\n \t\t\tszedata_close(txq->sze);\n \t\trte_free(txq);\n+\t\tdev->data->tx_queues[qid] = NULL;\n \t}\n }\n \n@@ -1182,15 +1184,11 @@ eth_dev_close(struct rte_eth_dev *dev)\n \n \tfree(internals->sze_dev_path);\n \n-\tfor (i = 0; i < nb_rx; i++) {\n-\t\teth_rx_queue_release(dev->data->rx_queues[i]);\n-\t\tdev->data->rx_queues[i] = NULL;\n-\t}\n+\tfor (i = 0; i < nb_rx; i++)\n+\t\teth_rx_queue_release(dev, i);\n \tdev->data->nb_rx_queues = 0;\n-\tfor (i = 0; i < nb_tx; i++) {\n-\t\teth_tx_queue_release(dev->data->tx_queues[i]);\n-\t\tdev->data->tx_queues[i] = NULL;\n-\t}\n+\tfor (i = 0; i < nb_tx; i++)\n+\t\teth_tx_queue_release(dev, i);\n \tdev->data->nb_tx_queues = 0;\n \n \treturn ret;\n@@ -1244,10 +1242,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,\n \n \tPMD_INIT_FUNC_TRACE();\n \n-\tif (dev->data->rx_queues[rx_queue_id] != NULL) {\n-\t\teth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);\n-\t\tdev->data->rx_queues[rx_queue_id] = NULL;\n-\t}\n+\tif (dev->data->rx_queues[rx_queue_id] != NULL)\n+\t\teth_rx_queue_release(dev, 
rx_queue_id);\n \n \trxq = rte_zmalloc_socket(\"szedata2 rx queue\",\n \t\t\tsizeof(struct szedata2_rx_queue),\n@@ -1259,18 +1255,20 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \n \trxq->priv = internals;\n+\tdev->data->rx_queues[rx_queue_id] = rxq;\n+\n \trxq->sze = szedata_open(internals->sze_dev_path);\n \tif (rxq->sze == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"szedata_open() failed for rx queue id \"\n \t\t\t\t\"%\" PRIu16 \"!\", rx_queue_id);\n-\t\teth_rx_queue_release(rxq);\n+\t\teth_rx_queue_release(dev, rx_queue_id);\n \t\treturn -EINVAL;\n \t}\n \tret = szedata_subscribe3(rxq->sze, &rx, &tx);\n \tif (ret != 0 || rx == 0) {\n \t\tPMD_INIT_LOG(ERR, \"szedata_subscribe3() failed for rx queue id \"\n \t\t\t\t\"%\" PRIu16 \"!\", rx_queue_id);\n-\t\teth_rx_queue_release(rxq);\n+\t\teth_rx_queue_release(dev, rx_queue_id);\n \t\treturn -EINVAL;\n \t}\n \trxq->rx_channel = rx_channel;\n@@ -1281,8 +1279,6 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->rx_bytes = 0;\n \trxq->err_pkts = 0;\n \n-\tdev->data->rx_queues[rx_queue_id] = rxq;\n-\n \tPMD_INIT_LOG(DEBUG, \"Configured rx queue id %\" PRIu16 \" on socket \"\n \t\t\t\"%u (channel id %u).\", rxq->qid, socket_id,\n \t\t\trxq->rx_channel);\n@@ -1306,10 +1302,8 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,\n \n \tPMD_INIT_FUNC_TRACE();\n \n-\tif (dev->data->tx_queues[tx_queue_id] != NULL) {\n-\t\teth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);\n-\t\tdev->data->tx_queues[tx_queue_id] = NULL;\n-\t}\n+\tif (dev->data->tx_queues[tx_queue_id] != NULL)\n+\t\teth_tx_queue_release(dev, tx_queue_id);\n \n \ttxq = rte_zmalloc_socket(\"szedata2 tx queue\",\n \t\t\tsizeof(struct szedata2_tx_queue),\n@@ -1321,18 +1315,20 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \n \ttxq->priv = internals;\n+\tdev->data->tx_queues[tx_queue_id] = txq;\n+\n \ttxq->sze = szedata_open(internals->sze_dev_path);\n \tif (txq->sze == NULL) {\n \t\tPMD_INIT_LOG(ERR, \"szedata_open() failed for tx queue id \"\n 
\t\t\t\t\"%\" PRIu16 \"!\", tx_queue_id);\n-\t\teth_tx_queue_release(txq);\n+\t\teth_tx_queue_release(dev, tx_queue_id);\n \t\treturn -EINVAL;\n \t}\n \tret = szedata_subscribe3(txq->sze, &rx, &tx);\n \tif (ret != 0 || tx == 0) {\n \t\tPMD_INIT_LOG(ERR, \"szedata_subscribe3() failed for tx queue id \"\n \t\t\t\t\"%\" PRIu16 \"!\", tx_queue_id);\n-\t\teth_tx_queue_release(txq);\n+\t\teth_tx_queue_release(dev, tx_queue_id);\n \t\treturn -EINVAL;\n \t}\n \ttxq->tx_channel = tx_channel;\n@@ -1341,8 +1337,6 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->tx_bytes = 0;\n \ttxq->err_pkts = 0;\n \n-\tdev->data->tx_queues[tx_queue_id] = txq;\n-\n \tPMD_INIT_LOG(DEBUG, \"Configured tx queue id %\" PRIu16 \" on socket \"\n \t\t\t\"%u (channel id %u).\", txq->qid, socket_id,\n \t\t\ttxq->tx_channel);\ndiff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c\nindex c515de3bf7..046f17669d 100644\n--- a/drivers/net/tap/rte_eth_tap.c\n+++ b/drivers/net/tap/rte_eth_tap.c\n@@ -1151,9 +1151,9 @@ tap_dev_close(struct rte_eth_dev *dev)\n }\n \n static void\n-tap_rx_queue_release(void *queue)\n+tap_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct rx_queue *rxq = queue;\n+\tstruct rx_queue *rxq = dev->data->rx_queues[qid];\n \tstruct pmd_process_private *process_private;\n \n \tif (!rxq)\n@@ -1170,9 +1170,9 @@ tap_rx_queue_release(void *queue)\n }\n \n static void\n-tap_tx_queue_release(void *queue)\n+tap_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct tx_queue *txq = queue;\n+\tstruct tx_queue *txq = dev->data->tx_queues[qid];\n \tstruct pmd_process_private *process_private;\n \n \tif (!txq)\ndiff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c\nindex fc1844ddfc..db5ffe5965 100644\n--- a/drivers/net/thunderx/nicvf_ethdev.c\n+++ b/drivers/net/thunderx/nicvf_ethdev.c\n@@ -858,13 +858,12 @@ nicvf_configure_rss_reta(struct rte_eth_dev *dev)\n }\n \n static 
void\n-nicvf_dev_tx_queue_release(void *sq)\n+nicvf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tstruct nicvf_txq *txq;\n+\tstruct nicvf_txq *txq = dev->data->tx_queues[qid];\n \n \tPMD_INIT_FUNC_TRACE();\n \n-\ttxq = (struct nicvf_txq *)sq;\n \tif (txq) {\n \t\tif (txq->txbuffs != NULL) {\n \t\t\tnicvf_tx_queue_release_mbufs(txq);\n@@ -872,6 +871,7 @@ nicvf_dev_tx_queue_release(void *sq)\n \t\t\ttxq->txbuffs = NULL;\n \t\t}\n \t\trte_free(txq);\n+\t\tdev->data->tx_queues[qid] = NULL;\n \t}\n }\n \n@@ -985,8 +985,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,\n \tif (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {\n \t\tPMD_TX_LOG(DEBUG, \"Freeing memory prior to re-allocation %d\",\n \t\t\t\tnicvf_netdev_qidx(nic, qidx));\n-\t\tnicvf_dev_tx_queue_release(\n-\t\t\tdev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);\n+\t\tnicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));\n \t\tdev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;\n \t}\n \n@@ -1020,19 +1019,21 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,\n \t\ttxq->pool_free = nicvf_single_pool_free_xmited_buffers;\n \t}\n \n+\tdev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;\n+\n \t/* Allocate software ring */\n \ttxq->txbuffs = rte_zmalloc_socket(\"txq->txbuffs\",\n \t\t\t\tnb_desc * sizeof(struct rte_mbuf *),\n \t\t\t\tRTE_CACHE_LINE_SIZE, nic->node);\n \n \tif (txq->txbuffs == NULL) {\n-\t\tnicvf_dev_tx_queue_release(txq);\n+\t\tnicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));\n \t\treturn -ENOMEM;\n \t}\n \n \tif (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to allocate mem for sq %d\", qidx);\n-\t\tnicvf_dev_tx_queue_release(txq);\n+\t\tnicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));\n \t\treturn -ENOMEM;\n \t}\n \n@@ -1043,7 +1044,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,\n \t\t\tnicvf_netdev_qidx(nic, 
qidx), txq, nb_desc, txq->desc,\n \t\t\ttxq->phys, txq->offloads);\n \n-\tdev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;\n \tdev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =\n \t\tRTE_ETH_QUEUE_STATE_STOPPED;\n \treturn 0;\n@@ -1161,11 +1161,11 @@ nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,\n }\n \n static void\n-nicvf_dev_rx_queue_release(void *rx_queue)\n+nicvf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n \tPMD_INIT_FUNC_TRACE();\n \n-\trte_free(rx_queue);\n+\trte_free(dev->data->rx_queues[qid]);\n }\n \n static int\n@@ -1336,8 +1336,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,\n \tif (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {\n \t\tPMD_RX_LOG(DEBUG, \"Freeing memory prior to re-allocation %d\",\n \t\t\t\tnicvf_netdev_qidx(nic, qidx));\n-\t\tnicvf_dev_rx_queue_release(\n-\t\t\tdev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);\n+\t\tnicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));\n \t\tdev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;\n \t}\n \n@@ -1365,12 +1364,14 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,\n \telse\n \t\trxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;\n \n+\tdev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;\n+\n \tnicvf_rxq_mbuf_setup(rxq);\n \n \t/* Alloc completion queue */\n \tif (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {\n \t\tPMD_INIT_LOG(ERR, \"failed to allocate cq %u\", rxq->queue_id);\n-\t\tnicvf_dev_rx_queue_release(rxq);\n+\t\tnicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));\n \t\treturn -ENOMEM;\n \t}\n \n@@ -1382,7 +1383,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,\n \t\t\tnicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,\n \t\t\trte_mempool_avail_count(mp), rxq->phys, offloads);\n \n-\tdev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;\n \tdev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =\n 
\t\tRTE_ETH_QUEUE_STATE_STOPPED;\n \treturn 0;\ndiff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h\nindex 3021933965..d979b12027 100644\n--- a/drivers/net/txgbe/txgbe_ethdev.h\n+++ b/drivers/net/txgbe/txgbe_ethdev.h\n@@ -433,9 +433,9 @@ void txgbe_dev_clear_queues(struct rte_eth_dev *dev);\n \n void txgbe_dev_free_queues(struct rte_eth_dev *dev);\n \n-void txgbe_dev_rx_queue_release(void *rxq);\n+void txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n-void txgbe_dev_tx_queue_release(void *txq);\n+void txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n int  txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n \t\tuint16_t nb_rx_desc, unsigned int socket_id,\ndiff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c\nindex 1a261287d1..b6339fe50b 100644\n--- a/drivers/net/txgbe/txgbe_rxtx.c\n+++ b/drivers/net/txgbe/txgbe_rxtx.c\n@@ -2109,9 +2109,9 @@ txgbe_tx_queue_release(struct txgbe_tx_queue *txq)\n }\n \n void __rte_cold\n-txgbe_dev_tx_queue_release(void *txq)\n+txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\ttxgbe_tx_queue_release(txq);\n+\ttxgbe_tx_queue_release(dev->data->tx_queues[qid]);\n }\n \n /* (Re)set dynamic txgbe_tx_queue fields to defaults */\n@@ -2437,9 +2437,9 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)\n }\n \n void __rte_cold\n-txgbe_dev_rx_queue_release(void *rxq)\n+txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\ttxgbe_rx_queue_release(rxq);\n+\ttxgbe_rx_queue_release(dev->data->rx_queues[qid]);\n }\n \n /*\n@@ -2795,13 +2795,13 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)\n \tPMD_INIT_FUNC_TRACE();\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\ttxgbe_dev_rx_queue_release(dev->data->rx_queues[i]);\n+\t\ttxgbe_dev_rx_queue_release(dev, i);\n \t\tdev->data->rx_queues[i] = NULL;\n \t}\n \tdev->data->nb_rx_queues = 0;\n \n \tfor (i = 0; i < 
dev->data->nb_tx_queues; i++) {\n-\t\ttxgbe_dev_tx_queue_release(dev->data->tx_queues[i]);\n+\t\ttxgbe_dev_tx_queue_release(dev, i);\n \t\tdev->data->tx_queues[i] = NULL;\n \t}\n \tdev->data->nb_tx_queues = 0;\ndiff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c\nindex a202931e9a..2e24e5f7ff 100644\n--- a/drivers/net/vhost/rte_eth_vhost.c\n+++ b/drivers/net/vhost/rte_eth_vhost.c\n@@ -1346,9 +1346,15 @@ eth_stats_reset(struct rte_eth_dev *dev)\n }\n \n static void\n-eth_queue_release(void *q)\n+eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\trte_free(q);\n+\trte_free(dev->data->rx_queues[qid]);\n+}\n+\n+static void\n+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n+{\n+\trte_free(dev->data->tx_queues[qid]);\n }\n \n static int\n@@ -1388,8 +1394,8 @@ static const struct eth_dev_ops ops = {\n \t.dev_infos_get = eth_dev_info,\n \t.rx_queue_setup = eth_rx_queue_setup,\n \t.tx_queue_setup = eth_tx_queue_setup,\n-\t.rx_queue_release = eth_queue_release,\n-\t.tx_queue_release = eth_queue_release,\n+\t.rx_queue_release = eth_rx_queue_release,\n+\t.tx_queue_release = eth_tx_queue_release,\n \t.tx_done_cleanup = eth_tx_done_cleanup,\n \t.link_update = eth_link_update,\n \t.stats_get = eth_stats_get,\ndiff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c\nindex e58085a2c9..f300b6e3b0 100644\n--- a/drivers/net/virtio/virtio_ethdev.c\n+++ b/drivers/net/virtio/virtio_ethdev.c\n@@ -369,12 +369,6 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)\n \treturn 0;\n }\n \n-static void\n-virtio_dev_queue_release(void *queue __rte_unused)\n-{\n-\t/* do nothing */\n-}\n-\n static uint16_t\n virtio_get_nr_vq(struct virtio_hw *hw)\n {\n@@ -966,9 +960,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {\n \t.rx_queue_setup          = virtio_dev_rx_queue_setup,\n \t.rx_queue_intr_enable    = virtio_dev_rx_queue_intr_enable,\n \t.rx_queue_intr_disable   = 
virtio_dev_rx_queue_intr_disable,\n-\t.rx_queue_release        = virtio_dev_queue_release,\n \t.tx_queue_setup          = virtio_dev_tx_queue_setup,\n-\t.tx_queue_release        = virtio_dev_queue_release,\n \t/* collect stats per queue */\n \t.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,\n \t.vlan_filter_set         = virtio_vlan_filter_set,\ndiff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c\nindex 1a3291273a..3d60fd1841 100644\n--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c\n+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c\n@@ -1058,18 +1058,12 @@ vmxnet3_free_queues(struct rte_eth_dev *dev)\n \n \tPMD_INIT_FUNC_TRACE();\n \n-\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n-\t\tvoid *rxq = dev->data->rx_queues[i];\n-\n-\t\tvmxnet3_dev_rx_queue_release(rxq);\n-\t}\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++)\n+\t\tvmxnet3_dev_rx_queue_release(dev, i);\n \tdev->data->nb_rx_queues = 0;\n \n-\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n-\t\tvoid *txq = dev->data->tx_queues[i];\n-\n-\t\tvmxnet3_dev_tx_queue_release(txq);\n-\t}\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++)\n+\t\tvmxnet3_dev_tx_queue_release(dev, i);\n \tdev->data->nb_tx_queues = 0;\n }\n \ndiff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h\nindex 59bee9723c..8950175460 100644\n--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h\n+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h\n@@ -182,8 +182,8 @@ vmxnet3_rx_data_ring(struct vmxnet3_hw *hw, uint32 rqID)\n \n void vmxnet3_dev_clear_queues(struct rte_eth_dev *dev);\n \n-void vmxnet3_dev_rx_queue_release(void *rxq);\n-void vmxnet3_dev_tx_queue_release(void *txq);\n+void vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n+void vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n int vmxnet3_v4_rss_configure(struct rte_eth_dev *dev);\n \ndiff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c\nindex 
5cf53d4de8..b01c4c01f9 100644\n--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c\n+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c\n@@ -165,9 +165,9 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)\n }\n \n void\n-vmxnet3_dev_tx_queue_release(void *txq)\n+vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n-\tvmxnet3_tx_queue_t *tq = txq;\n+\tvmxnet3_tx_queue_t *tq = dev->data->tx_queues[qid];\n \n \tif (tq != NULL) {\n \t\t/* Release mbufs */\n@@ -182,10 +182,10 @@ vmxnet3_dev_tx_queue_release(void *txq)\n }\n \n void\n-vmxnet3_dev_rx_queue_release(void *rxq)\n+vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n \tint i;\n-\tvmxnet3_rx_queue_t *rq = rxq;\n+\tvmxnet3_rx_queue_t *rq = dev->data->rx_queues[qid];\n \n \tif (rq != NULL) {\n \t\t/* Release mbufs */\ndiff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h\nindex 40e474aa7e..524757cf6f 100644\n--- a/lib/ethdev/ethdev_driver.h\n+++ b/lib/ethdev/ethdev_driver.h\n@@ -282,7 +282,8 @@ typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,\n \t\t\t\t    uint16_t rx_queue_id);\n /**< @internal Disable interrupt of a receive queue of an Ethernet device. */\n \n-typedef void (*eth_queue_release_t)(void *queue);\n+typedef void (*eth_queue_release_t)(struct rte_eth_dev *dev,\n+\t\t\t\t    uint16_t rx_queue_id);\n /**< @internal Release memory resources allocated by given RX/TX queue. 
*/\n \n typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,\ndiff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c\nindex 9d95cd11e1..193f0d8295 100644\n--- a/lib/ethdev/rte_ethdev.c\n+++ b/lib/ethdev/rte_ethdev.c\n@@ -906,12 +906,10 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)\n \t\t\treturn -(ENOMEM);\n \t\t}\n \t} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */\n-\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);\n-\n+\t\tif (dev->dev_ops->rx_queue_release != NULL)\n+\t\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\t\t(*dev->dev_ops->rx_queue_release)(dev, i);\n \t\trxq = dev->data->rx_queues;\n-\n-\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n-\t\t\t(*dev->dev_ops->rx_queue_release)(rxq[i]);\n \t\trxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,\n \t\t\t\tRTE_CACHE_LINE_SIZE);\n \t\tif (rxq == NULL)\n@@ -926,12 +924,10 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)\n \t\tdev->data->rx_queues = rxq;\n \n \t} else if (dev->data->rx_queues != NULL && nb_queues == 0) {\n-\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);\n-\n-\t\trxq = dev->data->rx_queues;\n \n-\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n-\t\t\t(*dev->dev_ops->rx_queue_release)(rxq[i]);\n+\t\tif (dev->dev_ops->rx_queue_release != NULL)\n+\t\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\t\t(*dev->dev_ops->rx_queue_release)(dev, i);\n \n \t\trte_free(dev->data->rx_queues);\n \t\tdev->data->rx_queues = NULL;\n@@ -1146,12 +1142,11 @@ eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)\n \t\t\treturn -(ENOMEM);\n \t\t}\n \t} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */\n-\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);\n-\n \t\ttxq = dev->data->tx_queues;\n \n-\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n-\t\t\t(*dev->dev_ops->tx_queue_release)(txq[i]);\n+\t\tif 
(dev->dev_ops->tx_queue_release != NULL)\n+\t\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\t\t(*dev->dev_ops->tx_queue_release)(dev, i);\n \t\ttxq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,\n \t\t\t\t  RTE_CACHE_LINE_SIZE);\n \t\tif (txq == NULL)\n@@ -1166,12 +1161,11 @@ eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)\n \t\tdev->data->tx_queues = txq;\n \n \t} else if (dev->data->tx_queues != NULL && nb_queues == 0) {\n-\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);\n-\n \t\ttxq = dev->data->tx_queues;\n \n-\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n-\t\t\t(*dev->dev_ops->tx_queue_release)(txq[i]);\n+\t\tif (dev->dev_ops->tx_queue_release != NULL)\n+\t\t\tfor (i = nb_queues; i < old_nb_queues; i++)\n+\t\t\t\t(*dev->dev_ops->tx_queue_release)(dev, i);\n \n \t\trte_free(dev->data->tx_queues);\n \t\tdev->data->tx_queues = NULL;\n@@ -2113,9 +2107,8 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,\n \n \trxq = dev->data->rx_queues;\n \tif (rxq[rx_queue_id]) {\n-\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,\n-\t\t\t\t\t-ENOTSUP);\n-\t\t(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);\n+\t\tif (dev->dev_ops->rx_queue_release != NULL)\n+\t\t\t(*dev->dev_ops->rx_queue_release)(dev, rx_queue_id);\n \t\trxq[rx_queue_id] = NULL;\n \t}\n \n@@ -2249,9 +2242,8 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,\n \t\treturn -EBUSY;\n \trxq = dev->data->rx_queues;\n \tif (rxq[rx_queue_id] != NULL) {\n-\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,\n-\t\t\t\t\t-ENOTSUP);\n-\t\t(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);\n+\t\tif (dev->dev_ops->rx_queue_release != NULL)\n+\t\t\t(*dev->dev_ops->rx_queue_release)(dev, rx_queue_id);\n \t\trxq[rx_queue_id] = NULL;\n \t}\n \tret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,\n@@ -2317,9 +2309,8 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,\n \n \ttxq = 
dev->data->tx_queues;\n \tif (txq[tx_queue_id]) {\n-\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,\n-\t\t\t\t\t-ENOTSUP);\n-\t\t(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);\n+\t\tif (dev->dev_ops->tx_queue_release != NULL)\n+\t\t\t(*dev->dev_ops->tx_queue_release)(dev, tx_queue_id);\n \t\ttxq[tx_queue_id] = NULL;\n \t}\n \n@@ -2429,9 +2420,8 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,\n \t\treturn -EBUSY;\n \ttxq = dev->data->tx_queues;\n \tif (txq[tx_queue_id] != NULL) {\n-\t\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,\n-\t\t\t\t\t-ENOTSUP);\n-\t\t(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);\n+\t\tif (dev->dev_ops->tx_queue_release != NULL)\n+\t\t\t(*dev->dev_ops->tx_queue_release)(dev, tx_queue_id);\n \t\ttxq[tx_queue_id] = NULL;\n \t}\n \tret = (*dev->dev_ops->tx_hairpin_queue_setup)\n",
    "prefixes": [
        "v1"
    ]
}