get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/107295/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 107295,
    "url": "http://patches.dpdk.org/api/patches/107295/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220210162926.20436-11-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220210162926.20436-11-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220210162926.20436-11-suanmingm@nvidia.com",
    "date": "2022-02-10T16:29:23",
    "name": "[10/13] net/mlx5: add queue and RSS action",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "fdfcd0b7d1bbb6dea33307a444c64b92c51db201",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220210162926.20436-11-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 21609,
            "url": "http://patches.dpdk.org/api/series/21609/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=21609",
            "date": "2022-02-10T16:29:13",
            "name": "net/mlx5: add hardware steering",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/21609/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/107295/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/107295/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 299CAA00BE;\n\tThu, 10 Feb 2022 17:31:15 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 75BF5411FB;\n\tThu, 10 Feb 2022 17:30:14 +0100 (CET)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2071.outbound.protection.outlook.com [40.107.243.71])\n by mails.dpdk.org (Postfix) with ESMTP id 54C4641176\n for <dev@dpdk.org>; Thu, 10 Feb 2022 17:30:13 +0100 (CET)",
            "from CO2PR04CA0139.namprd04.prod.outlook.com (2603:10b6:104::17) by\n BY5PR12MB4241.namprd12.prod.outlook.com (2603:10b6:a03:20c::9) with\n Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.4951.11; Thu, 10 Feb 2022 16:30:09 +0000",
            "from CO1NAM11FT055.eop-nam11.prod.protection.outlook.com\n (2603:10b6:104:0:cafe::a9) by CO2PR04CA0139.outlook.office365.com\n (2603:10b6:104::17) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4951.17 via Frontend\n Transport; Thu, 10 Feb 2022 16:30:09 +0000",
            "from mail.nvidia.com (12.22.5.235) by\n CO1NAM11FT055.mail.protection.outlook.com (10.13.175.129) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4975.11 via Frontend Transport; Thu, 10 Feb 2022 16:30:09 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Thu, 10 Feb 2022 16:30:05 +0000",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.9; Thu, 10 Feb 2022\n 08:30:03 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=K8Y2BuRExaKyMNnJ1ZI3NTefb4nrkH2YbnTNo5vb0BmDxiuvyQfINzywQfi7l6M4oT/j+R9mwKMLDbI1JmN81FQxsiMFavaEvpGtFP99iZwR0ySXj0NVAmJQM06XTWb8yIeXNYbUmXxbQIMgGqkXbVBCinVOzp4j8reoRt3PjokqaW8Ur9xm23mVjtyfNevw9eDWfdprkowUFHYUpaFg8a3gvuqJ6iSHMRjWZLgeVDi2rr9fUrV0r+qtvQJdgYmagjWbdiYxF87C13eVHaA83+5cUXs0CapI+mX7Cr0XrMADSG2h55O5stXxvYuZe6QtqG7sanj7bv05plB/wVU3MA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=rF/tLQ4/rSekHBpHHZnC9lwfvGbzJOi0iTNyzK713AU=;\n b=L53Zz+56nJBZfXTuC4Ddzpfn0KfFDjtss9/nD/oXrZrrOKgDzNZApDAWSZN8ddt4C1yQmvoHJ1kQuZtyLoIxs6rfGQU527POIkVWxrSsRVtKjQ6/EvHJ+ok8UD9D910nnuqQVGFAVR2AFEhbBsaSWeIDbHweRDG4Wykbljp+saIcXpVI3Ar78huOFB1rpiDEiTvwH0jxRyot9uYpz8bqL4fcTp9VtQ25BweLvZrPt4VugqYsLr6ji3eZKA+8ZkmdhulX9cLEnFt4DH458AM4NkR3yHWcQ6Vl79eTKBIHRy3eySnw26h1mBj4rQ9uq0UJOfn/l0PmivawPhiCR9jAAg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.235) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com; dmarc=pass\n (p=reject sp=reject pct=100) action=none header.from=nvidia.com; dkim=none\n (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=rF/tLQ4/rSekHBpHHZnC9lwfvGbzJOi0iTNyzK713AU=;\n b=qGi0gE5tEp1nAvVSIAsW0sZod1pqLlSVmmCf04tAICHGCsl2Zq4PH6No1dkZco/cRcBgHFlqG6QQ1h1y42rOwGcjk6tJIuL1s6mIPVizlRFBVpnlD+Ia602mXwaaOpJiaTf6dTf38e/eTgkggA52v5OoEcKqKLfvwxUcR8ONvaZx+MtHKtvSF9gxE+oJ/h5FHfMQMd7pL7dyuI97WQ8AmCIh4dEDXGxCe19w4hI2bow90VItz21FYGCNuAxkT/UyZQy3qzZLdZBgxGvMM5AfrU04xv2nRfK526S5BKUpegtmjoLRtsCySZWgJvCk0WOFbcYzFwncaqvbg2AYF4y5NQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.235)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.235 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.235; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Subject": "[PATCH 10/13] net/mlx5: add queue and RSS action",
        "Date": "Thu, 10 Feb 2022 18:29:23 +0200",
        "Message-ID": "<20220210162926.20436-11-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220210162926.20436-1-suanmingm@nvidia.com>",
        "References": "<20220210162926.20436-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "e1656433-a91f-4ff0-5ee1-08d9ecb29ac9",
        "X-MS-TrafficTypeDiagnostic": "BY5PR12MB4241:EE_",
        "X-Microsoft-Antispam-PRVS": "\n <BY5PR12MB42411F03F3F66B8FFBB1954EC12F9@BY5PR12MB4241.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:758;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n NmQmxbnC06HmWpMkbyJ8znOHfqZL2PIDQAkrjtarCfrw7lIkNMs/YiifVgA0lAV4tGn4q1BtinTRNo/MiVMVv3XqK3zbiL3+0DbUDW/rV4wuHEvLgt4EIL3R699oC9+xc9gH3teZMCuFYJd/DeNfqrr/Wr6VFqesoEdS3FhGUse7BtW+xjicR0TO0hGTCw08msapH/ft7pfTDiqbdskoGvQ3QfF3zjO1i7MP4SCBikPX0GsAYYQDbMRkMFi162uyM7+uBJLWYEn3wnRYiNYLrxaSfEKuRxhG7NLZHOMZw0nVTxGqlE+zkzaHuqB1vDllbNU/+l+ZnmEXrNmOkbaBDgi3r9yfKehFBsYQ54AY00Pfl7x9+70i1CmdRSoFuTvIvegsG+0gWAuHJAHVCKVvhi8/8sYNY97Mhl4VkitycUcYWKic/gtJ1yu8j3E8KnlLV6HNQmFGwg93kiKey8NFhona7mnR8QTARrjVNYa8t0p+pS8ifDjyX+bV+0Q8yVWoIKvNdQvWeSfMaPwrYR0uovXhlvhyOYYcMaJSc2lmPM1Q4QO41fejAqgaSrmidi3wXS0PiFuTAdOKtd3T0bwY5X9HiGDlAcos9DKPVVOygpr1gTCQNCap1bBcA0rpZjb+dvbwI5BXiGfxz1jWL+znWuwp3Bj+zCLSZCBG25IZ2TbhHvdhudrBcgPUcbeGMIkpFFVQl+aElYxRsYwUoMQ2a7j/3iOb6oDdlm92NZ6ABmw=",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.235; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:CAL; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(46966006)(40470700004)(36840700001)(508600001)(36860700001)(316002)(40460700003)(6636002)(36756003)(7696005)(6666004)(110136005)(8936002)(5660300002)(70586007)(70206006)(8676002)(4326008)(2906002)(30864003)(82310400004)(26005)(81166007)(426003)(6286002)(186003)(16526019)(55016003)(336012)(356005)(83380400001)(54906003)(86362001)(47076005)(2616005)(1076003)(36900700001)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "10 Feb 2022 16:30:09.3451 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n e1656433-a91f-4ff0-5ee1-08d9ecb29ac9",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.235];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT055.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BY5PR12MB4241",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This commit adds the queue and RSS action. Similar to the jump action,\ndynamic ones will be added to the action construct list.\n\nDue to the queue and RSS action in template should not be destroyed\nduring port restart, the actions are created with standalone indirect\ntable as indirect action does. When port stops, detaches the indirect\ntable from action, when port starts, attaches the indirect table back\nto the action.\n\nOne more change is made to accelerate the action creation. Currently\nthe mlx5_hrxq_get() function returns the object index instead of object\npointer. This introduced an extra converting the index to the object by\ncalling mlx5_ipool_get() in most of the case. And that extra converting\nhurts multi-thread performance since mlx5_ipool_get() uses the global\nlock inside. As the hash Rx queue object itself also contains the index,\nreturns the object directly will achieve better performance without the\nglobal lock.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c   |  18 ++--\n drivers/net/mlx5/mlx5.h            |   4 +\n drivers/net/mlx5/mlx5_devx.c       |  10 ++\n drivers/net/mlx5/mlx5_flow.c       |  38 +++-----\n drivers/net/mlx5/mlx5_flow.h       |   7 ++\n drivers/net/mlx5/mlx5_flow_dv.c    | 150 ++++++++++++++---------------\n drivers/net/mlx5/mlx5_flow_hw.c    | 101 +++++++++++++++++++\n drivers/net/mlx5/mlx5_flow_verbs.c |   7 +-\n drivers/net/mlx5/mlx5_rx.h         |   9 +-\n drivers/net/mlx5/mlx5_rxq.c        |  78 +++++++++------\n 10 files changed, 271 insertions(+), 151 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex 52e52a4ad7..8f0b15aad0 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -1714,6 +1714,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \tpriv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);\n \tif (!priv->drop_queue.hrxq)\n \t\tgoto error;\n+\tpriv->hrxqs = mlx5_list_create(\"hrxq\", eth_dev, true,\n+\t\t\t\t       mlx5_hrxq_create_cb,\n+\t\t\t\t       mlx5_hrxq_match_cb,\n+\t\t\t\t       mlx5_hrxq_remove_cb,\n+\t\t\t\t       mlx5_hrxq_clone_cb,\n+\t\t\t\t       mlx5_hrxq_clone_free_cb);\n+\tif (!priv->hrxqs)\n+\t\tgoto error;\n+\trte_rwlock_init(&priv->ind_tbls_lock);\n \tif (priv->config.dv_flow_en == 2)\n \t\treturn eth_dev;\n \t/* Port representor shares the same max priority with pf port. */\n@@ -1744,15 +1753,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\t\terr = ENOTSUP;\n \t\t\tgoto error;\n \t}\n-\tpriv->hrxqs = mlx5_list_create(\"hrxq\", eth_dev, true,\n-\t\t\t\t       mlx5_hrxq_create_cb,\n-\t\t\t\t       mlx5_hrxq_match_cb,\n-\t\t\t\t       mlx5_hrxq_remove_cb,\n-\t\t\t\t       mlx5_hrxq_clone_cb,\n-\t\t\t\t       mlx5_hrxq_clone_free_cb);\n-\tif (!priv->hrxqs)\n-\t\tgoto error;\n-\trte_rwlock_init(&priv->ind_tbls_lock);\n \t/* Query availability of metadata reg_c's. */\n \tif (!priv->sh->metadata_regc_check_flag) {\n \t\terr = mlx5_flow_discover_mreg_c(eth_dev);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 0bc9897101..6fb82bf1f3 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1286,6 +1286,7 @@ struct mlx5_flow_rss_desc {\n \tuint64_t hash_fields; /* Verbs Hash fields. */\n \tuint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */\n \tuint32_t key_len; /**< RSS hash key len. */\n+\tuint32_t hws_flags; /**< HW steering action. */\n \tuint32_t tunnel; /**< Queue in tunnel. */\n \tuint32_t shared_rss; /**< Shared RSS index. 
*/\n \tstruct mlx5_ind_table_obj *ind_tbl;\n@@ -1347,6 +1348,7 @@ struct mlx5_hrxq {\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \tvoid *action; /* DV QP action pointer. */\n #endif\n+\tuint32_t hws_flags; /* Hw steering flags. */\n \tuint64_t hash_fields; /* Verbs Hash fields. */\n \tuint32_t rss_key_len; /* Hash key length in bytes. */\n \tuint32_t idx; /* Hash Rx queue index. */\n@@ -1477,6 +1479,8 @@ struct mlx5_priv {\n \tLIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */\n \t/* Indirection tables. */\n \tLIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;\n+\t/* Standalone indirect tables. */\n+\tLIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;\n \t/* Pointer to next element. */\n \trte_rwlock_t ind_tbls_lock;\n \tuint32_t refcnt; /**< Reference counter. */\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex 91243f684f..af131bcd1b 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -807,6 +807,14 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,\n \t\tgoto error;\n \t}\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n+\tif (hrxq->hws_flags) {\n+\t\thrxq->action = mlx5dr_action_create_dest_tir\n+\t\t\t(priv->dr_ctx,\n+\t\t\t (struct mlx5dr_devx_obj *)hrxq->tir, hrxq->hws_flags);\n+\t\tif (!hrxq->action)\n+\t\t\tgoto error;\n+\t\treturn 0;\n+\t}\n \tif (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,\n \t\t\t\t\t\t\t  &hrxq->action)) {\n \t\trte_errno = errno;\n@@ -1042,6 +1050,8 @@ mlx5_devx_drop_action_create(struct rte_eth_dev *dev)\n \t\tDRV_LOG(ERR, \"Cannot create drop RX queue\");\n \t\treturn ret;\n \t}\n+\tif (priv->config.dv_flow_en == 2)\n+\t\treturn 0;\n \t/* hrxq->ind_table queues are NULL, drop RX queue ID will be used */\n \tret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);\n \tif (ret != 0) {\ndiff --git a/drivers/net/mlx5/mlx5_flow.c 
b/drivers/net/mlx5/mlx5_flow.c\nindex 9ac96ac979..9cad84ebc6 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -9302,14 +9302,10 @@ int\n mlx5_action_handle_attach(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_indexed_pool *ipool =\n-\t\t\tpriv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];\n-\tstruct mlx5_shared_action_rss *shared_rss, *shared_rss_last;\n \tint ret = 0;\n-\tuint32_t idx;\n+\tstruct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;\n \n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n+\tLIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {\n \t\tconst char *message;\n \t\tuint32_t queue_idx;\n \n@@ -9325,9 +9321,7 @@ mlx5_action_handle_attach(struct rte_eth_dev *dev)\n \t}\n \tif (ret != 0)\n \t\treturn ret;\n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n-\n+\tLIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {\n \t\tret = mlx5_ind_table_obj_attach(dev, ind_tbl);\n \t\tif (ret != 0) {\n \t\t\tDRV_LOG(ERR, \"Port %u could not attach \"\n@@ -9336,13 +9330,12 @@ mlx5_action_handle_attach(struct rte_eth_dev *dev)\n \t\t\tgoto error;\n \t\t}\n \t}\n+\n \treturn 0;\n error:\n-\tshared_rss_last = shared_rss;\n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n-\n-\t\tif (shared_rss == shared_rss_last)\n+\tind_tbl_last = ind_tbl;\n+\tLIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {\n+\t\tif (ind_tbl == ind_tbl_last)\n \t\t\tbreak;\n \t\tif (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)\n \t\t\tDRV_LOG(CRIT, \"Port %u could not detach \"\n@@ -9365,15 +9358,10 @@ int\n mlx5_action_handle_detach(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct 
mlx5_indexed_pool *ipool =\n-\t\t\tpriv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];\n-\tstruct mlx5_shared_action_rss *shared_rss, *shared_rss_last;\n \tint ret = 0;\n-\tuint32_t idx;\n-\n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n+\tstruct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;\n \n+\tLIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {\n \t\tret = mlx5_ind_table_obj_detach(dev, ind_tbl);\n \t\tif (ret != 0) {\n \t\t\tDRV_LOG(ERR, \"Port %u could not detach \"\n@@ -9384,11 +9372,9 @@ mlx5_action_handle_detach(struct rte_eth_dev *dev)\n \t}\n \treturn 0;\n error:\n-\tshared_rss_last = shared_rss;\n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n-\n-\t\tif (shared_rss == shared_rss_last)\n+\tind_tbl_last = ind_tbl;\n+\tLIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {\n+\t\tif (ind_tbl == ind_tbl_last)\n \t\t\tbreak;\n \t\tif (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)\n \t\t\tDRV_LOG(CRIT, \"Port %u could not attach \"\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex a1ab9173d9..33094c8c07 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1022,6 +1022,7 @@ struct rte_flow_hw {\n \tunion {\n \t\t/* Jump action. */\n \t\tstruct mlx5_hw_jump_action *jump;\n+\t\tstruct mlx5_hrxq *hrxq; /* TIR action. */\n \t};\n \tstruct rte_flow_template_table *table; /* The table flow allcated from. */\n \tstruct mlx5dr_rule rule; /* HWS layer data struct. */\n@@ -1077,6 +1078,7 @@ struct mlx5_hw_actions {\n \t/* Dynamic action list. */\n \tLIST_HEAD(act_list, mlx5_action_construct_data) act_list;\n \tstruct mlx5_hw_jump_action *jump; /* Jump action. */\n+\tstruct mlx5_hrxq *tir; /* TIR action. */\n \tuint32_t acts_num:4; /* Total action number. */\n \t/* Translated DR action array from action template. 
*/\n \tstruct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];\n@@ -1907,6 +1909,11 @@ int flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,\n int\n flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,\n \t\t    struct rte_flow_error *error);\n+void flow_dv_hashfields_set(uint64_t item_flags,\n+\t\t\t    struct mlx5_flow_rss_desc *rss_desc,\n+\t\t\t    uint64_t *hash_fields);\n+void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,\n+\t\t\t\t\tuint64_t *hash_field);\n \n struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);\n void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex ef9c66eddf..c3d9d30dba 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -10966,78 +10966,83 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,\n /**\n  * Set the hash fields according to the @p flow information.\n  *\n- * @param[in] dev_flow\n- *   Pointer to the mlx5_flow.\n+ * @param[in] item_flags\n+ *   The match pattern item flags.\n  * @param[in] rss_desc\n  *   Pointer to the mlx5_flow_rss_desc.\n+ * @param[out] hash_fields\n+ *   Pointer to the RSS hash fields.\n  */\n-static void\n-flow_dv_hashfields_set(struct mlx5_flow *dev_flow,\n-\t\t       struct mlx5_flow_rss_desc *rss_desc)\n+void\n+flow_dv_hashfields_set(uint64_t item_flags,\n+\t\t       struct mlx5_flow_rss_desc *rss_desc,\n+\t\t       uint64_t *hash_fields)\n {\n-\tuint64_t items = dev_flow->handle->layers;\n+\tuint64_t items = item_flags;\n+\tuint64_t fields = 0;\n \tint rss_inner = 0;\n \tuint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);\n \n-\tdev_flow->hash_fields = 0;\n+\t*hash_fields = 0;\n #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n \tif (rss_desc->level >= 2)\n \t\trss_inner = 1;\n #endif\n \tif ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||\n-\t    (!rss_inner && (items & 
MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {\n+\t    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) ||\n+\t     !items) {\n \t\tif (rss_types & MLX5_IPV4_LAYER_TYPES) {\n \t\t\tif (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)\n-\t\t\t\tdev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;\n+\t\t\t\tfields |= IBV_RX_HASH_SRC_IPV4;\n \t\t\telse if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)\n-\t\t\t\tdev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;\n+\t\t\t\tfields |= IBV_RX_HASH_DST_IPV4;\n \t\t\telse\n-\t\t\t\tdev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;\n+\t\t\t\tfields |= MLX5_IPV4_IBV_RX_HASH;\n \t\t}\n \t} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||\n-\t\t   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {\n+\t\t   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) ||\n+\t\t   !items) {\n \t\tif (rss_types & MLX5_IPV6_LAYER_TYPES) {\n \t\t\tif (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)\n-\t\t\t\tdev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;\n+\t\t\t\tfields |= IBV_RX_HASH_SRC_IPV6;\n \t\t\telse if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)\n-\t\t\t\tdev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;\n+\t\t\t\tfields |= IBV_RX_HASH_DST_IPV6;\n \t\t\telse\n-\t\t\t\tdev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;\n+\t\t\t\tfields |= MLX5_IPV6_IBV_RX_HASH;\n \t\t}\n \t}\n-\tif (dev_flow->hash_fields == 0)\n+\tif (fields == 0)\n \t\t/*\n \t\t * There is no match between the RSS types and the\n \t\t * L3 protocol (IPv4/IPv6) defined in the flow rule.\n \t\t */\n \t\treturn;\n \tif ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||\n-\t    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {\n+\t    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||\n+\t    !items) {\n \t\tif (rss_types & RTE_ETH_RSS_UDP) {\n \t\t\tif (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)\n-\t\t\t\tdev_flow->hash_fields |=\n-\t\t\t\t\t\tIBV_RX_HASH_SRC_PORT_UDP;\n+\t\t\t\tfields |= IBV_RX_HASH_SRC_PORT_UDP;\n \t\t\telse if (rss_types & 
RTE_ETH_RSS_L4_DST_ONLY)\n-\t\t\t\tdev_flow->hash_fields |=\n-\t\t\t\t\t\tIBV_RX_HASH_DST_PORT_UDP;\n+\t\t\t\tfields |= IBV_RX_HASH_DST_PORT_UDP;\n \t\t\telse\n-\t\t\t\tdev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;\n+\t\t\t\tfields |= MLX5_UDP_IBV_RX_HASH;\n \t\t}\n \t} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||\n-\t\t   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {\n+\t\t   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP)) ||\n+\t\t   !items) {\n \t\tif (rss_types & RTE_ETH_RSS_TCP) {\n \t\t\tif (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)\n-\t\t\t\tdev_flow->hash_fields |=\n-\t\t\t\t\t\tIBV_RX_HASH_SRC_PORT_TCP;\n+\t\t\t\tfields |= IBV_RX_HASH_SRC_PORT_TCP;\n \t\t\telse if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)\n-\t\t\t\tdev_flow->hash_fields |=\n-\t\t\t\t\t\tIBV_RX_HASH_DST_PORT_TCP;\n+\t\t\t\tfields |= IBV_RX_HASH_DST_PORT_TCP;\n \t\t\telse\n-\t\t\t\tdev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;\n+\t\t\t\tfields |= MLX5_TCP_IBV_RX_HASH;\n \t\t}\n \t}\n \tif (rss_inner)\n-\t\tdev_flow->hash_fields |= IBV_RX_HASH_INNER;\n+\t\tfields |= IBV_RX_HASH_INNER;\n+\t*hash_fields = fields;\n }\n \n /**\n@@ -11061,7 +11066,6 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,\n \t\t     struct mlx5_flow_rss_desc *rss_desc,\n \t\t     uint32_t *hrxq_idx)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_flow_handle *dh = dev_flow->handle;\n \tstruct mlx5_hrxq *hrxq;\n \n@@ -11072,11 +11076,8 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,\n \trss_desc->shared_rss = 0;\n \tif (rss_desc->hash_fields == 0)\n \t\trss_desc->queue_num = 1;\n-\t*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);\n-\tif (!*hrxq_idx)\n-\t\treturn NULL;\n-\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],\n-\t\t\t      *hrxq_idx);\n+\thrxq = mlx5_hrxq_get(dev, rss_desc);\n+\t*hrxq_idx = hrxq ? 
hrxq->idx : 0;\n \treturn hrxq;\n }\n \n@@ -11622,7 +11623,9 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,\n \t\t\t * rss->level and rss.types should be set in advance\n \t\t\t * when expanding items for RSS.\n \t\t\t */\n-\t\t\tflow_dv_hashfields_set(dev_flow, rss_desc);\n+\t\t\tflow_dv_hashfields_set(dev_flow->handle->layers,\n+\t\t\t\t\t       rss_desc,\n+\t\t\t\t\t       &dev_flow->hash_fields);\n \t\t\thrxq = flow_dv_hrxq_prepare(dev, dev_flow,\n \t\t\t\t\t\t    rss_desc, &hrxq_idx);\n \t\t\tif (!hrxq)\n@@ -13647,7 +13650,9 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t */\n \thandle->layers |= item_flags;\n \tif (action_flags & MLX5_FLOW_ACTION_RSS)\n-\t\tflow_dv_hashfields_set(dev_flow, rss_desc);\n+\t\tflow_dv_hashfields_set(dev_flow->handle->layers,\n+\t\t\t\t       rss_desc,\n+\t\t\t\t       &dev_flow->hash_fields);\n \t/* If has RSS action in the sample action, the Sample/Mirror resource\n \t * should be registered after the hash filed be update.\n \t */\n@@ -14596,20 +14601,18 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,\n  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share\n  * same slot in mlx5_rss_hash_fields.\n  *\n- * @param[in] rss\n- *   Pointer to the shared action RSS conf.\n+ * @param[in] rss_types\n+ *   RSS type.\n  * @param[in, out] hash_field\n  *   hash_field variable needed to be adjusted.\n  *\n  * @return\n  *   void\n  */\n-static void\n-__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,\n-\t\t\t\t     uint64_t *hash_field)\n+void\n+flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,\n+\t\t\t\t   uint64_t *hash_field)\n {\n-\tuint64_t rss_types = rss->origin.types;\n-\n \tswitch (*hash_field & ~IBV_RX_HASH_INNER) {\n \tcase MLX5_RSS_HASH_IPV4:\n \t\tif (rss_types & MLX5_IPV4_LAYER_TYPES) {\n@@ -14692,12 +14695,15 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,\n \tsize_t i;\n \tint err;\n \n-\tif (mlx5_ind_table_obj_setup(dev, 
shared_rss->ind_tbl,\n-\t\t\t\t     !!dev->data->dev_started)) {\n+\tshared_rss->ind_tbl = mlx5_ind_table_obj_new\n+\t\t\t      (dev, shared_rss->origin.queue,\n+\t\t\t       shared_rss->origin.queue_num,\n+\t\t\t       true,\n+\t\t\t       !!dev->data->dev_started);\n+\tif (!shared_rss->ind_tbl)\n \t\treturn rte_flow_error_set(error, rte_errno,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\t  \"cannot setup indirection table\");\n-\t}\n \tmemcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);\n \trss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;\n \trss_desc.const_q = shared_rss->origin.queue;\n@@ -14706,19 +14712,20 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,\n \trss_desc.shared_rss = action_idx;\n \trss_desc.ind_tbl = shared_rss->ind_tbl;\n \tfor (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {\n-\t\tuint32_t hrxq_idx;\n+\t\tstruct mlx5_hrxq *hrxq;\n \t\tuint64_t hash_fields = mlx5_rss_hash_fields[i];\n \t\tint tunnel = 0;\n \n-\t\t__flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);\n+\t\tflow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types,\n+\t\t\t\t\t\t   &hash_fields);\n \t\tif (shared_rss->origin.level > 1) {\n \t\t\thash_fields |= IBV_RX_HASH_INNER;\n \t\t\ttunnel = 1;\n \t\t}\n \t\trss_desc.tunnel = tunnel;\n \t\trss_desc.hash_fields = hash_fields;\n-\t\thrxq_idx = mlx5_hrxq_get(dev, &rss_desc);\n-\t\tif (!hrxq_idx) {\n+\t\thrxq = mlx5_hrxq_get(dev, &rss_desc);\n+\t\tif (!hrxq) {\n \t\t\trte_flow_error_set\n \t\t\t\t(error, rte_errno,\n \t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n@@ -14726,14 +14733,14 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,\n \t\t\tgoto error_hrxq_new;\n \t\t}\n \t\terr = __flow_dv_action_rss_hrxq_set\n-\t\t\t(shared_rss, hash_fields, hrxq_idx);\n+\t\t\t(shared_rss, hash_fields, hrxq->idx);\n \t\tMLX5_ASSERT(!err);\n \t}\n \treturn 0;\n error_hrxq_new:\n \terr = rte_errno;\n \t__flow_dv_action_rss_hrxqs_release(dev, shared_rss);\n-\tif (!mlx5_ind_table_obj_release(dev, 
shared_rss->ind_tbl, true, true))\n+\tif (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))\n \t\tshared_rss->ind_tbl = NULL;\n \trte_errno = err;\n \treturn -rte_errno;\n@@ -14764,18 +14771,14 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_shared_action_rss *shared_rss = NULL;\n-\tvoid *queue = NULL;\n \tstruct rte_flow_action_rss *origin;\n \tconst uint8_t *rss_key;\n-\tuint32_t queue_size = rss->queue_num * sizeof(uint16_t);\n \tuint32_t idx;\n \n \tRTE_SET_USED(conf);\n-\tqueue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),\n-\t\t\t    0, SOCKET_ID_ANY);\n \tshared_rss = mlx5_ipool_zmalloc\n \t\t\t (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);\n-\tif (!shared_rss || !queue) {\n+\tif (!shared_rss) {\n \t\trte_flow_error_set(error, ENOMEM,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t   \"cannot allocate resource memory\");\n@@ -14787,18 +14790,6 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,\n \t\t\t\t   \"rss action number out of range\");\n \t\tgoto error_rss_init;\n \t}\n-\tshared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,\n-\t\t\t\t\t  sizeof(*shared_rss->ind_tbl),\n-\t\t\t\t\t  0, SOCKET_ID_ANY);\n-\tif (!shared_rss->ind_tbl) {\n-\t\trte_flow_error_set(error, ENOMEM,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t   \"cannot allocate resource memory\");\n-\t\tgoto error_rss_init;\n-\t}\n-\tmemcpy(queue, rss->queue, queue_size);\n-\tshared_rss->ind_tbl->queues = queue;\n-\tshared_rss->ind_tbl->queues_n = rss->queue_num;\n \torigin = &shared_rss->origin;\n \torigin->func = rss->func;\n \torigin->level = rss->level;\n@@ -14809,10 +14800,12 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,\n \tmemcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);\n \torigin->key = &shared_rss->key[0];\n \torigin->key_len = MLX5_RSS_HASH_KEY_LEN;\n-\torigin->queue = queue;\n+\torigin->queue = rss->queue;\n 
\torigin->queue_num = rss->queue_num;\n \tif (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))\n \t\tgoto error_rss_init;\n+\t/* Update queue with indirect table queue memoyr. */\n+\torigin->queue = shared_rss->ind_tbl->queues;\n \trte_spinlock_init(&shared_rss->action_rss_sl);\n \t__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);\n \trte_spinlock_lock(&priv->shared_act_sl);\n@@ -14823,12 +14816,11 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,\n error_rss_init:\n \tif (shared_rss) {\n \t\tif (shared_rss->ind_tbl)\n-\t\t\tmlx5_free(shared_rss->ind_tbl);\n+\t\t\tmlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,\n+\t\t\t\t\t\t   !!dev->data->dev_started);\n \t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],\n \t\t\t\tidx);\n \t}\n-\tif (queue)\n-\t\tmlx5_free(queue);\n \treturn 0;\n }\n \n@@ -14856,7 +14848,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,\n \t    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);\n \tuint32_t old_refcnt = 1;\n \tint remaining;\n-\tuint16_t *queue = NULL;\n \n \tif (!shared_rss)\n \t\treturn rte_flow_error_set(error, EINVAL,\n@@ -14875,8 +14866,7 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t  NULL,\n \t\t\t\t\t  \"shared rss hrxq has references\");\n-\tqueue = shared_rss->ind_tbl->queues;\n-\tremaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,\n+\tremaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,\n \t\t\t\t\t       !!dev->data->dev_started);\n \tif (remaining)\n \t\treturn rte_flow_error_set(error, EBUSY,\n@@ -14884,7 +14874,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,\n \t\t\t\t\t  NULL,\n \t\t\t\t\t  \"shared rss indirection table has\"\n \t\t\t\t\t  \" references\");\n-\tmlx5_free(queue);\n \trte_spinlock_lock(&priv->shared_act_sl);\n \tILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],\n \t\t 
    &priv->rss_shared_actions, idx, shared_rss, next);\n@@ -16878,11 +16867,12 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,\n \tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {\n \t\tif (!rss_desc[i])\n \t\t\tcontinue;\n-\t\thrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);\n-\t\tif (!hrxq_idx[i]) {\n+\t\thrxq = mlx5_hrxq_get(dev, rss_desc[i]);\n+\t\tif (!hrxq) {\n \t\t\trte_spinlock_unlock(&mtr_policy->sl);\n \t\t\treturn NULL;\n \t\t}\n+\t\thrxq_idx[i] = hrxq->idx;\n \t}\n \tsub_policy_num = (mtr_policy->sub_policy_num >>\n \t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex a825766245..e59d812072 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -7,6 +7,7 @@\n #include <mlx5_malloc.h>\n #include \"mlx5_defs.h\"\n #include \"mlx5_flow.h\"\n+#include \"mlx5_rx.h\"\n \n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n \n@@ -89,6 +90,56 @@ flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)\n \tmlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);\n }\n \n+/**\n+ * Register queue/RSS action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] hws_flags\n+ *   DR action flags.\n+ * @param[in] action\n+ *   rte flow action.\n+ *\n+ * @return\n+ *    Table on success, NULL otherwise and rte_errno is set.\n+ */\n+static inline struct mlx5_hrxq*\n+flow_hw_tir_action_register(struct rte_eth_dev *dev,\n+\t\t\t    uint32_t hws_flags,\n+\t\t\t    const struct rte_flow_action *action)\n+{\n+\tstruct mlx5_flow_rss_desc rss_desc = {\n+\t\t.hws_flags = hws_flags,\n+\t};\n+\tstruct mlx5_hrxq *hrxq;\n+\n+\tif (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\tconst struct rte_flow_action_queue *queue = action->conf;\n+\n+\t\trss_desc.const_q = &queue->index;\n+\t\trss_desc.queue_num = 1;\n+\t} else {\n+\t\tconst struct rte_flow_action_rss *rss = action->conf;\n+\n+\t\trss_desc.queue_num = 
rss->queue_num;\n+\t\trss_desc.const_q = rss->queue;\n+\t\tmemcpy(rss_desc.key,\n+\t\t       !rss->key ? rss_hash_default_key : rss->key,\n+\t\t       MLX5_RSS_HASH_KEY_LEN);\n+\t\trss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;\n+\t\trss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;\n+\t\tflow_dv_hashfields_set(0, &rss_desc, &rss_desc.hash_fields);\n+\t\tflow_dv_action_rss_l34_hash_adjust(rss->types,\n+\t\t\t\t\t\t   &rss_desc.hash_fields);\n+\t\tif (rss->level > 1) {\n+\t\t\trss_desc.hash_fields |= IBV_RX_HASH_INNER;\n+\t\t\trss_desc.tunnel = 1;\n+\t\t}\n+\t}\n+\thrxq = mlx5_hrxq_get(dev, &rss_desc);\n+\treturn hrxq;\n+}\n+\n /**\n  * Destroy DR actions created by action template.\n  *\n@@ -260,6 +311,40 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t}\n \t\t\ti++;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\t\tif (masks->conf) {\n+\t\t\t\tacts->tir = flow_hw_tir_action_register\n+\t\t\t\t(dev,\n+\t\t\t\t mlx5_hw_act_flag[!!attr->group][type],\n+\t\t\t\t actions);\n+\t\t\t\tif (!acts->tir)\n+\t\t\t\t\tgoto err;\n+\t\t\t\tacts->rule_acts[i].action =\n+\t\t\t\t\tacts->tir->action;\n+\t\t\t} else if (__flow_hw_act_data_general_append\n+\t\t\t\t\t(priv, acts, actions->type,\n+\t\t\t\t\t actions - action_start, i)) {\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\ti++;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\t\tif (masks->conf) {\n+\t\t\t\tacts->tir = flow_hw_tir_action_register\n+\t\t\t\t(dev,\n+\t\t\t\t mlx5_hw_act_flag[!!attr->group][type],\n+\t\t\t\t actions);\n+\t\t\t\tif (!acts->tir)\n+\t\t\t\t\tgoto err;\n+\t\t\t\tacts->rule_acts[i].action =\n+\t\t\t\t\tacts->tir->action;\n+\t\t\t} else if (__flow_hw_act_data_general_append\n+\t\t\t\t\t(priv, acts, actions->type,\n+\t\t\t\t\t actions - action_start, i)) {\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\ti++;\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n \t\t\tactions_end = true;\n \t\t\tbreak;\n@@ -313,6 +398,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n 
\tstruct rte_flow_attr attr = {\n \t\t\t.ingress = 1,\n \t};\n+\tuint32_t ft_flag;\n \n \tmemcpy(rule_acts, hw_acts->rule_acts,\n \t       sizeof(*rule_acts) * hw_acts->acts_num);\n@@ -320,6 +406,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \tif (LIST_EMPTY(&hw_acts->act_list))\n \t\treturn 0;\n \tattr.group = table->grp->group_id;\n+\tft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];\n \tif (table->type == MLX5DR_TABLE_TYPE_FDB) {\n \t\tattr.transfer = 1;\n \t\tattr.ingress = 1;\n@@ -332,6 +419,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \tLIST_FOREACH(act_data, &hw_acts->act_list, next) {\n \t\tuint32_t jump_group;\n \t\tstruct mlx5_hw_jump_action *jump;\n+\t\tstruct mlx5_hrxq *hrxq;\n \n \t\taction = &actions[act_data->action_src];\n \t\tMLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||\n@@ -353,6 +441,17 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\tjob->flow->jump = jump;\n \t\t\tjob->flow->fate_type = MLX5_FLOW_FATE_JUMP;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\t\thrxq = flow_hw_tir_action_register(dev,\n+\t\t\t\t\tft_flag,\n+\t\t\t\t\taction);\n+\t\t\tif (!hrxq)\n+\t\t\t\treturn -1;\n+\t\t\trule_acts[act_data->action_dst].action = hrxq->action;\n+\t\t\tjob->flow->hrxq = hrxq;\n+\t\t\tjob->flow->fate_type = MLX5_FLOW_FATE_QUEUE;\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n@@ -553,6 +652,8 @@ flow_hw_q_pull(struct rte_eth_dev *dev,\n \t\tif (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {\n \t\t\tif (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)\n \t\t\t\tflow_hw_jump_release(dev, job->flow->jump);\n+\t\t\telse if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)\n+\t\t\t\tmlx5_hrxq_obj_release(dev, job->flow->hrxq);\n \t\t\tmlx5_ipool_free(job->flow->table->flow, job->flow->idx);\n \t\t}\n \t\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c 
b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex 90ccb9aaff..f08aa7a770 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -1943,7 +1943,6 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\tMLX5_ASSERT(priv->drop_queue.hrxq);\n \t\t\thrxq = priv->drop_queue.hrxq;\n \t\t} else {\n-\t\t\tuint32_t hrxq_idx;\n \t\t\tstruct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;\n \n \t\t\tMLX5_ASSERT(rss_desc->queue_num);\n@@ -1952,9 +1951,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\trss_desc->tunnel = !!(handle->layers &\n \t\t\t\t\t      MLX5_FLOW_LAYER_TUNNEL);\n \t\t\trss_desc->shared_rss = 0;\n-\t\t\thrxq_idx = mlx5_hrxq_get(dev, rss_desc);\n-\t\t\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],\n-\t\t\t\t\t      hrxq_idx);\n+\t\t\thrxq = mlx5_hrxq_get(dev, rss_desc);\n \t\t\tif (!hrxq) {\n \t\t\t\trte_flow_error_set\n \t\t\t\t\t(error, rte_errno,\n@@ -1962,7 +1959,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\t\t\t \"cannot get hash queue\");\n \t\t\t\tgoto error;\n \t\t\t}\n-\t\t\thandle->rix_hrxq = hrxq_idx;\n+\t\t\thandle->rix_hrxq = hrxq->idx;\n \t\t}\n \t\tMLX5_ASSERT(hrxq);\n \t\thandle->drv_flow = mlx5_glue->create_flow\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex cb5d51340d..468772ee27 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -225,9 +225,13 @@ int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);\n struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,\n \t\t\t\t\t\t  const uint16_t *queues,\n \t\t\t\t\t\t  uint32_t queues_n);\n+struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,\n+\t\t\t\t\t\t  const uint16_t *queues,\n+\t\t\t\t\t\t  uint32_t queues_n,\n+\t\t\t\t\t\t  bool standalone,\n+\t\t\t\t\t\t  bool ref_qs);\n int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,\n \t\t\t       struct mlx5_ind_table_obj 
*ind_tbl,\n-\t\t\t       bool standalone,\n \t\t\t       bool deref_rxqs);\n int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,\n \t\t\t     struct mlx5_ind_table_obj *ind_tbl,\n@@ -250,8 +254,9 @@ struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,\n \t\t\t\t\t   void *cb_ctx __rte_unused);\n void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,\n \t\t\t     struct mlx5_list_entry *entry);\n-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,\n+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,\n \t\t       struct mlx5_flow_rss_desc *rss_desc);\n+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);\n int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);\n uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);\n enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 580d7ae868..a892675646 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -2284,8 +2284,6 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,\n  *   Pointer to Ethernet device.\n  * @param ind_table\n  *   Indirection table to release.\n- * @param standalone\n- *   Indirection table for Standalone queue.\n  * @param deref_rxqs\n  *   If true, then dereference RX queues related to indirection table.\n  *   Otherwise, no additional action will be taken.\n@@ -2296,7 +2294,6 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,\n int\n mlx5_ind_table_obj_release(struct rte_eth_dev *dev,\n \t\t\t   struct mlx5_ind_table_obj *ind_tbl,\n-\t\t\t   bool standalone,\n \t\t\t   bool deref_rxqs)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -2304,7 +2301,7 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,\n \n \trte_rwlock_write_lock(&priv->ind_tbls_lock);\n \tret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);\n-\tif (!ret && !standalone)\n+\tif (!ret)\n 
\t\tLIST_REMOVE(ind_tbl, next);\n \trte_rwlock_write_unlock(&priv->ind_tbls_lock);\n \tif (ret)\n@@ -2413,7 +2410,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,\n  * @return\n  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.\n  */\n-static struct mlx5_ind_table_obj *\n+struct mlx5_ind_table_obj *\n mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,\n \t\t       uint32_t queues_n, bool standalone, bool ref_qs)\n {\n@@ -2435,11 +2432,13 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,\n \t\tmlx5_free(ind_tbl);\n \t\treturn NULL;\n \t}\n-\tif (!standalone) {\n-\t\trte_rwlock_write_lock(&priv->ind_tbls_lock);\n+\trte_rwlock_write_lock(&priv->ind_tbls_lock);\n+\tif (!standalone)\n \t\tLIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);\n-\t\trte_rwlock_write_unlock(&priv->ind_tbls_lock);\n-\t}\n+\telse\n+\t\tLIST_INSERT_HEAD(&priv->standalone_ind_tbls, ind_tbl, next);\n+\trte_rwlock_write_unlock(&priv->ind_tbls_lock);\n+\n \treturn ind_tbl;\n }\n \n@@ -2605,6 +2604,7 @@ mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,\n \n \treturn (hrxq->rss_key_len != rss_desc->key_len ||\n \t    memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||\n+\t    hrxq->hws_flags != rss_desc->hws_flags ||\n \t    hrxq->hash_fields != rss_desc->hash_fields ||\n \t    hrxq->ind_table->queues_n != rss_desc->queue_num ||\n \t    memcmp(hrxq->ind_table->queues, rss_desc->queue,\n@@ -2689,8 +2689,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,\n \t}\n \tif (ind_tbl != hrxq->ind_table) {\n \t\tMLX5_ASSERT(!hrxq->standalone);\n-\t\tmlx5_ind_table_obj_release(dev, hrxq->ind_table,\n-\t\t\t\t\t   hrxq->standalone, true);\n+\t\tmlx5_ind_table_obj_release(dev, hrxq->ind_table, true);\n \t\thrxq->ind_table = ind_tbl;\n \t}\n \thrxq->hash_fields = hash_fields;\n@@ -2700,8 +2699,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,\n \terr = rte_errno;\n \tif 
(ind_tbl != hrxq->ind_table) {\n \t\tMLX5_ASSERT(!hrxq->standalone);\n-\t\tmlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,\n-\t\t\t\t\t   true);\n+\t\tmlx5_ind_table_obj_release(dev, ind_tbl, true);\n \t}\n \trte_errno = err;\n \treturn -rte_errno;\n@@ -2713,12 +2711,16 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\tmlx5_glue->destroy_flow_action(hrxq->action);\n+\tif (hrxq->hws_flags)\n+\t\tmlx5dr_action_destroy(hrxq->action);\n+\telse\n+\t\tmlx5_glue->destroy_flow_action(hrxq->action);\n #endif\n \tpriv->obj_ops.hrxq_destroy(hrxq);\n \tif (!hrxq->standalone) {\n \t\tmlx5_ind_table_obj_release(dev, hrxq->ind_table,\n-\t\t\t\t\t   hrxq->standalone, true);\n+\t\t\t\t\t   hrxq->hws_flags ?\n+\t\t\t\t\t   (!!dev->data->dev_started) : true);\n \t}\n \tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);\n }\n@@ -2762,11 +2764,12 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,\n \tint ret;\n \n \tqueues_n = rss_desc->hash_fields ? 
queues_n : 1;\n-\tif (!ind_tbl)\n+\tif (!ind_tbl && !rss_desc->hws_flags)\n \t\tind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);\n \tif (!ind_tbl)\n \t\tind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,\n-\t\t\t\t\t\t standalone,\n+\t\t\t\t\t\t standalone ||\n+\t\t\t\t\t\t rss_desc->hws_flags,\n \t\t\t\t\t\t !!dev->data->dev_started);\n \tif (!ind_tbl)\n \t\treturn NULL;\n@@ -2778,6 +2781,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,\n \thrxq->ind_table = ind_tbl;\n \thrxq->rss_key_len = rss_key_len;\n \thrxq->hash_fields = rss_desc->hash_fields;\n+\thrxq->hws_flags = rss_desc->hws_flags;\n \tmemcpy(hrxq->rss_key, rss_key, rss_key_len);\n \tret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);\n \tif (ret < 0)\n@@ -2785,7 +2789,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,\n \treturn hrxq;\n error:\n \tif (!rss_desc->ind_tbl)\n-\t\tmlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);\n+\t\tmlx5_ind_table_obj_release(dev, ind_tbl, true);\n \tif (hrxq)\n \t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);\n \treturn NULL;\n@@ -2839,13 +2843,13 @@ mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)\n  *   RSS configuration for the Rx hash queue.\n  *\n  * @return\n- *   An hash Rx queue index on success.\n+ *   An hash Rx queue on success.\n  */\n-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,\n+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,\n \t\t       struct mlx5_flow_rss_desc *rss_desc)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hrxq *hrxq;\n+\tstruct mlx5_hrxq *hrxq = NULL;\n \tstruct mlx5_list_entry *entry;\n \tstruct mlx5_flow_cb_ctx ctx = {\n \t\t.data = rss_desc,\n@@ -2856,12 +2860,10 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,\n \t} else {\n \t\tentry = mlx5_list_register(priv->hrxqs, &ctx);\n \t\tif (!entry)\n-\t\t\treturn 0;\n+\t\t\treturn NULL;\n \t\thrxq = container_of(entry, typeof(*hrxq), entry);\n \t}\n-\tif (hrxq)\n-\t\treturn 
hrxq->idx;\n-\treturn 0;\n+\treturn hrxq;\n }\n \n /**\n@@ -2870,17 +2872,15 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,\n  * @param dev\n  *   Pointer to Ethernet device.\n  * @param hrxq_idx\n- *   Index to Hash Rx queue to release.\n+ *   Hash Rx queue to release.\n  *\n  * @return\n  *   1 while a reference on it exists, 0 when freed.\n  */\n-int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)\n+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hrxq *hrxq;\n \n-\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);\n \tif (!hrxq)\n \t\treturn 0;\n \tif (!hrxq->standalone)\n@@ -2889,6 +2889,26 @@ int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)\n \treturn 0;\n }\n \n+/**\n+ * Release the hash Rx queue with index.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param hrxq_idx\n+ *   Index to Hash Rx queue to release.\n+ *\n+ * @return\n+ *   1 while a reference on it exists, 0 when freed.\n+ */\n+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hrxq *hrxq;\n+\n+\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);\n+\treturn mlx5_hrxq_obj_release(dev, hrxq);\n+}\n+\n /**\n  * Create a drop Rx Hash queue.\n  *\n",
    "prefixes": [
        "10/13"
    ]
}