get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all writable fields.
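
The ?format=api query string in the captured request below selects Django
REST Framework's browsable rendering; requesting ?format=json (or sending
an Accept: application/json header) returns the same record as plain JSON.
A minimal retrieval sketch in Python, assuming the third-party requests
package is available:

    import requests

    # Fetch the patch shown below as plain JSON rather than the browsable view.
    url = "http://patches.dpdk.org/api/patches/108210/"
    resp = requests.get(url, params={"format": "json"})
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])                # "[v3,11/14] net/mlx5: add queue and RSS action"
    print(patch["state"])               # "superseded"
    print(patch["submitter"]["email"])  # "suanmingm@nvidia.com"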

GET /api/patches/108210/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 108210,
    "url": "http://patches.dpdk.org/api/patches/108210/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220224031029.14049-12-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220224031029.14049-12-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220224031029.14049-12-suanmingm@nvidia.com",
    "date": "2022-02-24T03:10:26",
    "name": "[v3,11/14] net/mlx5: add queue and RSS action",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ddb3523c73636f78e143a7471057180349e88a3b",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220224031029.14049-12-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 21839,
            "url": "http://patches.dpdk.org/api/series/21839/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=21839",
            "date": "2022-02-24T03:10:16",
            "name": "net/mlx5: add hardware steering",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/21839/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/108210/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/108210/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 41386A0353;\n\tThu, 24 Feb 2022 04:11:43 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id CCC41411AB;\n\tThu, 24 Feb 2022 04:11:15 +0100 (CET)",
            "from NAM02-SN1-obe.outbound.protection.outlook.com\n (mail-sn1anam02on2079.outbound.protection.outlook.com [40.107.96.79])\n by mails.dpdk.org (Postfix) with ESMTP id B23314118A\n for <dev@dpdk.org>; Thu, 24 Feb 2022 04:11:11 +0100 (CET)",
            "from MWHPR18CA0029.namprd18.prod.outlook.com (2603:10b6:320:31::15)\n by BYAPR12MB4981.namprd12.prod.outlook.com (2603:10b6:a03:10d::29)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5017.24; Thu, 24 Feb\n 2022 03:11:08 +0000",
            "from CO1NAM11FT050.eop-nam11.prod.protection.outlook.com\n (2603:10b6:320:31:cafe::40) by MWHPR18CA0029.outlook.office365.com\n (2603:10b6:320:31::15) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4951.19 via Frontend\n Transport; Thu, 24 Feb 2022 03:11:08 +0000",
            "from mail.nvidia.com (12.22.5.236) by\n CO1NAM11FT050.mail.protection.outlook.com (10.13.174.79) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5017.22 via Frontend Transport; Thu, 24 Feb 2022 03:11:07 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL109.nvidia.com\n (10.27.9.19) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Thu, 24 Feb 2022 03:11:06 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.9; Wed, 23 Feb 2022\n 19:11:04 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=PSShTocgvyRUBCJO55tsrDU2E3Z9AnhaPy1HmPBRiTVHSQgPrIBtG65JTpuGONjqgzWicCHgxfFeOBAZ0JTH3ije2/JAA+qjCsuXDFMeAci347zPMUddR+cbYANPEI/UcRBjphsLUpRmFIT2fGJX2SzdUA6sjR/1fidhBND4NYKTKk3kg0iSAHfzS0Pdr5t4TByngRM1cQq5pY+rofNLM8zsQZtawmNe4Hkcd1ITnG/h5fk1b3eHIMpbgHwrdS0QV4i3t4uXPJBT0GT0tJKVxueoswGxBhZxRH+p3FSHyRhJaCoPTbuS2dBoF6Gruau3jOle8pCHarTVIZjn+J85Ww==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=h6ZoFq4ou+1iAOf0RGX6GAgOS4biCVTTrizyXHFXFqI=;\n b=VTjNT53yID7+dNs1hSqwjnost2Rwpyu4EtNE3E05FL1G0MTSLB43d2dX1Wj2QBQUfO+p2U1RYim7rYvvDf3NDtZXmHVWedUzrBOoyBPrI6YbH/QxAsia2XQMagmyn+zoEZJInLAAPXHB66TVidU97lunuVOzufYrHggrH/vwptshgR2/UnXPhnILjEyU8hPkCE0BCzA05PN31hIwKqRkWhh2pi328u1HRNokulviVB8YNb/lc7pq+JPyVrXVYacoLPho5HJdbamRH8lrzp0HzbVaNDyLg9eGFRjp2d9hmvOcF3D/a7oMp3eiB07dFmeRZxgbQnuEPyzMPP8rDyqReA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.236) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com; dmarc=pass\n (p=reject sp=reject pct=100) action=none header.from=nvidia.com; dkim=none\n (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=h6ZoFq4ou+1iAOf0RGX6GAgOS4biCVTTrizyXHFXFqI=;\n b=K+IGGyEpPJbPyg+03ZWHi9AYYuBC287ahxb46XOtuRAzbWr6HVzWEOWe//K8aklztnA36aLwAVwcIWP6GDbgwVu7dWs1UJRgZHedOeUIws4ytedlw74+6gxVqadMUWnA8vp36TmLG2hy6vTP2zIY8YQ/PDDxvNk6i+LS/ohEe4PmWWWtjczscdwDHMOVZ5oMflQtqD4TsQ2VKb4JjDxz3uexj4VL9ecfMsgH7tGam09nUROdYVx9sB0xamO/RYRp5IQWX3nkCRQywqgZeI0dfwJse5Tf5RagN2q1oVHRZRx3emAC7TPBKNKmQqH8Y0TZZOlP7b3Yv5crFFJXrqpdRA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.236)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.236 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.236; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Subject": "[PATCH v3 11/14] net/mlx5: add queue and RSS action",
        "Date": "Thu, 24 Feb 2022 05:10:26 +0200",
        "Message-ID": "<20220224031029.14049-12-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220224031029.14049-1-suanmingm@nvidia.com>",
        "References": "<20220210162926.20436-1-suanmingm@nvidia.com>\n <20220224031029.14049-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "e845c9c5-b586-4413-b18e-08d9f7434cec",
        "X-MS-TrafficTypeDiagnostic": "BYAPR12MB4981:EE_",
        "X-Microsoft-Antispam-PRVS": "\n <BYAPR12MB4981C108277060540D2DB951C13D9@BYAPR12MB4981.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n vgwaDRWCYCrogQQ1FN4ViaYf9V2rSAWlwiOgLdXVBarby/DG2MEPxAU5tNdp8L6nPJJv+JgXn/J2U6XYq7Z2dKJye2W+h32hW2e5IalteSwP8NKma2TeaEAppMmPw2p+v4vY3syO7/lNAFMVVRVwLkwI0t5qSazZsNBzaYN1r8IP7gn3HOaTMPumVF4FMgCLsV02zpCarIa0hKwwtBbZn3cMU76cN4/xbxE8FUbBt8SrH34sETCJ2uIVRyrx2HzvjkVtoyUJYaoy2n7lTABB1dFyFqd56xbs1pxXAN64nggLNdrxSnphFCmbrgMuicj3OmdES27kdMhlOxrwRIHIh0sTvEtcrvOMbzgQuNp8O8l8A2ugzkCJShAjtKYDAUGH7pgfxPURg7VudMEw5TCnORJKl3zbSFyk499x2Jfq7iuX/dqYbfIzZ3yaMerfm6r+rfmydPKRzCPR9ibYxfStn/XOJzcDZpT9zYJZaV4oEtjTjsY2rclgHLpVzwvIXOP0Tr0aM8yk3FIWI/jyZrIqavl/4kwamx24GFcQDGUNdZw3WbRAendqQN67onSlgbjvugb7AVHOppo04oRnhs5J+3zQ+hBNDnAUXWjjx+KaoNz4rOBCAuuOa7GE2r64jHOPGMpaUMf4cvdzwBucunfl26bQMIVSL+b8ZIc237dZSqBT3sPxARZ7ENhw0sMxmITPlHsdU1Xg+OmCEa8zpIgSbKtt+NQ3KpW2UkJWLgjWbpQ=",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.236; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:CAL; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(46966006)(36840700001)(40470700004)(40460700003)(6666004)(5660300002)(30864003)(82310400004)(86362001)(2906002)(1076003)(47076005)(16526019)(186003)(36756003)(26005)(508600001)(36860700001)(426003)(336012)(83380400001)(6286002)(4326008)(8676002)(70586007)(70206006)(356005)(81166007)(2616005)(55016003)(8936002)(6636002)(54906003)(110136005)(7696005)(316002)(36900700001)(579004)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "24 Feb 2022 03:11:07.3679 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n e845c9c5-b586-4413-b18e-08d9f7434cec",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.236];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT050.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BYAPR12MB4981",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This commit adds the queue and RSS action. Similar to the jump action,\ndynamic ones will be added to the action construct list.\n\nDue to the queue and RSS action in template should not be destroyed\nduring port restart, the actions are created with standalone indirect\ntable as indirect action does. When port stops, detaches the indirect\ntable from action, when port starts, attaches the indirect table back\nto the action.\n\nOne more change is made to accelerate the action creation. Currently\nthe mlx5_hrxq_get() function returns the object index instead of object\npointer. This introduced an extra converting the index to the object by\ncalling mlx5_ipool_get() in most of the case. And that extra converting\nhurts multi-thread performance since mlx5_ipool_get() uses the global\nlock inside. As the hash Rx queue object itself also contains the index,\nreturns the object directly will achieve better performance without the\nglobal lock.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c   |  18 ++--\n drivers/net/mlx5/mlx5.h            |   4 +\n drivers/net/mlx5/mlx5_devx.c       |  10 ++\n drivers/net/mlx5/mlx5_flow.c       |  38 +++----\n drivers/net/mlx5/mlx5_flow.h       |   7 ++\n drivers/net/mlx5/mlx5_flow_dv.c    | 161 ++++++++++++++---------------\n drivers/net/mlx5/mlx5_flow_hw.c    | 101 ++++++++++++++++++\n drivers/net/mlx5/mlx5_flow_verbs.c |   7 +-\n drivers/net/mlx5/mlx5_rx.h         |   9 +-\n drivers/net/mlx5/mlx5_rxq.c        |  85 +++++++++------\n 10 files changed, 283 insertions(+), 157 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex 0faf26f5b8..2e1606a733 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -1521,6 +1521,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \tpriv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);\n \tif (!priv->drop_queue.hrxq)\n \t\tgoto error;\n+\tpriv->hrxqs = mlx5_list_create(\"hrxq\", eth_dev, true,\n+\t\t\t\t       mlx5_hrxq_create_cb,\n+\t\t\t\t       mlx5_hrxq_match_cb,\n+\t\t\t\t       mlx5_hrxq_remove_cb,\n+\t\t\t\t       mlx5_hrxq_clone_cb,\n+\t\t\t\t       mlx5_hrxq_clone_free_cb);\n+\tif (!priv->hrxqs)\n+\t\tgoto error;\n+\trte_rwlock_init(&priv->ind_tbls_lock);\n \tif (priv->sh->config.dv_flow_en == 2)\n \t\treturn eth_dev;\n \t/* Port representor shares the same max priority with pf port. */\n@@ -1545,15 +1554,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\t\terr = ENOTSUP;\n \t\t\tgoto error;\n \t}\n-\tpriv->hrxqs = mlx5_list_create(\"hrxq\", eth_dev, true,\n-\t\t\t\t       mlx5_hrxq_create_cb,\n-\t\t\t\t       mlx5_hrxq_match_cb,\n-\t\t\t\t       mlx5_hrxq_remove_cb,\n-\t\t\t\t       mlx5_hrxq_clone_cb,\n-\t\t\t\t       mlx5_hrxq_clone_free_cb);\n-\tif (!priv->hrxqs)\n-\t\tgoto error;\n-\trte_rwlock_init(&priv->ind_tbls_lock);\n \t/* Query availability of metadata reg_c's. */\n \tif (!priv->sh->metadata_regc_check_flag) {\n \t\terr = mlx5_flow_discover_mreg_c(eth_dev);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex f3732958a2..f60a40d669 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1287,6 +1287,7 @@ struct mlx5_flow_rss_desc {\n \tuint64_t hash_fields; /* Verbs Hash fields. */\n \tuint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */\n \tuint32_t key_len; /**< RSS hash key len. */\n+\tuint32_t hws_flags; /**< HW steering action. */\n \tuint32_t tunnel; /**< Queue in tunnel. */\n \tuint32_t shared_rss; /**< Shared RSS index. */\n \tstruct mlx5_ind_table_obj *ind_tbl;\n@@ -1348,6 +1349,7 @@ struct mlx5_hrxq {\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \tvoid *action; /* DV QP action pointer. */\n #endif\n+\tuint32_t hws_flags; /* Hw steering flags. */\n \tuint64_t hash_fields; /* Verbs Hash fields. */\n \tuint32_t rss_key_len; /* Hash key length in bytes. */\n \tuint32_t idx; /* Hash Rx queue index. */\n@@ -1478,6 +1480,8 @@ struct mlx5_priv {\n \tLIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */\n \t/* Indirection tables. */\n \tLIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;\n+\t/* Standalone indirect tables. */\n+\tLIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;\n \t/* Pointer to next element. */\n \trte_rwlock_t ind_tbls_lock;\n \tuint32_t refcnt; /**< Reference counter. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex a9b8c2a1b7..8d151fa4ab 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -807,6 +807,14 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,\n \t\tgoto error;\n \t}\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n+\tif (hrxq->hws_flags) {\n+\t\thrxq->action = mlx5dr_action_create_dest_tir\n+\t\t\t(priv->dr_ctx,\n+\t\t\t (struct mlx5dr_devx_obj *)hrxq->tir, hrxq->hws_flags);\n+\t\tif (!hrxq->action)\n+\t\t\tgoto error;\n+\t\treturn 0;\n+\t}\n \tif (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,\n \t\t\t\t\t\t\t  &hrxq->action)) {\n \t\trte_errno = errno;\n@@ -1042,6 +1050,8 @@ mlx5_devx_drop_action_create(struct rte_eth_dev *dev)\n \t\tDRV_LOG(ERR, \"Cannot create drop RX queue\");\n \t\treturn ret;\n \t}\n+\tif (priv->sh->config.dv_flow_en == 2)\n+\t\treturn 0;\n \t/* hrxq->ind_table queues are NULL, drop RX queue ID will be used */\n \tret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);\n \tif (ret != 0) {\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 1672939200..cbd8408e30 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -9439,14 +9439,10 @@ int\n mlx5_action_handle_attach(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_indexed_pool *ipool =\n-\t\t\tpriv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];\n-\tstruct mlx5_shared_action_rss *shared_rss, *shared_rss_last;\n \tint ret = 0;\n-\tuint32_t idx;\n+\tstruct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;\n \n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n+\tLIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {\n \t\tconst char *message;\n \t\tuint32_t queue_idx;\n \n@@ -9462,9 +9458,7 @@ mlx5_action_handle_attach(struct rte_eth_dev *dev)\n \t}\n \tif (ret != 0)\n \t\treturn ret;\n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n-\n+\tLIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {\n \t\tret = mlx5_ind_table_obj_attach(dev, ind_tbl);\n \t\tif (ret != 0) {\n \t\t\tDRV_LOG(ERR, \"Port %u could not attach \"\n@@ -9473,13 +9467,12 @@ mlx5_action_handle_attach(struct rte_eth_dev *dev)\n \t\t\tgoto error;\n \t\t}\n \t}\n+\n \treturn 0;\n error:\n-\tshared_rss_last = shared_rss;\n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n-\n-\t\tif (shared_rss == shared_rss_last)\n+\tind_tbl_last = ind_tbl;\n+\tLIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {\n+\t\tif (ind_tbl == ind_tbl_last)\n \t\t\tbreak;\n \t\tif (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)\n \t\t\tDRV_LOG(CRIT, \"Port %u could not detach \"\n@@ -9502,15 +9495,10 @@ int\n mlx5_action_handle_detach(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_indexed_pool *ipool =\n-\t\t\tpriv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];\n-\tstruct mlx5_shared_action_rss *shared_rss, *shared_rss_last;\n \tint ret = 0;\n-\tuint32_t idx;\n-\n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n+\tstruct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;\n \n+\tLIST_FOREACH(ind_tbl, 
&priv->standalone_ind_tbls, next) {\n \t\tret = mlx5_ind_table_obj_detach(dev, ind_tbl);\n \t\tif (ret != 0) {\n \t\t\tDRV_LOG(ERR, \"Port %u could not detach \"\n@@ -9521,11 +9509,9 @@ mlx5_action_handle_detach(struct rte_eth_dev *dev)\n \t}\n \treturn 0;\n error:\n-\tshared_rss_last = shared_rss;\n-\tILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {\n-\t\tstruct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;\n-\n-\t\tif (shared_rss == shared_rss_last)\n+\tind_tbl_last = ind_tbl;\n+\tLIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {\n+\t\tif (ind_tbl == ind_tbl_last)\n \t\t\tbreak;\n \t\tif (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)\n \t\t\tDRV_LOG(CRIT, \"Port %u could not attach \"\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 963dbd7806..70e6cf633f 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1024,6 +1024,7 @@ struct rte_flow_hw {\n \tunion {\n \t\t/* Jump action. */\n \t\tstruct mlx5_hw_jump_action *jump;\n+\t\tstruct mlx5_hrxq *hrxq; /* TIR action. */\n \t};\n \tstruct rte_flow_template_table *table; /* The table flow allcated from. */\n \tstruct mlx5dr_rule rule; /* HWS layer data struct. */\n@@ -1074,6 +1075,7 @@ struct mlx5_hw_actions {\n \t/* Dynamic action list. */\n \tLIST_HEAD(act_list, mlx5_action_construct_data) act_list;\n \tstruct mlx5_hw_jump_action *jump; /* Jump action. */\n+\tstruct mlx5_hrxq *tir; /* TIR action. */\n \tuint32_t acts_num:4; /* Total action number. */\n \t/* Translated DR action array from action template. */\n \tstruct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];\n@@ -1910,6 +1912,11 @@ struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,\n \t\t\t\t   struct mlx5_list_entry *entry, void *cb_ctx);\n void flow_dv_dest_array_clone_free_cb(void *tool_ctx,\n \t\t\t\t      struct mlx5_list_entry *entry);\n+void flow_dv_hashfields_set(uint64_t item_flags,\n+\t\t\t    struct mlx5_flow_rss_desc *rss_desc,\n+\t\t\t    uint64_t *hash_fields);\n+void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,\n+\t\t\t\t\tuint64_t *hash_field);\n \n struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);\n void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex abd1c27538..d48726cf05 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -10975,78 +10975,83 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,\n /**\n  * Set the hash fields according to the @p flow information.\n  *\n- * @param[in] dev_flow\n- *   Pointer to the mlx5_flow.\n+ * @param[in] item_flags\n+ *   The match pattern item flags.\n  * @param[in] rss_desc\n  *   Pointer to the mlx5_flow_rss_desc.\n+ * @param[out] hash_fields\n+ *   Pointer to the RSS hash fields.\n  */\n-static void\n-flow_dv_hashfields_set(struct mlx5_flow *dev_flow,\n-\t\t       struct mlx5_flow_rss_desc *rss_desc)\n+void\n+flow_dv_hashfields_set(uint64_t item_flags,\n+\t\t       struct mlx5_flow_rss_desc *rss_desc,\n+\t\t       uint64_t *hash_fields)\n {\n-\tuint64_t items = dev_flow->handle->layers;\n+\tuint64_t items = item_flags;\n+\tuint64_t fields = 0;\n \tint rss_inner = 0;\n \tuint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);\n \n-\tdev_flow->hash_fields = 0;\n+\t*hash_fields = 0;\n #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n \tif (rss_desc->level >= 2)\n \t\trss_inner = 1;\n #endif\n \tif ((rss_inner && (items & 
MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||\n-\t    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {\n+\t    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) ||\n+\t     !items) {\n \t\tif (rss_types & MLX5_IPV4_LAYER_TYPES) {\n \t\t\tif (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)\n-\t\t\t\tdev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;\n+\t\t\t\tfields |= IBV_RX_HASH_SRC_IPV4;\n \t\t\telse if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)\n-\t\t\t\tdev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;\n+\t\t\t\tfields |= IBV_RX_HASH_DST_IPV4;\n \t\t\telse\n-\t\t\t\tdev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;\n+\t\t\t\tfields |= MLX5_IPV4_IBV_RX_HASH;\n \t\t}\n \t} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||\n-\t\t   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {\n+\t\t   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) ||\n+\t\t   !items) {\n \t\tif (rss_types & MLX5_IPV6_LAYER_TYPES) {\n \t\t\tif (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)\n-\t\t\t\tdev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;\n+\t\t\t\tfields |= IBV_RX_HASH_SRC_IPV6;\n \t\t\telse if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)\n-\t\t\t\tdev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;\n+\t\t\t\tfields |= IBV_RX_HASH_DST_IPV6;\n \t\t\telse\n-\t\t\t\tdev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;\n+\t\t\t\tfields |= MLX5_IPV6_IBV_RX_HASH;\n \t\t}\n \t}\n-\tif (dev_flow->hash_fields == 0)\n+\tif (fields == 0)\n \t\t/*\n \t\t * There is no match between the RSS types and the\n \t\t * L3 protocol (IPv4/IPv6) defined in the flow rule.\n \t\t */\n \t\treturn;\n \tif ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||\n-\t    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {\n+\t    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||\n+\t    !items) {\n \t\tif (rss_types & RTE_ETH_RSS_UDP) {\n \t\t\tif (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)\n-\t\t\t\tdev_flow->hash_fields |=\n-\t\t\t\t\t\tIBV_RX_HASH_SRC_PORT_UDP;\n+\t\t\t\tfields |= IBV_RX_HASH_SRC_PORT_UDP;\n \t\t\telse if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)\n-\t\t\t\tdev_flow->hash_fields |=\n-\t\t\t\t\t\tIBV_RX_HASH_DST_PORT_UDP;\n+\t\t\t\tfields |= IBV_RX_HASH_DST_PORT_UDP;\n \t\t\telse\n-\t\t\t\tdev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;\n+\t\t\t\tfields |= MLX5_UDP_IBV_RX_HASH;\n \t\t}\n \t} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||\n-\t\t   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {\n+\t\t   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP)) ||\n+\t\t   !items) {\n \t\tif (rss_types & RTE_ETH_RSS_TCP) {\n \t\t\tif (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)\n-\t\t\t\tdev_flow->hash_fields |=\n-\t\t\t\t\t\tIBV_RX_HASH_SRC_PORT_TCP;\n+\t\t\t\tfields |= IBV_RX_HASH_SRC_PORT_TCP;\n \t\t\telse if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)\n-\t\t\t\tdev_flow->hash_fields |=\n-\t\t\t\t\t\tIBV_RX_HASH_DST_PORT_TCP;\n+\t\t\t\tfields |= IBV_RX_HASH_DST_PORT_TCP;\n \t\t\telse\n-\t\t\t\tdev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;\n+\t\t\t\tfields |= MLX5_TCP_IBV_RX_HASH;\n \t\t}\n \t}\n \tif (rss_inner)\n-\t\tdev_flow->hash_fields |= IBV_RX_HASH_INNER;\n+\t\tfields |= IBV_RX_HASH_INNER;\n+\t*hash_fields = fields;\n }\n \n /**\n@@ -11070,7 +11075,6 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,\n \t\t     struct mlx5_flow_rss_desc *rss_desc,\n \t\t     uint32_t *hrxq_idx)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_flow_handle *dh = dev_flow->handle;\n \tstruct mlx5_hrxq *hrxq;\n \n@@ -11081,11 +11085,8 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,\n 
\trss_desc->shared_rss = 0;\n \tif (rss_desc->hash_fields == 0)\n \t\trss_desc->queue_num = 1;\n-\t*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);\n-\tif (!*hrxq_idx)\n-\t\treturn NULL;\n-\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],\n-\t\t\t      *hrxq_idx);\n+\thrxq = mlx5_hrxq_get(dev, rss_desc);\n+\t*hrxq_idx = hrxq ? hrxq->idx : 0;\n \treturn hrxq;\n }\n \n@@ -11631,7 +11632,9 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,\n \t\t\t * rss->level and rss.types should be set in advance\n \t\t\t * when expanding items for RSS.\n \t\t\t */\n-\t\t\tflow_dv_hashfields_set(dev_flow, rss_desc);\n+\t\t\tflow_dv_hashfields_set(dev_flow->handle->layers,\n+\t\t\t\t\t       rss_desc,\n+\t\t\t\t\t       &dev_flow->hash_fields);\n \t\t\thrxq = flow_dv_hrxq_prepare(dev, dev_flow,\n \t\t\t\t\t\t    rss_desc, &hrxq_idx);\n \t\t\tif (!hrxq)\n@@ -13655,7 +13658,9 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t */\n \thandle->layers |= item_flags;\n \tif (action_flags & MLX5_FLOW_ACTION_RSS)\n-\t\tflow_dv_hashfields_set(dev_flow, rss_desc);\n+\t\tflow_dv_hashfields_set(dev_flow->handle->layers,\n+\t\t\t\t       rss_desc,\n+\t\t\t\t       &dev_flow->hash_fields);\n \t/* If has RSS action in the sample action, the Sample/Mirror resource\n \t * should be registered after the hash filed be update.\n \t */\n@@ -14604,20 +14609,18 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,\n  * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share\n  * same slot in mlx5_rss_hash_fields.\n  *\n- * @param[in] rss\n- *   Pointer to the shared action RSS conf.\n+ * @param[in] rss_types\n+ *   RSS type.\n  * @param[in, out] hash_field\n  *   hash_field variable needed to be adjusted.\n  *\n  * @return\n  *   void\n  */\n-static void\n-__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,\n-\t\t\t\t     uint64_t *hash_field)\n+void\n+flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,\n+\t\t\t\t   uint64_t *hash_field)\n {\n-\tuint64_t rss_types = rss->origin.types;\n-\n \tswitch (*hash_field & ~IBV_RX_HASH_INNER) {\n \tcase MLX5_RSS_HASH_IPV4:\n \t\tif (rss_types & MLX5_IPV4_LAYER_TYPES) {\n@@ -14700,12 +14703,15 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,\n \tsize_t i;\n \tint err;\n \n-\tif (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,\n-\t\t\t\t     !!dev->data->dev_started)) {\n+\tshared_rss->ind_tbl = mlx5_ind_table_obj_new\n+\t\t\t      (dev, shared_rss->origin.queue,\n+\t\t\t       shared_rss->origin.queue_num,\n+\t\t\t       true,\n+\t\t\t       !!dev->data->dev_started);\n+\tif (!shared_rss->ind_tbl)\n \t\treturn rte_flow_error_set(error, rte_errno,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\t  \"cannot setup indirection table\");\n-\t}\n \tmemcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);\n \trss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;\n \trss_desc.const_q = shared_rss->origin.queue;\n@@ -14714,19 +14720,20 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,\n \trss_desc.shared_rss = action_idx;\n \trss_desc.ind_tbl = shared_rss->ind_tbl;\n \tfor (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {\n-\t\tuint32_t hrxq_idx;\n+\t\tstruct mlx5_hrxq *hrxq;\n \t\tuint64_t hash_fields = mlx5_rss_hash_fields[i];\n \t\tint tunnel = 0;\n \n-\t\t__flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);\n+\t\tflow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types,\n+\t\t\t\t\t\t   &hash_fields);\n \t\tif (shared_rss->origin.level > 1) {\n \t\t\thash_fields |= IBV_RX_HASH_INNER;\n \t\t\ttunnel = 
1;\n \t\t}\n \t\trss_desc.tunnel = tunnel;\n \t\trss_desc.hash_fields = hash_fields;\n-\t\thrxq_idx = mlx5_hrxq_get(dev, &rss_desc);\n-\t\tif (!hrxq_idx) {\n+\t\thrxq = mlx5_hrxq_get(dev, &rss_desc);\n+\t\tif (!hrxq) {\n \t\t\trte_flow_error_set\n \t\t\t\t(error, rte_errno,\n \t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n@@ -14734,14 +14741,14 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,\n \t\t\tgoto error_hrxq_new;\n \t\t}\n \t\terr = __flow_dv_action_rss_hrxq_set\n-\t\t\t(shared_rss, hash_fields, hrxq_idx);\n+\t\t\t(shared_rss, hash_fields, hrxq->idx);\n \t\tMLX5_ASSERT(!err);\n \t}\n \treturn 0;\n error_hrxq_new:\n \terr = rte_errno;\n \t__flow_dv_action_rss_hrxqs_release(dev, shared_rss);\n-\tif (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))\n+\tif (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))\n \t\tshared_rss->ind_tbl = NULL;\n \trte_errno = err;\n \treturn -rte_errno;\n@@ -14772,18 +14779,14 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_shared_action_rss *shared_rss = NULL;\n-\tvoid *queue = NULL;\n \tstruct rte_flow_action_rss *origin;\n \tconst uint8_t *rss_key;\n-\tuint32_t queue_size = rss->queue_num * sizeof(uint16_t);\n \tuint32_t idx;\n \n \tRTE_SET_USED(conf);\n-\tqueue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),\n-\t\t\t    0, SOCKET_ID_ANY);\n \tshared_rss = mlx5_ipool_zmalloc\n \t\t\t (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);\n-\tif (!shared_rss || !queue) {\n+\tif (!shared_rss) {\n \t\trte_flow_error_set(error, ENOMEM,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t   \"cannot allocate resource memory\");\n@@ -14795,18 +14798,6 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,\n \t\t\t\t   \"rss action number out of range\");\n \t\tgoto error_rss_init;\n \t}\n-\tshared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,\n-\t\t\t\t\t  sizeof(*shared_rss->ind_tbl),\n-\t\t\t\t\t  0, SOCKET_ID_ANY);\n-\tif (!shared_rss->ind_tbl) {\n-\t\trte_flow_error_set(error, ENOMEM,\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t   \"cannot allocate resource memory\");\n-\t\tgoto error_rss_init;\n-\t}\n-\tmemcpy(queue, rss->queue, queue_size);\n-\tshared_rss->ind_tbl->queues = queue;\n-\tshared_rss->ind_tbl->queues_n = rss->queue_num;\n \torigin = &shared_rss->origin;\n \torigin->func = rss->func;\n \torigin->level = rss->level;\n@@ -14817,10 +14808,12 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,\n \tmemcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);\n \torigin->key = &shared_rss->key[0];\n \torigin->key_len = MLX5_RSS_HASH_KEY_LEN;\n-\torigin->queue = queue;\n+\torigin->queue = rss->queue;\n \torigin->queue_num = rss->queue_num;\n \tif (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))\n \t\tgoto error_rss_init;\n+\t/* Update queue with indirect table queue memoyr. 
*/\n+\torigin->queue = shared_rss->ind_tbl->queues;\n \trte_spinlock_init(&shared_rss->action_rss_sl);\n \t__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);\n \trte_spinlock_lock(&priv->shared_act_sl);\n@@ -14831,12 +14824,11 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,\n error_rss_init:\n \tif (shared_rss) {\n \t\tif (shared_rss->ind_tbl)\n-\t\t\tmlx5_free(shared_rss->ind_tbl);\n+\t\t\tmlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,\n+\t\t\t\t\t\t   !!dev->data->dev_started);\n \t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],\n \t\t\t\tidx);\n \t}\n-\tif (queue)\n-\t\tmlx5_free(queue);\n \treturn 0;\n }\n \n@@ -14864,7 +14856,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,\n \t    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);\n \tuint32_t old_refcnt = 1;\n \tint remaining;\n-\tuint16_t *queue = NULL;\n \n \tif (!shared_rss)\n \t\treturn rte_flow_error_set(error, EINVAL,\n@@ -14883,8 +14874,7 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t  NULL,\n \t\t\t\t\t  \"shared rss hrxq has references\");\n-\tqueue = shared_rss->ind_tbl->queues;\n-\tremaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,\n+\tremaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,\n \t\t\t\t\t       !!dev->data->dev_started);\n \tif (remaining)\n \t\treturn rte_flow_error_set(error, EBUSY,\n@@ -14892,7 +14882,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,\n \t\t\t\t\t  NULL,\n \t\t\t\t\t  \"shared rss indirection table has\"\n \t\t\t\t\t  \" references\");\n-\tmlx5_free(queue);\n \trte_spinlock_lock(&priv->shared_act_sl);\n \tILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],\n \t\t     &priv->rss_shared_actions, idx, shared_rss, next);\n@@ -15071,7 +15060,7 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,\n \t    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);\n \tint ret = 0;\n \tvoid *queue = NULL;\n-\tuint16_t *queue_old = NULL;\n+\tvoid *queue_i = NULL;\n \tuint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);\n \tbool dev_started = !!dev->data->dev_started;\n \n@@ -15094,22 +15083,23 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,\n \tmemcpy(queue, action_conf->queue, queue_size);\n \tMLX5_ASSERT(shared_rss->ind_tbl);\n \trte_spinlock_lock(&shared_rss->action_rss_sl);\n-\tqueue_old = shared_rss->ind_tbl->queues;\n+\tqueue_i = shared_rss->ind_tbl->queues;\n \tret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,\n \t\t\t\t\tqueue, action_conf->queue_num,\n \t\t\t\t\ttrue /* standalone */,\n \t\t\t\t\tdev_started /* ref_new_qs */,\n \t\t\t\t\tdev_started /* deref_old_qs */);\n \tif (ret) {\n-\t\tmlx5_free(queue);\n \t\tret = rte_flow_error_set(error, rte_errno,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"cannot update indirection table\");\n \t} else {\n-\t\tmlx5_free(queue_old);\n-\t\tshared_rss->origin.queue = queue;\n+\t\t/* Restore the queue to indirect table internal queue. 
*/\n+\t\tmemcpy(queue_i, queue, queue_size);\n+\t\tshared_rss->ind_tbl->queues = queue_i;\n \t\tshared_rss->origin.queue_num = action_conf->queue_num;\n \t}\n+\tmlx5_free(queue);\n \trte_spinlock_unlock(&shared_rss->action_rss_sl);\n \treturn ret;\n }\n@@ -16845,11 +16835,12 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,\n \tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {\n \t\tif (!rss_desc[i])\n \t\t\tcontinue;\n-\t\thrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);\n-\t\tif (!hrxq_idx[i]) {\n+\t\thrxq = mlx5_hrxq_get(dev, rss_desc[i]);\n+\t\tif (!hrxq) {\n \t\t\trte_spinlock_unlock(&mtr_policy->sl);\n \t\t\treturn NULL;\n \t\t}\n+\t\thrxq_idx[i] = hrxq->idx;\n \t}\n \tsub_policy_num = (mtr_policy->sub_policy_num >>\n \t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex f320d0db8c..0d49ab0bb2 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -7,6 +7,7 @@\n #include <mlx5_malloc.h>\n #include \"mlx5_defs.h\"\n #include \"mlx5_flow.h\"\n+#include \"mlx5_rx.h\"\n \n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \n@@ -95,6 +96,56 @@ flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)\n \tmlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);\n }\n \n+/**\n+ * Register queue/RSS action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] hws_flags\n+ *   DR action flags.\n+ * @param[in] action\n+ *   rte flow action.\n+ *\n+ * @return\n+ *    Table on success, NULL otherwise and rte_errno is set.\n+ */\n+static inline struct mlx5_hrxq*\n+flow_hw_tir_action_register(struct rte_eth_dev *dev,\n+\t\t\t    uint32_t hws_flags,\n+\t\t\t    const struct rte_flow_action *action)\n+{\n+\tstruct mlx5_flow_rss_desc rss_desc = {\n+\t\t.hws_flags = hws_flags,\n+\t};\n+\tstruct mlx5_hrxq *hrxq;\n+\n+\tif (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\tconst struct rte_flow_action_queue *queue = action->conf;\n+\n+\t\trss_desc.const_q = &queue->index;\n+\t\trss_desc.queue_num = 1;\n+\t} else {\n+\t\tconst struct rte_flow_action_rss *rss = action->conf;\n+\n+\t\trss_desc.queue_num = rss->queue_num;\n+\t\trss_desc.const_q = rss->queue;\n+\t\tmemcpy(rss_desc.key,\n+\t\t       !rss->key ? rss_hash_default_key : rss->key,\n+\t\t       MLX5_RSS_HASH_KEY_LEN);\n+\t\trss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;\n+\t\trss_desc.types = !rss->types ? 
RTE_ETH_RSS_IP : rss->types;\n+\t\tflow_dv_hashfields_set(0, &rss_desc, &rss_desc.hash_fields);\n+\t\tflow_dv_action_rss_l34_hash_adjust(rss->types,\n+\t\t\t\t\t\t   &rss_desc.hash_fields);\n+\t\tif (rss->level > 1) {\n+\t\t\trss_desc.hash_fields |= IBV_RX_HASH_INNER;\n+\t\t\trss_desc.tunnel = 1;\n+\t\t}\n+\t}\n+\thrxq = mlx5_hrxq_get(dev, &rss_desc);\n+\treturn hrxq;\n+}\n+\n /**\n  * Destroy DR actions created by action template.\n  *\n@@ -266,6 +317,40 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t}\n \t\t\ti++;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\t\tif (masks->conf) {\n+\t\t\t\tacts->tir = flow_hw_tir_action_register\n+\t\t\t\t(dev,\n+\t\t\t\t mlx5_hw_act_flag[!!attr->group][type],\n+\t\t\t\t actions);\n+\t\t\t\tif (!acts->tir)\n+\t\t\t\t\tgoto err;\n+\t\t\t\tacts->rule_acts[i].action =\n+\t\t\t\t\tacts->tir->action;\n+\t\t\t} else if (__flow_hw_act_data_general_append\n+\t\t\t\t\t(priv, acts, actions->type,\n+\t\t\t\t\t actions - action_start, i)) {\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\ti++;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\t\tif (masks->conf) {\n+\t\t\t\tacts->tir = flow_hw_tir_action_register\n+\t\t\t\t(dev,\n+\t\t\t\t mlx5_hw_act_flag[!!attr->group][type],\n+\t\t\t\t actions);\n+\t\t\t\tif (!acts->tir)\n+\t\t\t\t\tgoto err;\n+\t\t\t\tacts->rule_acts[i].action =\n+\t\t\t\t\tacts->tir->action;\n+\t\t\t} else if (__flow_hw_act_data_general_append\n+\t\t\t\t\t(priv, acts, actions->type,\n+\t\t\t\t\t actions - action_start, i)) {\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\ti++;\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n \t\t\tactions_end = true;\n \t\t\tbreak;\n@@ -319,6 +404,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \tstruct rte_flow_attr attr = {\n \t\t\t.ingress = 1,\n \t};\n+\tuint32_t ft_flag;\n \n \tmemcpy(rule_acts, hw_acts->rule_acts,\n \t       sizeof(*rule_acts) * hw_acts->acts_num);\n@@ -326,6 +412,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \tif (LIST_EMPTY(&hw_acts->act_list))\n \t\treturn 0;\n \tattr.group = table->grp->group_id;\n+\tft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];\n \tif (table->type == MLX5DR_TABLE_TYPE_FDB) {\n \t\tattr.transfer = 1;\n \t\tattr.ingress = 1;\n@@ -338,6 +425,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \tLIST_FOREACH(act_data, &hw_acts->act_list, next) {\n \t\tuint32_t jump_group;\n \t\tstruct mlx5_hw_jump_action *jump;\n+\t\tstruct mlx5_hrxq *hrxq;\n \n \t\taction = &actions[act_data->action_src];\n \t\tMLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||\n@@ -359,6 +447,17 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\tjob->flow->jump = jump;\n \t\t\tjob->flow->fate_type = MLX5_FLOW_FATE_JUMP;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\t\thrxq = flow_hw_tir_action_register(dev,\n+\t\t\t\t\tft_flag,\n+\t\t\t\t\taction);\n+\t\t\tif (!hrxq)\n+\t\t\t\treturn -1;\n+\t\t\trule_acts[act_data->action_dst].action = hrxq->action;\n+\t\t\tjob->flow->hrxq = hrxq;\n+\t\t\tjob->flow->fate_type = MLX5_FLOW_FATE_QUEUE;\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n@@ -565,6 +664,8 @@ flow_hw_pull(struct rte_eth_dev *dev,\n \t\tif (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {\n \t\t\tif (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)\n \t\t\t\tflow_hw_jump_release(dev, job->flow->jump);\n+\t\t\telse if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)\n+\t\t\t\tmlx5_hrxq_obj_release(dev, job->flow->hrxq);\n \t\t\tmlx5_ipool_free(job->flow->table->flow, 
job->flow->idx);\n \t\t}\n \t\tpriv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;\ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex 90ccb9aaff..f08aa7a770 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -1943,7 +1943,6 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\tMLX5_ASSERT(priv->drop_queue.hrxq);\n \t\t\thrxq = priv->drop_queue.hrxq;\n \t\t} else {\n-\t\t\tuint32_t hrxq_idx;\n \t\t\tstruct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;\n \n \t\t\tMLX5_ASSERT(rss_desc->queue_num);\n@@ -1952,9 +1951,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\trss_desc->tunnel = !!(handle->layers &\n \t\t\t\t\t      MLX5_FLOW_LAYER_TUNNEL);\n \t\t\trss_desc->shared_rss = 0;\n-\t\t\thrxq_idx = mlx5_hrxq_get(dev, rss_desc);\n-\t\t\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],\n-\t\t\t\t\t      hrxq_idx);\n+\t\t\thrxq = mlx5_hrxq_get(dev, rss_desc);\n \t\t\tif (!hrxq) {\n \t\t\t\trte_flow_error_set\n \t\t\t\t\t(error, rte_errno,\n@@ -1962,7 +1959,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\t\t\t \"cannot get hash queue\");\n \t\t\t\tgoto error;\n \t\t\t}\n-\t\t\thandle->rix_hrxq = hrxq_idx;\n+\t\t\thandle->rix_hrxq = hrxq->idx;\n \t\t}\n \t\tMLX5_ASSERT(hrxq);\n \t\thandle->drv_flow = mlx5_glue->create_flow\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 38335fd744..295dba063b 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -224,9 +224,13 @@ int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);\n struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,\n \t\t\t\t\t\t  const uint16_t *queues,\n \t\t\t\t\t\t  uint32_t queues_n);\n+struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,\n+\t\t\t\t\t\t  const uint16_t *queues,\n+\t\t\t\t\t\t  uint32_t queues_n,\n+\t\t\t\t\t\t  bool standalone,\n+\t\t\t\t\t\t  bool ref_qs);\n int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,\n \t\t\t       struct mlx5_ind_table_obj *ind_tbl,\n-\t\t\t       bool standalone,\n \t\t\t       bool deref_rxqs);\n int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,\n \t\t\t     struct mlx5_ind_table_obj *ind_tbl,\n@@ -249,8 +253,9 @@ struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,\n \t\t\t\t\t   void *cb_ctx __rte_unused);\n void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,\n \t\t\t     struct mlx5_list_entry *entry);\n-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,\n+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,\n \t\t       struct mlx5_flow_rss_desc *rss_desc);\n+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);\n int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);\n uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);\n enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 809006f66a..e7284f9da9 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -2279,8 +2279,6 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,\n  *   Pointer to Ethernet device.\n  * @param ind_table\n  *   Indirection table to release.\n- * @param standalone\n- *   Indirection table for Standalone queue.\n  * @param deref_rxqs\n  *   If true, then dereference RX queues related to indirection table.\n  *   Otherwise, no additional action 
will be taken.\n@@ -2291,7 +2289,6 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,\n int\n mlx5_ind_table_obj_release(struct rte_eth_dev *dev,\n \t\t\t   struct mlx5_ind_table_obj *ind_tbl,\n-\t\t\t   bool standalone,\n \t\t\t   bool deref_rxqs)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -2299,7 +2296,7 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,\n \n \trte_rwlock_write_lock(&priv->ind_tbls_lock);\n \tret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);\n-\tif (!ret && !standalone)\n+\tif (!ret)\n \t\tLIST_REMOVE(ind_tbl, next);\n \trte_rwlock_write_unlock(&priv->ind_tbls_lock);\n \tif (ret)\n@@ -2408,7 +2405,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,\n  * @return\n  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.\n  */\n-static struct mlx5_ind_table_obj *\n+struct mlx5_ind_table_obj *\n mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,\n \t\t       uint32_t queues_n, bool standalone, bool ref_qs)\n {\n@@ -2416,8 +2413,13 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,\n \tstruct mlx5_ind_table_obj *ind_tbl;\n \tint ret;\n \n+\t/*\n+\t * Allocate maximum queues for shared action as queue number\n+\t * maybe modified later.\n+\t */\n \tind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +\n-\t\t\t      queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);\n+\t\t\t      (standalone ? priv->rxqs_n : queues_n) *\n+\t\t\t      sizeof(uint16_t), 0, SOCKET_ID_ANY);\n \tif (!ind_tbl) {\n \t\trte_errno = ENOMEM;\n \t\treturn NULL;\n@@ -2430,11 +2432,13 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,\n \t\tmlx5_free(ind_tbl);\n \t\treturn NULL;\n \t}\n-\tif (!standalone) {\n-\t\trte_rwlock_write_lock(&priv->ind_tbls_lock);\n+\trte_rwlock_write_lock(&priv->ind_tbls_lock);\n+\tif (!standalone)\n \t\tLIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);\n-\t\trte_rwlock_write_unlock(&priv->ind_tbls_lock);\n-\t}\n+\telse\n+\t\tLIST_INSERT_HEAD(&priv->standalone_ind_tbls, ind_tbl, next);\n+\trte_rwlock_write_unlock(&priv->ind_tbls_lock);\n+\n \treturn ind_tbl;\n }\n \n@@ -2600,6 +2604,7 @@ mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,\n \n \treturn (hrxq->rss_key_len != rss_desc->key_len ||\n \t    memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||\n+\t    hrxq->hws_flags != rss_desc->hws_flags ||\n \t    hrxq->hash_fields != rss_desc->hash_fields ||\n \t    hrxq->ind_table->queues_n != rss_desc->queue_num ||\n \t    memcmp(hrxq->ind_table->queues, rss_desc->queue,\n@@ -2684,8 +2689,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,\n \t}\n \tif (ind_tbl != hrxq->ind_table) {\n \t\tMLX5_ASSERT(!hrxq->standalone);\n-\t\tmlx5_ind_table_obj_release(dev, hrxq->ind_table,\n-\t\t\t\t\t   hrxq->standalone, true);\n+\t\tmlx5_ind_table_obj_release(dev, hrxq->ind_table, true);\n \t\thrxq->ind_table = ind_tbl;\n \t}\n \thrxq->hash_fields = hash_fields;\n@@ -2695,8 +2699,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,\n \terr = rte_errno;\n \tif (ind_tbl != hrxq->ind_table) {\n \t\tMLX5_ASSERT(!hrxq->standalone);\n-\t\tmlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,\n-\t\t\t\t\t   true);\n+\t\tmlx5_ind_table_obj_release(dev, ind_tbl, true);\n \t}\n \trte_errno = err;\n \treturn -rte_errno;\n@@ -2708,12 +2711,16 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \n #ifdef 
HAVE_IBV_FLOW_DV_SUPPORT\n-\tmlx5_glue->destroy_flow_action(hrxq->action);\n+\tif (hrxq->hws_flags)\n+\t\tmlx5dr_action_destroy(hrxq->action);\n+\telse\n+\t\tmlx5_glue->destroy_flow_action(hrxq->action);\n #endif\n \tpriv->obj_ops.hrxq_destroy(hrxq);\n \tif (!hrxq->standalone) {\n \t\tmlx5_ind_table_obj_release(dev, hrxq->ind_table,\n-\t\t\t\t\t   hrxq->standalone, true);\n+\t\t\t\t\t   hrxq->hws_flags ?\n+\t\t\t\t\t   (!!dev->data->dev_started) : true);\n \t}\n \tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);\n }\n@@ -2757,11 +2764,12 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,\n \tint ret;\n \n \tqueues_n = rss_desc->hash_fields ? queues_n : 1;\n-\tif (!ind_tbl)\n+\tif (!ind_tbl && !rss_desc->hws_flags)\n \t\tind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);\n \tif (!ind_tbl)\n \t\tind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,\n-\t\t\t\t\t\t standalone,\n+\t\t\t\t\t\t standalone ||\n+\t\t\t\t\t\t rss_desc->hws_flags,\n \t\t\t\t\t\t !!dev->data->dev_started);\n \tif (!ind_tbl)\n \t\treturn NULL;\n@@ -2773,6 +2781,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,\n \thrxq->ind_table = ind_tbl;\n \thrxq->rss_key_len = rss_key_len;\n \thrxq->hash_fields = rss_desc->hash_fields;\n+\thrxq->hws_flags = rss_desc->hws_flags;\n \tmemcpy(hrxq->rss_key, rss_key, rss_key_len);\n \tret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);\n \tif (ret < 0)\n@@ -2780,7 +2789,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,\n \treturn hrxq;\n error:\n \tif (!rss_desc->ind_tbl)\n-\t\tmlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);\n+\t\tmlx5_ind_table_obj_release(dev, ind_tbl, true);\n \tif (hrxq)\n \t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);\n \treturn NULL;\n@@ -2834,13 +2843,13 @@ mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)\n  *   RSS configuration for the Rx hash queue.\n  *\n  * @return\n- *   An hash Rx queue index on success.\n+ *   An hash Rx queue on success.\n  */\n-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,\n+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,\n \t\t       struct mlx5_flow_rss_desc *rss_desc)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hrxq *hrxq;\n+\tstruct mlx5_hrxq *hrxq = NULL;\n \tstruct mlx5_list_entry *entry;\n \tstruct mlx5_flow_cb_ctx ctx = {\n \t\t.data = rss_desc,\n@@ -2851,12 +2860,10 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,\n \t} else {\n \t\tentry = mlx5_list_register(priv->hrxqs, &ctx);\n \t\tif (!entry)\n-\t\t\treturn 0;\n+\t\t\treturn NULL;\n \t\thrxq = container_of(entry, typeof(*hrxq), entry);\n \t}\n-\tif (hrxq)\n-\t\treturn hrxq->idx;\n-\treturn 0;\n+\treturn hrxq;\n }\n \n /**\n@@ -2865,17 +2872,15 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,\n  * @param dev\n  *   Pointer to Ethernet device.\n  * @param hrxq_idx\n- *   Index to Hash Rx queue to release.\n+ *   Hash Rx queue to release.\n  *\n  * @return\n  *   1 while a reference on it exists, 0 when freed.\n  */\n-int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)\n+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hrxq *hrxq;\n \n-\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);\n \tif (!hrxq)\n \t\treturn 0;\n \tif (!hrxq->standalone)\n@@ -2884,6 +2889,26 @@ int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)\n \treturn 0;\n }\n \n+/**\n+ * Release the hash Rx queue with index.\n+ *\n+ * @param dev\n+ *   
Pointer to Ethernet device.\n+ * @param hrxq_idx\n+ *   Index to Hash Rx queue to release.\n+ *\n+ * @return\n+ *   1 while a reference on it exists, 0 when freed.\n+ */\n+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hrxq *hrxq;\n+\n+\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);\n+\treturn mlx5_hrxq_obj_release(dev, hrxq);\n+}\n+\n /**\n  * Create a drop Rx Hash queue.\n  *\n",
    "prefixes": [
        "v3",
        "11/14"
    ]
}
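
The Allow header above also advertises PUT and PATCH: authenticated
maintainers can update mutable fields such as state or delegate. A minimal
partial-update sketch, again in Python with requests, assuming a Patchwork
API token with maintainer rights on the project (the token below is a
placeholder, not a real credential):

    import requests

    # PATCH performs a partial update: only the supplied fields change.
    url = "http://patches.dpdk.org/api/patches/108210/"
    headers = {"Authorization": "Token 0123456789abcdef"}  # placeholder token

    resp = requests.patch(url, headers=headers, json={"state": "superseded"})
    resp.raise_for_status()
    print(resp.json()["state"])  # "superseded"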