get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update).
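
A minimal sketch of reading this endpoint from Python with the requests library (an illustration only, not part of the API page; the URL and patch ID are taken from the sample request below, and no authentication is needed for read access):

    import requests

    # Fetch patch 92249 (the example below) as JSON.
    resp = requests.get("http://patches.dpdk.org/api/patches/92249/")
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields shown in the sample response.
    print(patch["name"])               # "[v8,3/4] net/mlx5: prepare sub-policy for a flow with meter"
    print(patch["state"])              # "accepted"
    print(patch["series"][0]["mbox"])  # mbox URL for the whole series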

GET /api/patches/92249/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 92249,
    "url": "http://patches.dpdk.org/api/patches/92249/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210427104354.4112-4-lizh@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210427104354.4112-4-lizh@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210427104354.4112-4-lizh@nvidia.com",
    "date": "2021-04-27T10:43:53",
    "name": "[v8,3/4] net/mlx5: prepare sub-policy for a flow with meter",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "c55d69ba96b2ef0387cb774aa3c8ecda26e81202",
    "submitter": {
        "id": 1967,
        "url": "http://patches.dpdk.org/api/people/1967/?format=api",
        "name": "Li Zhang",
        "email": "lizh@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210427104354.4112-4-lizh@nvidia.com/mbox/",
    "series": [
        {
            "id": 16698,
            "url": "http://patches.dpdk.org/api/series/16698/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=16698",
            "date": "2021-04-27T10:43:50",
            "name": "net/mlx5: support meter policy operations",
            "version": 8,
            "mbox": "http://patches.dpdk.org/series/16698/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/92249/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/92249/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1FC55A0548;\n\tTue, 27 Apr 2021 12:44:48 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3158741230;\n\tTue, 27 Apr 2021 12:44:36 +0200 (CEST)",
            "from NAM10-BN7-obe.outbound.protection.outlook.com\n (mail-bn7nam10on2062.outbound.protection.outlook.com [40.107.92.62])\n by mails.dpdk.org (Postfix) with ESMTP id 75B77411C5\n for <dev@dpdk.org>; Tue, 27 Apr 2021 12:44:29 +0200 (CEST)",
            "from BN9PR03CA0505.namprd03.prod.outlook.com (2603:10b6:408:130::30)\n by CY4PR1201MB2533.namprd12.prod.outlook.com (2603:10b6:903:d9::23)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4087.25; Tue, 27 Apr\n 2021 10:44:27 +0000",
            "from BN8NAM11FT017.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:130:cafe::a3) by BN9PR03CA0505.outlook.office365.com\n (2603:10b6:408:130::30) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4065.22 via Frontend\n Transport; Tue, 27 Apr 2021 10:44:27 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n BN8NAM11FT017.mail.protection.outlook.com (10.13.177.93) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4065.21 via Frontend Transport; Tue, 27 Apr 2021 10:44:27 +0000",
            "from nvidia.com (172.20.145.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 27 Apr\n 2021 10:44:24 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=ERwwzJ+yImIbME6CUPQ4ZcylJPe7f4UZO6Ts82RJyXfN7c+8/bHP+5sHd2p+pee9OlDVzKPG7G8xDn/Kdo2sFDbzqOoaiCzohSauDrrQS9Xl/QoeLZKtjybrPQyDtdqLoUb5TRCKg3n2qV/XiH+EYXeHYSaCuFjK9pto8v2WInPc8GSZCSZkRLwKcQl/QrHY76zjYNzQpPGjJe9gob3lWg4pBh3oWwypE9MNym4BWqzMIJx/tASbdiDjaQGeOEF/7C5oaeO85ik0Mp0Uwqulm5YO5jT15QVo8kRti+4Q6jyJag+96tBk5qLZqeU2P1ooYPefdfJgM1+bntF+oo9DRw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=cVwJGGOqJ+kMtvdyOyk435FwCBV5GSg1ATBXfniGFyU=;\n b=n9oJ0Rlxo188oqYEuzsDBd13D8TqSEZx9kRlmuf4NGHVslIpJT+h4tBqMrCIXLb/Y0/FVSCoeiKF31wAt64RcCLrD3+IUKfoSwK+78JA63A3WyJFrWNsmItEvuwFYYyJBfZA6KhtkCHDhhrDrvCT9oyPC1TrENd2W+qvEbGNg9pIPXfWwFfppuopu45JgjQiPC9widO/9UNUj6KigqGcYd0YWf8SlvXz0x/9cNnM1iRzxMUNQXuR7LqN6n+ujOgcOn2I4byhmqqzrePfw1U2xH7N8YpYKKFYhqcrMLpTEwnKYMp4ohFR0rQEff2Snq2XzWCtI6HqMrCC+AppLSjO0g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=none sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=cVwJGGOqJ+kMtvdyOyk435FwCBV5GSg1ATBXfniGFyU=;\n b=CcDKMCx02MtDwaxtegZ7LzpvZ+Jam5wCKpaCwlPtgacRv0JZ61QMawVGYxwOMRutjwLd1NYlGhWxdhxJGW9k7oD45OCeeacAUbqFZr/aiqArKeewf/JefDLnLDeiesB4WCe52ohKigwEz9oZKEKdJJeZ33M7Nnws7V2qsT/aJTG5UpBuoKUvCTPEFkzvEJTCGM0u5/zboTzeFqOfLNOGCTQuEaCJbc8RTTZoOypKxacYGuVTFNUJ/1RXLnyAXP+rA/YVbVI7xAW9U0VXC7/jHiUN6W0qS+jE9eBt+rU68JkZislgztmTGwSoBz0qxhbvbR1DA2mee5DskhZbOWBFIQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; monjalon.net; dkim=none (message not signed)\n header.d=none;monjalon.net; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Li Zhang <lizh@nvidia.com>",
        "To": "<dekelp@nvidia.com>, <orika@nvidia.com>, <viacheslavo@nvidia.com>,\n <matan@nvidia.com>, <shahafs@nvidia.com>",
        "CC": "<dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,\n <roniba@nvidia.com>",
        "Date": "Tue, 27 Apr 2021 13:43:53 +0300",
        "Message-ID": "<20210427104354.4112-4-lizh@nvidia.com>",
        "X-Mailer": "git-send-email 2.21.0",
        "In-Reply-To": "<20210427104354.4112-1-lizh@nvidia.com>",
        "References": "<20210401081624.1482490-1-lizh@nvidia.com>\n <20210427104354.4112-1-lizh@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.145.6]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "1c9f2fca-9874-4f64-c774-08d909696e21",
        "X-MS-TrafficTypeDiagnostic": "CY4PR1201MB2533:",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <CY4PR1201MB2533D67147B2AFDB3BEB2348BF419@CY4PR1201MB2533.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:3513;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n T4J28ApUkwwbPrmgjP+GR4QfUOdAv5nrfZ/wjx2s7hoXuRK1OMNJfHFNY6BHUqpHIhepjbsN5wy7jAGnu22R7wQtFogLRbUMND053gEvVCCFIKVLpg34OrhhTMP+dHh2rmWJ7SUZvYYNsym81hxf4W/A5talNlg5cF8VYZFsCL7XEbO2zBjMD9WTuZ6q4MPULPlohAsxgGkVeX9s3CAVXgP39FzfprNmHauBSsWvEssUjajiQ44kcF/CZXaB+qtFYADn4Y/VnWpI+rTzfB9ldeADP11solvK2NJ+oSE/WDffAMtRZAwDgRSk3wakkXnEQ3l/eLx1v28SxSqQJZYb/+r9LkX8TyB8QPMWmxq9htVG7PXd2RqlDLtELKDyzLg8iIy5x7m3Kxi2gaRlkB6/BrUIyxjKymH48jYDf3vKXNfG9trLO7nsx0pCWo31XCn2TpshBom2tMYVuX+hoJ12Ejwe0cwPK68JDXaw4oDReX3oxT0ILKf5pAPw8hl2hRb/eDBo2ov/gN2oGuVAIR8m8vr8obwMOU1feKPgH2Giu6EivWtOpTqZl54jJh6aj01nUQ91wDArAKhwN7I/9OqBKK1VB/dab2Mw41Afx2hWQih5Lli+bnr24VD9f1L9ZsHkimo8+gE8U1CuPvoy3grMLxIkVXULNHXbsf7KQ8qtdh0=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(136003)(376002)(346002)(39860400002)(396003)(36840700001)(46966006)(70586007)(70206006)(336012)(316002)(16526019)(186003)(86362001)(36860700001)(26005)(6286002)(2616005)(426003)(107886003)(47076005)(55016002)(83380400001)(5660300002)(8676002)(2906002)(7696005)(82310400003)(4326008)(1076003)(478600001)(8936002)(82740400003)(110136005)(36906005)(36756003)(356005)(6636002)(7636003)(54906003)(6666004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "27 Apr 2021 10:44:27.1130 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 1c9f2fca-9874-4f64-c774-08d909696e21",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT017.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CY4PR1201MB2533",
        "Subject": "[dpdk-dev] [PATCH v8 3/4] net/mlx5: prepare sub-policy for a flow\n with meter",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "When a flow has a RSS action, the driver splits\neach sub flow finally is configured with\na different HW TIR action.\n\nAny RSS action configured in meter policy may cause\na split in the flow configuration.\nTo save performance, any TIR action will be configured\nin different flow table, so policy can be split to\nsub-policies per TIR in the flow creation time.\n\nCreate a function to prepare the policy and\nits sub-policies for a configured flow with meter.\n\nSigned-off-by: Li Zhang <lizh@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.h    |  10 +++\n drivers/net/mlx5/mlx5_flow_dv.c | 145 ++++++++++++++++++++++++++++++++\n 2 files changed, 155 insertions(+)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 98f6132332..a80c7903a2 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1094,6 +1094,11 @@ typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,\n typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,\n \t\t\t\tstruct mlx5_flow_meter_info *fm);\n typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);\n+typedef struct mlx5_flow_meter_sub_policy *\n+\t(*mlx5_flow_meter_sub_policy_rss_prepare_t)\n+\t\t(struct rte_eth_dev *dev,\n+\t\tstruct mlx5_flow_meter_policy *mtr_policy,\n+\t\tstruct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);\n typedef uint32_t (*mlx5_flow_mtr_alloc_t)\n \t\t\t\t\t    (struct rte_eth_dev *dev);\n typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,\n@@ -1186,6 +1191,7 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_destroy_policy_rules_t destroy_policy_rules;\n \tmlx5_flow_create_def_policy_t create_def_policy;\n \tmlx5_flow_destroy_def_policy_t destroy_def_policy;\n+\tmlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;\n \tmlx5_flow_counter_alloc_t counter_alloc;\n \tmlx5_flow_counter_free_t counter_free;\n \tmlx5_flow_counter_query_t counter_query;\n@@ -1417,6 +1423,10 @@ int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,\n void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,\n \t\t\t       struct mlx5_flow_meter_info *fm);\n void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);\n+struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare\n+\t\t(struct rte_eth_dev *dev,\n+\t\tstruct mlx5_flow_meter_policy *mtr_policy,\n+\t\tstruct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);\n int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);\n int mlx5_action_handle_flush(struct rte_eth_dev *dev);\n void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 6e2a3e85f7..6bccdf5b16 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -14874,6 +14874,150 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,\n \treturn -1;\n }\n \n+/**\n+ * Find the policy table for prefix table with RSS.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device.\n+ * @param[in] mtr_policy\n+ *   Pointer to meter policy table.\n+ * @param[in] rss_desc\n+ *   Pointer to rss_desc\n+ * @return\n+ *   Pointer to table set on success, NULL otherwise and rte_errno is set.\n+ */\n+static struct mlx5_flow_meter_sub_policy *\n+flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,\n+\t\tstruct mlx5_flow_meter_policy *mtr_policy,\n+\t\tstruct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_meter_sub_policy *sub_policy = NULL;\n+\tuint32_t sub_policy_idx = 0;\n+\tuint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};\n+\tuint32_t i, j;\n+\tstruct mlx5_hrxq *hrxq;\n+\tstruct mlx5_flow_handle dh;\n+\tstruct mlx5_meter_policy_action_container *act_cnt;\n+\tuint32_t domain = MLX5_MTR_DOMAIN_INGRESS;\n+\tuint16_t sub_policy_num;\n+\n+\trte_spinlock_lock(&mtr_policy->sl);\n+\tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {\n+\t\tif (!rss_desc[i])\n+\t\t\tcontinue;\n+\t\thrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);\n+\t\tif (!hrxq_idx[i]) {\n+\t\t\trte_spinlock_unlock(&mtr_policy->sl);\n+\t\t\treturn 
NULL;\n+\t\t}\n+\t}\n+\tsub_policy_num = (mtr_policy->sub_policy_num >>\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\n+\t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n+\tfor (i = 0; i < sub_policy_num;\n+\t\ti++) {\n+\t\tfor (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {\n+\t\t\tif (rss_desc[j] &&\n+\t\t\t\thrxq_idx[j] !=\n+\t\t\tmtr_policy->sub_policys[domain][i]->rix_hrxq[j])\n+\t\t\t\tbreak;\n+\t\t}\n+\t\tif (j >= MLX5_MTR_RTE_COLORS) {\n+\t\t\t/*\n+\t\t\t * Found the sub policy table with\n+\t\t\t * the same queue per color\n+\t\t\t */\n+\t\t\trte_spinlock_unlock(&mtr_policy->sl);\n+\t\t\tfor (j = 0; j < MLX5_MTR_RTE_COLORS; j++)\n+\t\t\t\tmlx5_hrxq_release(dev, hrxq_idx[j]);\n+\t\t\treturn mtr_policy->sub_policys[domain][i];\n+\t\t}\n+\t}\n+\t/* Create sub policy. */\n+\tif (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {\n+\t\t/* Reuse the first dummy sub_policy*/\n+\t\tsub_policy = mtr_policy->sub_policys[domain][0];\n+\t\tsub_policy_idx = sub_policy->idx;\n+\t} else {\n+\t\tsub_policy = mlx5_ipool_zmalloc\n+\t\t\t\t(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],\n+\t\t\t\t&sub_policy_idx);\n+\t\tif (!sub_policy ||\n+\t\t\tsub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {\n+\t\t\tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++)\n+\t\t\t\tmlx5_hrxq_release(dev, hrxq_idx[i]);\n+\t\t\tgoto rss_sub_policy_error;\n+\t\t}\n+\t\tsub_policy->idx = sub_policy_idx;\n+\t\tsub_policy->main_policy = mtr_policy;\n+\t}\n+\tfor (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {\n+\t\tif (!rss_desc[i])\n+\t\t\tcontinue;\n+\t\tsub_policy->rix_hrxq[i] = hrxq_idx[i];\n+\t\t/*\n+\t\t * Overwrite the last action from\n+\t\t * RSS action to Queue action.\n+\t\t */\n+\t\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],\n+\t\t\t      hrxq_idx[i]);\n+\t\tif (!hrxq) {\n+\t\t\tDRV_LOG(ERR, \"Failed to create policy hrxq\");\n+\t\t\tgoto rss_sub_policy_error;\n+\t\t}\n+\t\tact_cnt = &mtr_policy->act_cnt[i];\n+\t\tif (act_cnt->rix_mark || act_cnt->modify_hdr) {\n+\t\t\tmemset(&dh, 0, sizeof(struct mlx5_flow_handle));\n+\t\t\tif (act_cnt->rix_mark)\n+\t\t\t\tdh.mark = 1;\n+\t\t\tdh.fate_action = MLX5_FLOW_FATE_QUEUE;\n+\t\t\tdh.rix_hrxq = hrxq_idx[i];\n+\t\t\tflow_drv_rxq_flags_set(dev, &dh);\n+\t\t}\n+\t}\n+\tif (__flow_dv_create_policy_acts_rules(dev, mtr_policy,\n+\t\tsub_policy, domain)) {\n+\t\tDRV_LOG(ERR, \"Failed to create policy \"\n+\t\t\t\"rules per domain.\");\n+\t\tgoto rss_sub_policy_error;\n+\t}\n+\tif (sub_policy != mtr_policy->sub_policys[domain][0]) {\n+\t\ti = (mtr_policy->sub_policy_num >>\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\n+\t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n+\t\tmtr_policy->sub_policys[domain][i] = sub_policy;\n+\t\ti++;\n+\t\tif (i > MLX5_MTR_RSS_MAX_SUB_POLICY)\n+\t\t\tgoto rss_sub_policy_error;\n+\t\tmtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));\n+\t\tmtr_policy->sub_policy_num |=\n+\t\t\t(i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);\n+\t}\n+\trte_spinlock_unlock(&mtr_policy->sl);\n+\treturn sub_policy;\n+rss_sub_policy_error:\n+\tif (sub_policy) {\n+\t\t__flow_dv_destroy_sub_policy_rules(dev, sub_policy);\n+\t\tif (sub_policy != mtr_policy->sub_policys[domain][0]) {\n+\t\t\ti = (mtr_policy->sub_policy_num >>\n+\t\t\t(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &\n+\t\t\tMLX5_MTR_SUB_POLICY_NUM_MASK;\n+\t\t\tmtr_policy->sub_policys[domain][i] = NULL;\n+\t\t\tmlx5_ipool_free\n+\t\t\t(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],\n+\t\t\t\t\tsub_policy->idx);\n+\t\t}\n+\t}\n+\tif 
(sub_policy_idx)\n+\t\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],\n+\t\t\tsub_policy_idx);\n+\trte_spinlock_unlock(&mtr_policy->sl);\n+\treturn NULL;\n+}\n+\n /**\n  * Validate the batch counter support in root table.\n  *\n@@ -15464,6 +15608,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {\n \t.destroy_policy_rules = flow_dv_destroy_policy_rules,\n \t.create_def_policy = flow_dv_create_def_policy,\n \t.destroy_def_policy = flow_dv_destroy_def_policy,\n+\t.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,\n \t.counter_alloc = flow_dv_counter_allocate,\n \t.counter_free = flow_dv_counter_free,\n \t.counter_query = flow_dv_counter_query,\n",
    "prefixes": [
        "v8",
        "3/4"
    ]
}
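
The Allow header above also lists PUT and PATCH, so the record can be modified by users with the right permissions. A hedged sketch of a partial update with Python's requests library, assuming Patchwork token authentication ("Authorization: Token <key>") and that "archived" and "state" are among the writable fields for a project maintainer; the token value below is a placeholder:

    import requests

    TOKEN = "your-api-token"  # placeholder; created from a Patchwork user profile
    headers = {"Authorization": f"Token {TOKEN}"}

    # PATCH sends only the fields to change; here the patch is archived.
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/92249/",
        json={"archived": True},
        headers=headers,
    )
    resp.raise_for_status()
    print(resp.json()["archived"])  # True if the update was accepted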