get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Fully update a patch; all writable fields should be supplied in the request.

GET /api/patches/107948/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 107948,
    "url": "http://patches.dpdk.org/api/patches/107948/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220222085156.27137-14-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220222085156.27137-14-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220222085156.27137-14-suanmingm@nvidia.com",
    "date": "2022-02-22T08:51:55",
    "name": "[v2,13/14] net/mlx5: add indirect action",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "4db04b1408e660e350026bcc5c5f67cab93fbdfd",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220222085156.27137-14-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 21784,
            "url": "http://patches.dpdk.org/api/series/21784/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=21784",
            "date": "2022-02-22T08:51:42",
            "name": "net/mlx5: add hardware steering",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/21784/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/107948/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/107948/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9E20DA0350;\n\tTue, 22 Feb 2022 09:53:51 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E6770411B8;\n\tTue, 22 Feb 2022 09:52:50 +0100 (CET)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2053.outbound.protection.outlook.com [40.107.243.53])\n by mails.dpdk.org (Postfix) with ESMTP id 83CD7411CE\n for <dev@dpdk.org>; Tue, 22 Feb 2022 09:52:46 +0100 (CET)",
            "from BN9PR03CA0584.namprd03.prod.outlook.com (2603:10b6:408:10d::19)\n by BL1PR12MB5708.namprd12.prod.outlook.com (2603:10b6:208:387::14)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4995.17; Tue, 22 Feb\n 2022 08:52:44 +0000",
            "from BN8NAM11FT039.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:10d:cafe::a0) by BN9PR03CA0584.outlook.office365.com\n (2603:10b6:408:10d::19) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5017.21 via Frontend\n Transport; Tue, 22 Feb 2022 08:52:44 +0000",
            "from mail.nvidia.com (12.22.5.235) by\n BN8NAM11FT039.mail.protection.outlook.com (10.13.177.169) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4995.15 via Frontend Transport; Tue, 22 Feb 2022 08:52:44 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Tue, 22 Feb 2022 08:52:43 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.9; Tue, 22 Feb 2022\n 00:52:39 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=FOFWnOM1X8cS+GyL3t+0xvRlK14Vq8C0sJanyC5XLLuYmpQqX+13kaF33ztjyzr2mkXdL/nR8Wwns4xC+gUITRXEuUVewMRK4BCvigXpvkTLtiyy7Xc1Jfb9aVHFeYAU9z17lMfOYTRdCd4YXkaL1CrtBBPg6ip938ah28EvAXX22sifsjGHZrz88qOT+dg8M54ud/bXRk3Elv92yeeGu8tXnQCaVrXZRzlawqTX2fO9eFmxncrUpYcpYxMZpS2B0d/KSx0vR+P18rRTN+STJM+/HVKnqcYjjerq2Phr72bh8WWavYYHr8lhOxs+nltcOuxwuPNz/umxp1kImDJWzg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=hAHnDIJTj2aY1bz/5/1+sI4SQD1pRmVF0+WZjgG34i4=;\n b=MNeivTc+EHPUIP/WbKTwVjpu7GS9DU5Xc2wyRepuS2xpGSjYxMzEx8H6Lv8r0iVjvNZEtq0HNhqb8SdUrWeUl1B+Oc9vhFAomCacx19raL12YmeogOHbTglzevtHDVAUlApPIVfxpqCccMAb6A3ZFhbKv3jBzeTOJfLkDynnOmQgf5k24Vfezo9cCXHykVBgZ+HWYvmDoE33Sk6KbgDfdBtqRuSKzs2/0M6s5cnKx2b6pnmRYkeOqiDPgP81lXPGTV67zLrYhqjPpmRgCRelAFeBFybQrqaQhMG+vAuY+A9La5s28SGlcTXr6r5Bk2Hd/kuMxXPGQUZSUMd08SAmHQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.235) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com; dmarc=pass\n (p=reject sp=reject pct=100) action=none header.from=nvidia.com; dkim=none\n (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=hAHnDIJTj2aY1bz/5/1+sI4SQD1pRmVF0+WZjgG34i4=;\n b=oEKZk2U6gu4XrcH3lmY8+6Gi54DXS62zrIbOUkfJODo8BWzW3aRdWADPOWCWmgeJZy6/sRBsAJG/C+X704mYBzlsBoXa5TlXQOTtQofuAQGAh5VzR8fn1tdTQNHmbQIIvKGHDoiMwi4rfChGRTPbXBFVUpLn9FaRempGsOHhjaAbg0EpNqsuDyZnvxMvHqE2jxjSwJHoqM0Y2pvrphud9KT44plPmfg5oNsq00E/tmKJm/j+VjpM2zdlH3JZQ1uDgJyH+M/TTkS9MKwyCiK5/5YTPl4HADC9nk6oQOX/Hrv8JLYdFOPS9nbs2N7WssQCKQ/OsChNrypk8I1mXs3kwQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.235)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.235 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.235; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Subject": "[PATCH v2 13/14] net/mlx5: add indirect action",
        "Date": "Tue, 22 Feb 2022 10:51:55 +0200",
        "Message-ID": "<20220222085156.27137-14-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220222085156.27137-1-suanmingm@nvidia.com>",
        "References": "<20220210162926.20436-1-suanmingm@nvidia.com>\n <20220222085156.27137-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail203.nvidia.com (10.129.68.9) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "fb347bef-b571-4491-a022-08d9f5e0b13c",
        "X-MS-TrafficTypeDiagnostic": "BL1PR12MB5708:EE_",
        "X-Microsoft-Antispam-PRVS": "\n <BL1PR12MB5708A0C36C25E6D13C009958C13B9@BL1PR12MB5708.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n mCeRKI098gzUCDalmoiXSuAOfmsNkxRQbip2nnZ6W5V5kZathtgkvAGX8mjas84jnnn5i7nVhzazsQZpD80iuMa+9Wjwwc6v9fQFso+YB3ubGrBQ0ZhVfKjjut5k2iT94QBlOsCjXmpcL0KdP/RlB7xaVdSdNjbiESXdoXP7bXrfsHrg5NgHX06aOCV6tCQweTOSMXN9H4j/Hk5nxnHV4lkStrSC720oiI2u4gID8hfQ2KnJvOu/9C9P7WaMhIhNLcOp0Drj9/tqkFRLRzErKobxGNeVDOu5Y/6iS+3hX6JCqwn4JfXJS427i5Tz0felQJas15MXhJV87+nLX/hn1s69ANi00pmoHlmuYd/noXt4a1JT1t/xFD57oG1+4eFoyCv9qOtSJR25osg0xv7kXOQmps1l3tcg+AGglp6B4FmwiK4i7SWLg7/fPyFRdwr/yUYQswqkIwoYUBYSEr/G5kxpeuZGkF+evB7BXePDU/pWshFEg+wGgpnPhkcJkE2GgI2PLzIIyGoXvG83ThcKdrCQRI2K/1kUpSS9Aoig4VCqmCcK1NwW/OMrdfokjxByIJrMhFxVpRjBgVR2tkfqIomVSVtkjRpDJfRw2DBMW5/Tjt/fTpB68pHmpZeAkLbocDzkbCwNZSBYL6dFnvffRs9z7lAQpG/+z1XFOfrlYvZsfwECyR4Js6FothOIy45KSoCgn3nok6mqa8wav2ZFAw==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.235; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:CAL; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(36840700001)(46966006)(40470700004)(336012)(508600001)(55016003)(36860700001)(30864003)(16526019)(316002)(82310400004)(70206006)(4326008)(8676002)(426003)(70586007)(2616005)(5660300002)(7696005)(26005)(186003)(6286002)(356005)(1076003)(83380400001)(40460700003)(81166007)(86362001)(8936002)(36756003)(2906002)(54906003)(47076005)(6636002)(6666004)(110136005)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "22 Feb 2022 08:52:44.2155 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n fb347bef-b571-4491-a022-08d9f5e0b13c",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.235];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT039.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BL1PR12MB5708",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "HW steering can support indirect action as well. With indirect action,\nthe flow can be created with more flexible shared RSS action selection.\nThis will can save the action template with different RSS actions.\n\nThis commit adds the flow queue operation callback for:\nrte_flow_async_action_handle_create();\nrte_flow_async_action_handle_destroy();\nrte_flow_async_action_handle_update();\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c    | 131 ++++++++++\n drivers/net/mlx5/mlx5_flow.h    |  59 +++++\n drivers/net/mlx5/mlx5_flow_dv.c |  21 +-\n drivers/net/mlx5/mlx5_flow_hw.c | 414 +++++++++++++++++++++++++++++++-\n 4 files changed, 612 insertions(+), 13 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 0b3134764d..bb4d2f6bae 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -879,6 +879,29 @@ mlx5_flow_push(struct rte_eth_dev *dev,\n \t       uint32_t queue,\n \t       struct rte_flow_error *error);\n \n+static struct rte_flow_action_handle *\n+mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t\t const struct rte_flow_indir_action_conf *conf,\n+\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t void *user_data,\n+\t\t\t\t struct rte_flow_error *error);\n+\n+static int\n+mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t\t struct rte_flow_action_handle *handle,\n+\t\t\t\t const void *update,\n+\t\t\t\t void *user_data,\n+\t\t\t\t struct rte_flow_error *error);\n+\n+static int\n+mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t  const struct rte_flow_op_attr *attr,\n+\t\t\t\t  struct rte_flow_action_handle *handle,\n+\t\t\t\t  void *user_data,\n+\t\t\t\t  struct rte_flow_error *error);\n+\n static const struct rte_flow_ops mlx5_flow_ops = {\n \t.validate = mlx5_flow_validate,\n \t.create = mlx5_flow_create,\n@@ -911,6 +934,9 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t.async_destroy = mlx5_flow_async_flow_destroy,\n \t.pull = mlx5_flow_pull,\n \t.push = mlx5_flow_push,\n+\t.async_action_handle_create = mlx5_flow_async_action_handle_create,\n+\t.async_action_handle_update = mlx5_flow_async_action_handle_update,\n+\t.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,\n };\n \n /* Tunnel information. 
*/\n@@ -8364,6 +8390,111 @@ mlx5_flow_push(struct rte_eth_dev *dev,\n \treturn fops->push(dev, queue, error);\n }\n \n+/**\n+ * Create shared action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] queue\n+ *   Which queue to be used..\n+ * @param[in] attr\n+ *   Operation attribute.\n+ * @param[in] conf\n+ *   Indirect action configuration.\n+ * @param[in] action\n+ *   rte_flow action detail.\n+ * @param[in] user_data\n+ *   Pointer to the user_data.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   Action handle on success, NULL otherwise and rte_errno is set.\n+ */\n+static struct rte_flow_action_handle *\n+mlx5_flow_async_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t\t const struct rte_flow_indir_action_conf *conf,\n+\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t void *user_data,\n+\t\t\t\t struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops =\n+\t\t\tflow_get_drv_ops(MLX5_FLOW_TYPE_HW);\n+\n+\treturn fops->async_action_create(dev, queue, attr, conf, action,\n+\t\t\t\t\t user_data, error);\n+}\n+\n+/**\n+ * Update shared action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] queue\n+ *   Which queue to be used..\n+ * @param[in] attr\n+ *   Operation attribute.\n+ * @param[in] handle\n+ *   Action handle to be updated.\n+ * @param[in] update\n+ *   Update value.\n+ * @param[in] user_data\n+ *   Pointer to the user_data.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t     const struct rte_flow_op_attr *attr,\n+\t\t\t\t     struct rte_flow_action_handle *handle,\n+\t\t\t\t     const void *update,\n+\t\t\t\t     void *user_data,\n+\t\t\t\t     
struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops =\n+\t\t\tflow_get_drv_ops(MLX5_FLOW_TYPE_HW);\n+\n+\treturn fops->async_action_update(dev, queue, attr, handle,\n+\t\t\t\t\t update, user_data, error);\n+}\n+\n+/**\n+ * Destroy shared action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] queue\n+ *   Which queue to be used..\n+ * @param[in] attr\n+ *   Operation attribute.\n+ * @param[in] handle\n+ *   Action handle to be destroyed.\n+ * @param[in] user_data\n+ *   Pointer to the user_data.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_async_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t\t      const struct rte_flow_op_attr *attr,\n+\t\t\t\t      struct rte_flow_action_handle *handle,\n+\t\t\t\t      void *user_data,\n+\t\t\t\t      struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops =\n+\t\t\tflow_get_drv_ops(MLX5_FLOW_TYPE_HW);\n+\n+\treturn fops->async_action_destroy(dev, queue, attr, handle,\n+\t\t\t\t\t  user_data, error);\n+}\n+\n /**\n  * Allocate a new memory for the counter values wrapped by all the needed\n  * management.\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex ec759c1aa4..9ac6745597 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -41,6 +41,7 @@ enum mlx5_rte_flow_action_type {\n \tMLX5_RTE_FLOW_ACTION_TYPE_AGE,\n \tMLX5_RTE_FLOW_ACTION_TYPE_COUNT,\n \tMLX5_RTE_FLOW_ACTION_TYPE_JUMP,\n+\tMLX5_RTE_FLOW_ACTION_TYPE_RSS,\n };\n \n #define MLX5_INDIRECT_ACTION_TYPE_OFFSET 30\n@@ -1038,6 +1039,13 @@ struct mlx5_action_construct_data {\n \tuint32_t idx;  /* Data index. */\n \tuint16_t action_src; /* rte_flow_action src offset. */\n \tuint16_t action_dst; /* mlx5dr_rule_action dst offset. 
*/\n+\tunion {\n+\t\tstruct {\n+\t\t\tuint64_t types; /* RSS hash types. */\n+\t\t\tuint32_t level; /* RSS level. */\n+\t\t\tuint32_t idx; /* Shared action index. */\n+\t\t} shared_rss;\n+\t};\n };\n \n /* Flow item template struct. */\n@@ -1046,6 +1054,7 @@ struct rte_flow_pattern_template {\n \t/* Template attributes. */\n \tstruct rte_flow_pattern_template_attr attr;\n \tstruct mlx5dr_match_template *mt; /* mlx5 match template. */\n+\tuint64_t item_flags; /* Item layer flags. */\n \tuint32_t refcnt;  /* Reference counter. */\n };\n \n@@ -1433,6 +1442,32 @@ typedef int (*mlx5_flow_push_t)\n \t\t\t uint32_t queue,\n \t\t\t struct rte_flow_error *error);\n \n+typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t uint32_t queue,\n+\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t const struct rte_flow_indir_action_conf *conf,\n+\t\t\t const struct rte_flow_action *action,\n+\t\t\t void *user_data,\n+\t\t\t struct rte_flow_error *error);\n+\n+typedef int (*mlx5_flow_async_action_handle_update_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t uint32_t queue,\n+\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t struct rte_flow_action_handle *handle,\n+\t\t\t const void *update,\n+\t\t\t void *user_data,\n+\t\t\t struct rte_flow_error *error);\n+\n+typedef int (*mlx5_flow_async_action_handle_destroy_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t uint32_t queue,\n+\t\t\t const struct rte_flow_op_attr *attr,\n+\t\t\t struct rte_flow_action_handle *handle,\n+\t\t\t void *user_data,\n+\t\t\t struct rte_flow_error *error);\n+\n struct mlx5_flow_driver_ops {\n \tmlx5_flow_validate_t validate;\n \tmlx5_flow_prepare_t prepare;\n@@ -1482,6 +1517,9 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_async_flow_destroy_t async_flow_destroy;\n \tmlx5_flow_pull_t pull;\n \tmlx5_flow_push_t push;\n+\tmlx5_flow_async_action_handle_create_t async_action_create;\n+\tmlx5_flow_async_action_handle_update_t 
async_action_update;\n+\tmlx5_flow_async_action_handle_destroy_t async_action_destroy;\n };\n \n /* mlx5_flow.c */\n@@ -1923,6 +1961,8 @@ void flow_dv_hashfields_set(uint64_t item_flags,\n \t\t\t    uint64_t *hash_fields);\n void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,\n \t\t\t\t\tuint64_t *hash_field);\n+uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,\n+\t\t\t\t\tconst uint64_t hash_fields);\n \n struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);\n void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);\n@@ -1973,4 +2013,23 @@ mlx5_get_tof(const struct rte_flow_item *items,\n \t     enum mlx5_tof_rule_type *rule_type);\n void\n flow_hw_resource_release(struct rte_eth_dev *dev);\n+int flow_dv_action_validate(struct rte_eth_dev *dev,\n+\t\t\t    const struct rte_flow_indir_action_conf *conf,\n+\t\t\t    const struct rte_flow_action *action,\n+\t\t\t    struct rte_flow_error *err);\n+struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,\n+\t\t      const struct rte_flow_indir_action_conf *conf,\n+\t\t      const struct rte_flow_action *action,\n+\t\t      struct rte_flow_error *err);\n+int flow_dv_action_destroy(struct rte_eth_dev *dev,\n+\t\t\t   struct rte_flow_action_handle *handle,\n+\t\t\t   struct rte_flow_error *error);\n+int flow_dv_action_update(struct rte_eth_dev *dev,\n+\t\t\t  struct rte_flow_action_handle *handle,\n+\t\t\t  const void *update,\n+\t\t\t  struct rte_flow_error *err);\n+int flow_dv_action_query(struct rte_eth_dev *dev,\n+\t\t\t const struct rte_flow_action_handle *handle,\n+\t\t\t void *data,\n+\t\t\t struct rte_flow_error *error);\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 15d8535e6e..b49b88a13f 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -13835,9 +13835,9 @@ __flow_dv_action_rss_hrxq_set(struct 
mlx5_shared_action_rss *action,\n  * @return\n  *   Valid hash RX queue index, otherwise 0.\n  */\n-static uint32_t\n-__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,\n-\t\t\t\t const uint64_t hash_fields)\n+uint32_t\n+flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,\n+\t\t\t       const uint64_t hash_fields)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_shared_action_rss *shared_rss =\n@@ -13965,7 +13965,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n \t\t\tstruct mlx5_hrxq *hrxq = NULL;\n \t\t\tuint32_t hrxq_idx;\n \n-\t\t\thrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,\n+\t\t\thrxq_idx = flow_dv_action_rss_hrxq_lookup(dev,\n \t\t\t\t\t\trss_desc->shared_rss,\n \t\t\t\t\t\tdev_flow->hash_fields);\n \t\t\tif (hrxq_idx)\n@@ -14689,6 +14689,7 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,\n \t\t\t   struct mlx5_shared_action_rss *shared_rss,\n \t\t\t   struct rte_flow_error *error)\n {\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_flow_rss_desc rss_desc = { 0 };\n \tsize_t i;\n \tint err;\n@@ -14709,6 +14710,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,\n \t/* Set non-zero value to indicate a shared RSS. 
*/\n \trss_desc.shared_rss = action_idx;\n \trss_desc.ind_tbl = shared_rss->ind_tbl;\n+\tif (priv->sh->config.dv_flow_en == 2)\n+\t\trss_desc.hws_flags = MLX5DR_ACTION_FLAG_HWS_RX;\n \tfor (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {\n \t\tstruct mlx5_hrxq *hrxq;\n \t\tuint64_t hash_fields = mlx5_rss_hash_fields[i];\n@@ -14900,7 +14903,7 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,\n  *   A valid shared action handle in case of success, NULL otherwise and\n  *   rte_errno is set.\n  */\n-static struct rte_flow_action_handle *\n+struct rte_flow_action_handle *\n flow_dv_action_create(struct rte_eth_dev *dev,\n \t\t      const struct rte_flow_indir_action_conf *conf,\n \t\t      const struct rte_flow_action *action,\n@@ -14970,7 +14973,7 @@ flow_dv_action_create(struct rte_eth_dev *dev,\n  * @return\n  *   0 on success, otherwise negative errno value.\n  */\n-static int\n+int\n flow_dv_action_destroy(struct rte_eth_dev *dev,\n \t\t       struct rte_flow_action_handle *handle,\n \t\t       struct rte_flow_error *error)\n@@ -15180,7 +15183,7 @@ __flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,\n  * @return\n  *   0 on success, otherwise negative errno value.\n  */\n-static int\n+int\n flow_dv_action_update(struct rte_eth_dev *dev,\n \t\t\tstruct rte_flow_action_handle *handle,\n \t\t\tconst void *update,\n@@ -15894,7 +15897,7 @@ flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,\n \t\t\t\t  \"counters are not available\");\n }\n \n-static int\n+int\n flow_dv_action_query(struct rte_eth_dev *dev,\n \t\t     const struct rte_flow_action_handle *handle, void *data,\n \t\t     struct rte_flow_error *error)\n@@ -17584,7 +17587,7 @@ flow_dv_counter_allocate(struct rte_eth_dev *dev)\n  * @return\n  *   0 on success, otherwise negative errno value.\n  */\n-static int\n+int\n flow_dv_action_validate(struct rte_eth_dev *dev,\n \t\t\tconst struct rte_flow_indir_action_conf *conf,\n \t\t\tconst struct rte_flow_action 
*action,\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex a28e3c00b3..95df6e5190 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -62,6 +62,72 @@ flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)\n \tpriv->mark_enabled = enable;\n }\n \n+/**\n+ * Generate the pattern item flags.\n+ * Will be used for shared RSS action.\n+ *\n+ * @param[in] items\n+ *   Pointer to the list of items.\n+ *\n+ * @return\n+ *   Item flags.\n+ */\n+static uint64_t\n+flow_hw_rss_item_flags_get(const struct rte_flow_item items[])\n+{\n+\tuint64_t item_flags = 0;\n+\tuint64_t last_item = 0;\n+\n+\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\n+\t\tint tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\t\tint item_type = items->type;\n+\n+\t\tswitch (item_type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n+\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV4;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n+\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV6;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :\n+\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_TCP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\t\tlast_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_UDP :\n+\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_UDP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE:\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_NVGRE:\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\t\tlast_item = MLX5_FLOW_LAYER_VXLAN;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN_GPE:\n+\t\t\tlast_item = MLX5_FLOW_LAYER_VXLAN_GPE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GENEVE:\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GENEVE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_MPLS:\n+\t\t\tlast_item = MLX5_FLOW_LAYER_MPLS;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GTP:\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GTP;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t\titem_flags |= last_item;\n+\t}\n+\treturn item_flags;\n+}\n+\n /**\n  * Register destination table DR jump action.\n  *\n@@ -266,6 +332,96 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,\n \treturn 0;\n }\n \n+/**\n+ * Append shared RSS action to the dynamic action list.\n+ *\n+ * @param[in] priv\n+ *   Pointer to the port private data structure.\n+ * @param[in] acts\n+ *   Pointer to the template HW steering DR actions.\n+ * @param[in] type\n+ *   Action type.\n+ * @param[in] action_src\n+ *   Offset of source rte flow action.\n+ * @param[in] action_dst\n+ *   Offset of destination DR action.\n+ * @param[in] idx\n+ *   Shared RSS index.\n+ * @param[in] rss\n+ *   Pointer to the shared RSS info.\n+ *\n+ * @return\n+ *    0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static __rte_always_inline int\n+__flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,\n+\t\t\t\t     struct mlx5_hw_actions *acts,\n+\t\t\t\t     enum rte_flow_action_type type,\n+\t\t\t\t     uint16_t action_src,\n+\t\t\t\t     uint16_t action_dst,\n+\t\t\t\t     uint32_t idx,\n+\t\t\t\t     struct mlx5_shared_action_rss *rss)\n+{\tstruct mlx5_action_construct_data 
*act_data;\n+\n+\tact_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);\n+\tif (!act_data)\n+\t\treturn -1;\n+\tact_data->shared_rss.level = rss->origin.level;\n+\tact_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :\n+\t\t\t\t     rss->origin.types;\n+\tact_data->shared_rss.idx = idx;\n+\tLIST_INSERT_HEAD(&acts->act_list, act_data, next);\n+\treturn 0;\n+}\n+\n+/**\n+ * Translate shared indirect action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev data structure.\n+ * @param[in] action\n+ *   Pointer to the shared indirect rte_flow action.\n+ * @param[in] acts\n+ *   Pointer to the template HW steering DR actions.\n+ * @param[in] action_src\n+ *   Offset of source rte flow action.\n+ * @param[in] action_dst\n+ *   Offset of destination DR action.\n+ *\n+ * @return\n+ *    0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static __rte_always_inline int\n+flow_hw_shared_action_translate(struct rte_eth_dev *dev,\n+\t\t\t\tconst struct rte_flow_action *action,\n+\t\t\t\tstruct mlx5_hw_actions *acts,\n+\t\t\t\tuint16_t action_src,\n+\t\t\t\tuint16_t action_dst)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_shared_action_rss *shared_rss;\n+\tuint32_t act_idx = (uint32_t)(uintptr_t)action->conf;\n+\tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n+\tuint32_t idx = act_idx &\n+\t\t       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n+\n+\tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n+\t\tshared_rss = mlx5_ipool_get\n+\t\t  (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);\n+\t\tif (!shared_rss || __flow_hw_act_data_shared_rss_append\n+\t\t    (priv, acts,\n+\t\t    (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,\n+\t\t    action_src, action_dst, idx, shared_rss))\n+\t\t\treturn -1;\n+\t\tbreak;\n+\tdefault:\n+\t\tDRV_LOG(WARNING, \"Unsupported shared action type:%d\", type);\n+\t\tbreak;\n+\t}\n+\treturn 0;\n+}\n+\n /**\n  * Translate 
rte_flow actions to DR action.\n  *\n@@ -316,6 +472,20 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n \tfor (i = 0; !actions_end; actions++, masks++) {\n \t\tswitch (actions->type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n+\t\t\tif (!attr->group) {\n+\t\t\t\tDRV_LOG(ERR, \"Indirect action is not supported in root table.\");\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\tif (actions->conf && masks->conf) {\n+\t\t\t\tif (flow_hw_shared_action_translate\n+\t\t\t\t(dev, actions, acts, actions - action_start, i))\n+\t\t\t\t\tgoto err;\n+\t\t\t} else if (__flow_hw_act_data_general_append\n+\t\t\t\t\t(priv, acts, actions->type,\n+\t\t\t\t\t actions - action_start, i)){\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t\ti++;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n \t\t\tbreak;\n@@ -407,6 +577,115 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t\t  \"fail to create rte table\");\n }\n \n+/**\n+ * Get shared indirect action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev data structure.\n+ * @param[in] act_data\n+ *   Pointer to the recorded action construct data.\n+ * @param[in] item_flags\n+ *   The matcher itme_flags used for RSS lookup.\n+ * @param[in] rule_act\n+ *   Pointer to the shared action's destination rule DR action.\n+ *\n+ * @return\n+ *    0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static __rte_always_inline int\n+flow_hw_shared_action_get(struct rte_eth_dev *dev,\n+\t\t\t  struct mlx5_action_construct_data *act_data,\n+\t\t\t  const uint64_t item_flags,\n+\t\t\t  struct mlx5dr_rule_action *rule_act)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_rss_desc rss_desc = { 0 };\n+\tuint64_t hash_fields = 0;\n+\tuint32_t hrxq_idx = 0;\n+\tstruct mlx5_hrxq *hrxq = NULL;\n+\tint act_type = act_data->type;\n+\n+\tswitch (act_type) {\n+\tcase MLX5_RTE_FLOW_ACTION_TYPE_RSS:\n+\t\trss_desc.level = act_data->shared_rss.level;\n+\t\trss_desc.types = 
act_data->shared_rss.types;\n+\t\tflow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);\n+\t\thrxq_idx = flow_dv_action_rss_hrxq_lookup\n+\t\t\t(dev, act_data->shared_rss.idx, hash_fields);\n+\t\tif (hrxq_idx)\n+\t\t\thrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],\n+\t\t\t\t\t      hrxq_idx);\n+\t\tif (hrxq) {\n+\t\t\trule_act->action = hrxq->action;\n+\t\t\treturn 0;\n+\t\t}\n+\t\tbreak;\n+\tdefault:\n+\t\tDRV_LOG(WARNING, \"Unsupported shared action type:%d\",\n+\t\t\tact_data->type);\n+\t\tbreak;\n+\t}\n+\treturn -1;\n+}\n+\n+/**\n+ * Construct shared indirect action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev data structure.\n+ * @param[in] action\n+ *   Pointer to the shared indirect rte_flow action.\n+ * @param[in] table\n+ *   Pointer to the flow table.\n+ * @param[in] it_idx\n+ *   Item template index the action template refer to.\n+ * @param[in] rule_act\n+ *   Pointer to the shared action's destination rule DR action.\n+ *\n+ * @return\n+ *    0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static __rte_always_inline int\n+flow_hw_shared_action_construct(struct rte_eth_dev *dev,\n+\t\t\t\tconst struct rte_flow_action *action,\n+\t\t\t\tstruct rte_flow_template_table *table,\n+\t\t\t\tconst uint8_t it_idx,\n+\t\t\t\tstruct mlx5dr_rule_action *rule_act)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_action_construct_data act_data;\n+\tstruct mlx5_shared_action_rss *shared_rss;\n+\tuint32_t act_idx = (uint32_t)(uintptr_t)action->conf;\n+\tuint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;\n+\tuint32_t idx = act_idx &\n+\t\t       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);\n+\tuint64_t item_flags;\n+\n+\tmemset(&act_data, 0, sizeof(act_data));\n+\tswitch (type) {\n+\tcase MLX5_INDIRECT_ACTION_TYPE_RSS:\n+\t\tact_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;\n+\t\tshared_rss = mlx5_ipool_get\n+\t\t\t(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);\n+\t\tif 
(!shared_rss)\n+\t\t\treturn -1;\n+\t\tact_data.shared_rss.idx = idx;\n+\t\tact_data.shared_rss.level = shared_rss->origin.level;\n+\t\tact_data.shared_rss.types = !shared_rss->origin.types ?\n+\t\t\t\t\t    RTE_ETH_RSS_IP :\n+\t\t\t\t\t    shared_rss->origin.types;\n+\t\titem_flags = table->its[it_idx]->item_flags;\n+\t\tif (flow_hw_shared_action_get\n+\t\t\t\t(dev, &act_data, item_flags, rule_act))\n+\t\t\treturn -1;\n+\t\tbreak;\n+\tdefault:\n+\t\tDRV_LOG(WARNING, \"Unsupported shared action type:%d\", type);\n+\t\tbreak;\n+\t}\n+\treturn 0;\n+}\n+\n /**\n  * Construct flow action array.\n  *\n@@ -419,6 +698,8 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n  *   Pointer to job descriptor.\n  * @param[in] hw_acts\n  *   Pointer to translated actions from template.\n+ * @param[in] it_idx\n+ *   Item template index the action template refer to.\n  * @param[in] actions\n  *   Array of rte_flow action need to be checked.\n  * @param[in] rule_acts\n@@ -432,7 +713,8 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n static __rte_always_inline int\n flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t  struct mlx5_hw_q_job *job,\n-\t\t\t  struct mlx5_hw_actions *hw_acts,\n+\t\t\t  const struct mlx5_hw_actions *hw_acts,\n+\t\t\t  const uint8_t it_idx,\n \t\t\t  const struct rte_flow_action actions[],\n \t\t\t  struct mlx5dr_rule_action *rule_acts,\n \t\t\t  uint32_t *acts_num)\n@@ -464,14 +746,19 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \tLIST_FOREACH(act_data, &hw_acts->act_list, next) {\n \t\tuint32_t jump_group;\n \t\tuint32_t tag;\n+\t\tuint64_t item_flags;\n \t\tstruct mlx5_hw_jump_action *jump;\n \t\tstruct mlx5_hrxq *hrxq;\n \n \t\taction = &actions[act_data->action_src];\n \t\tMLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||\n \t\t\t    (int)action->type == act_data->type);\n-\t\tswitch (action->type) {\n+\t\tswitch (act_data->type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_INDIRECT:\n+\t\t\tif 
(flow_hw_shared_action_construct\n+\t\t\t\t\t(dev, action, table, it_idx,\n+\t\t\t\t\t &rule_acts[act_data->action_dst]))\n+\t\t\t\treturn -1;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n \t\t\tbreak;\n@@ -504,6 +791,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\tjob->flow->hrxq = hrxq;\n \t\t\tjob->flow->fate_type = MLX5_FLOW_FATE_QUEUE;\n \t\t\tbreak;\n+\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_RSS:\n+\t\t\titem_flags = table->its[it_idx]->item_flags;\n+\t\t\tif (flow_hw_shared_action_get\n+\t\t\t\t(dev, act_data, item_flags,\n+\t\t\t\t &rule_acts[act_data->action_dst]))\n+\t\t\t\treturn -1;\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n@@ -589,8 +883,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \trule_attr.user_data = job;\n \thw_acts = &table->ats[action_template_index].acts;\n \t/* Construct the flow action array based on the input actions.*/\n-\tflow_hw_actions_construct(dev, job, hw_acts, actions,\n-\t\t\t\t  rule_acts, &acts_num);\n+\tflow_hw_actions_construct(dev, job, hw_acts, pattern_template_index,\n+\t\t\t\t  actions, rule_acts, &acts_num);\n \tret = mlx5dr_rule_create(table->matcher,\n \t\t\t\t pattern_template_index, items,\n \t\t\t\t rule_acts, acts_num,\n@@ -1237,6 +1531,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,\n \t\t\t\t   \"cannot create match template\");\n \t\treturn NULL;\n \t}\n+\tit->item_flags = flow_hw_rss_item_flags_get(items);\n \t__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);\n \tLIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);\n \treturn it;\n@@ -1685,6 +1980,109 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \tpriv->nb_queue = 0;\n }\n \n+/**\n+ * Create shared action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] queue\n+ *   Which queue to be used..\n+ * @param[in] attr\n+ *   Operation attribute.\n+ * @param[in] conf\n+ *   Indirect action configuration.\n+ * @param[in] action\n+ *   rte_flow action detail.\n+ * 
@param[in] user_data\n+ *   Pointer to the user_data.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   Action handle on success, NULL otherwise and rte_errno is set.\n+ */\n+static struct rte_flow_action_handle *\n+flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t     const struct rte_flow_op_attr *attr,\n+\t\t\t     const struct rte_flow_indir_action_conf *conf,\n+\t\t\t     const struct rte_flow_action *action,\n+\t\t\t     void *user_data,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tRTE_SET_USED(queue);\n+\tRTE_SET_USED(attr);\n+\tRTE_SET_USED(user_data);\n+\treturn flow_dv_action_create(dev, conf, action, error);\n+}\n+\n+/**\n+ * Update shared action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] queue\n+ *   Which queue to be used..\n+ * @param[in] attr\n+ *   Operation attribute.\n+ * @param[in] handle\n+ *   Action handle to be updated.\n+ * @param[in] update\n+ *   Update value.\n+ * @param[in] user_data\n+ *   Pointer to the user_data.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t     const struct rte_flow_op_attr *attr,\n+\t\t\t     struct rte_flow_action_handle *handle,\n+\t\t\t     const void *update,\n+\t\t\t     void *user_data,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tRTE_SET_USED(queue);\n+\tRTE_SET_USED(attr);\n+\tRTE_SET_USED(user_data);\n+\treturn flow_dv_action_update(dev, handle, update, error);\n+}\n+\n+/**\n+ * Destroy shared action.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] queue\n+ *   Which queue to be used..\n+ * @param[in] attr\n+ *   Operation attribute.\n+ * @param[in] handle\n+ *   Action handle to be destroyed.\n+ * @param[in] user_data\n+ *   Pointer to the user_data.\n+ * 
@param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, negative value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n+\t\t\t      const struct rte_flow_op_attr *attr,\n+\t\t\t      struct rte_flow_action_handle *handle,\n+\t\t\t      void *user_data,\n+\t\t\t      struct rte_flow_error *error)\n+{\n+\tRTE_SET_USED(queue);\n+\tRTE_SET_USED(attr);\n+\tRTE_SET_USED(user_data);\n+\treturn flow_dv_action_destroy(dev, handle, error);\n+}\n+\n+\n const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.info_get = flow_hw_info_get,\n \t.configure = flow_hw_configure,\n@@ -1698,6 +2096,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.async_flow_destroy = flow_hw_async_flow_destroy,\n \t.pull = flow_hw_pull,\n \t.push = flow_hw_push,\n+\t.async_action_create = flow_hw_action_handle_create,\n+\t.async_action_destroy = flow_hw_action_handle_destroy,\n+\t.async_action_update = flow_hw_action_handle_update,\n+\t.action_validate = flow_dv_action_validate,\n+\t.action_create = flow_dv_action_create,\n+\t.action_destroy = flow_dv_action_destroy,\n+\t.action_update = flow_dv_action_update,\n+\t.action_query = flow_dv_action_query,\n };\n \n #endif\n",
    "prefixes": [
        "v2",
        "13/14"
    ]
}