get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (a full update).
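
As a quick illustration of the endpoints above, here is a minimal sketch that fetches this patch over the REST API using only the Python standard library. The URL and field names are taken directly from the response shown below; the ?format=json parameter and the token-based authentication mentioned in the comments are assumptions based on the usual Django REST Framework conventions this API follows, not something confirmed by this page.

import json
import urllib.request

# Patch detail endpoint shown in the response below; format=json asks the
# server for a plain JSON rendering instead of the browsable API page.
URL = "http://patches.dpdk.org/api/patches/118822/?format=json"

with urllib.request.urlopen(URL) as resp:
    patch = json.load(resp)

# A few of the read-only fields returned for every patch.
print(patch["name"])               # "[v6,16/18] net/mlx5/hws: Add HWS action object"
print(patch["state"])              # "accepted"
print(patch["submitter"]["name"])  # "Alex Vesker"

# PUT/PATCH require an authenticated request (typically an
# "Authorization: Token <api-token>" header) and only touch the writable
# fields, such as state, delegate, archived and commit_ref.

The same structure applies to any patch; only the numeric ID in the URL changes.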

GET /api/patches/118822/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118822,
    "url": "http://patches.dpdk.org/api/patches/118822/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-17-valex@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221020155749.16643-17-valex@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221020155749.16643-17-valex@nvidia.com",
    "date": "2022-10-20T15:57:46",
    "name": "[v6,16/18] net/mlx5/hws: Add HWS action object",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "63a7463a9ff1395db32925fe851f098edeab61b8",
    "submitter": {
        "id": 2858,
        "url": "http://patches.dpdk.org/api/people/2858/?format=api",
        "name": "Alex Vesker",
        "email": "valex@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-17-valex@nvidia.com/mbox/",
    "series": [
        {
            "id": 25345,
            "url": "http://patches.dpdk.org/api/series/25345/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=25345",
            "date": "2022-10-20T15:57:30",
            "name": "net/mlx5: Add HW steering low level support",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/25345/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/118822/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/118822/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 38565A0553;\n\tThu, 20 Oct 2022 18:00:59 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 318D642BFB;\n\tThu, 20 Oct 2022 17:59:41 +0200 (CEST)",
            "from NAM11-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam11on2061.outbound.protection.outlook.com [40.107.223.61])\n by mails.dpdk.org (Postfix) with ESMTP id ECB5842B80\n for <dev@dpdk.org>; Thu, 20 Oct 2022 17:59:38 +0200 (CEST)",
            "from DM6PR21CA0002.namprd21.prod.outlook.com (2603:10b6:5:174::12)\n by CY8PR12MB7681.namprd12.prod.outlook.com (2603:10b6:930:84::19) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.33; Thu, 20 Oct\n 2022 15:59:35 +0000",
            "from DM6NAM11FT006.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:174:cafe::13) by DM6PR21CA0002.outlook.office365.com\n (2603:10b6:5:174::12) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5769.6 via Frontend\n Transport; Thu, 20 Oct 2022 15:59:35 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n DM6NAM11FT006.mail.protection.outlook.com (10.13.173.104) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5746.16 via Frontend Transport; Thu, 20 Oct 2022 15:59:35 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Thu, 20 Oct\n 2022 08:59:28 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Thu, 20 Oct\n 2022 08:59:26 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=gzfgoydR2TXj/pGxPV8hUhQIDE3TeiDGhSGuQuYUMHs4YkXaLxb8Q628jc+DrtDfBMYLMBgyWWFT4Bp/7xZfQZf7W41NYCkGiIQXWzlRc/BfX8xewEUBNRkUVlrCisedqOfW+I+JpLstpgPx2dX/rrFJAsFSVi88VxknFfzMz/+QmJxvL3vKvgk2Zt/OO3t9WnYtM5XaX/yofmTZXBLvEE65HZ9GVOOrt7cZtuAfym/7GKDRl8ZghbzuHMCIGldBWnkR48KI+8LNxlLoAsWYcjYIXNDf5OvZ65DlgxUssSSClyUOpYG+albfJOjJRa4NqVc0afg/HaHe2HXbffQ35w==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=EPpEl7lmwyNEQqtJGLeRYD8wZtWXAyGo8iKMu5XEsKw=;\n b=je4H6R/JkiZ+In4umnZ/UlSJQRBELU/VsbiVsgPDodYvcoslSZZ65a+RtBSyZz3yROoNITGQDa3ntokk4PTDo1XnHFaxrYnjulphFfsDAKho/tSW6ipfayL7X/v3WgW+oTJ6FsgNDCOnOUxP3/IAzDrclr+wE5zTgavOs/fz5C51zvsd/wnu9UXZEBGslniYcBW0THhdnN1dj/WkXXWJpIhtXcfggX4d0lLYA4uySXsJ4lKUTtPG9O+sC6fYpzeAeQ6euwd3nhchx/rGyqk6Mu+s3tj9CNCTJp4a4JpRAJFNlmL+d0VVbNAbbyKc90PkyAHFjci2vlTB8aX4lk8EWg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=none (sender ip is\n 216.228.117.160) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=fail (p=reject sp=reject pct=100) action=oreject\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=EPpEl7lmwyNEQqtJGLeRYD8wZtWXAyGo8iKMu5XEsKw=;\n b=iPFXVFFqWDq4WnVXUtmQPb0vKGMtbuRJz02CN5HbhoTLbNaTYcTiSKDb9SYgT7Jn3asccUWKwUyxJGyvv0iw8n3ReGYCjUhocT6bXQeqzZ8ER8if250FaQJ+/uPd5D6BOfze2VFfTwT/nUheaGwRSNvFcuQi8QjOYBCywF0OlEmE5ieTSPhPUK5KIagKi5lbpQs3Ze0OWT3wSVapMVBSHNhnc9o81BxeCsKICYeGaPZxqv7+njbeO4b209Dexe766xujoU4JAeW/Gg23CRobGoqTUJF16lgcItoz61/IwD2jVnUS3pa1SyjeXraDSRghufERy5dzn4Mpph6KE6Mwrg==",
        "X-MS-Exchange-Authentication-Results": "spf=none (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=fail action=oreject header.from=nvidia.com;",
        "Received-SPF": "None (protection.outlook.com: nvidia.com does not designate\n permitted sender hosts)",
        "From": "Alex Vesker <valex@nvidia.com>",
        "To": "<valex@nvidia.com>, <viacheslavo@nvidia.com>, <thomas@monjalon.net>,\n <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, <orika@nvidia.com>, Erez Shitrit <erezsh@nvidia.com>",
        "Subject": "[v6 16/18] net/mlx5/hws: Add HWS action object",
        "Date": "Thu, 20 Oct 2022 18:57:46 +0300",
        "Message-ID": "<20221020155749.16643-17-valex@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20221020155749.16643-1-valex@nvidia.com>",
        "References": "<20220922190345.394-1-valex@nvidia.com>\n <20221020155749.16643-1-valex@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DM6NAM11FT006:EE_|CY8PR12MB7681:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "4b161460-5a1c-4a55-a377-08dab2b415a9",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n SJVGQKhO7ss2ss+zXRxevCJQsH1NArvxgAmitT5LOr01OfavXLvaeKD78yZodptvjglynKIQkxKaSC5U0szbdTq40XLtrQIOqhyBPe5H5uIUowsz4QL8AqcONQsKNVryO4I98Iod7bpMOy2SFP2/+HWYQzTqYkaO4u31NdQ0HW5nPwmB/r4iv3sP9Fbowzvosiu47fSYNcFFQmhF2UhXhA9SE/PYbXERPw0sEyjSq90wmrYWmV7FtXSiko+tu/brs0pMZZZlgaaNBHxCYLL1pidUJsTHvQvIrPcQL36X8u/bD6I4kGkGMrqcl+1kxovsLgTs55EpL1/QqyfplhY+c7+Q8PF3a1sxJgrv6OJ5UKo7qI68AsWhDzQ4ahtIu1rghHdfzH/LiL08L/k9+ZlAugx6+6HwO37XYWJK40RZXvP/K83+7OJa7EYHSifh7/bdXvzuqtatGuvnAuKRT+TzEHk//6OQ83NFAgrRdkfzU+oy54xeTrsyLV67UJyaBNnrGBVlvZQGiYWC/BhJUiEk/4yO3B/c+0fm0waVW/BqI6xSPdZgOHCgLeJP0dcvHr03Sr5QkUx0XpzbxgvWKMY4FVbcIrp3elVD8qviiN5lT8kRthA0h0QkaIWeXY8rc2+GtyrBK6buXF9EQa/F7KbbdzIifKj567DlqLsz/Bl6gHOmBlT3Lz07twiIMM6xYSLgIWm6p6D4gX38s6g+edRI+9jTxJRngE2p7NeJ5yT/pP9JIsd0GK1vZuRT4C9tsEw0xotNJZKWPrGOuItAeDFZeA==",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(396003)(39860400002)(376002)(346002)(136003)(451199015)(46966006)(36840700001)(40470700004)(82310400005)(7636003)(36756003)(2616005)(478600001)(30864003)(5660300002)(356005)(83380400001)(70206006)(6666004)(107886003)(47076005)(70586007)(4326008)(8676002)(55016003)(8936002)(41300700001)(7696005)(6286002)(86362001)(36860700001)(336012)(54906003)(6636002)(426003)(26005)(110136005)(16526019)(40460700003)(40480700001)(316002)(1076003)(186003)(2906002)(82740400003)(559001)(579004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "20 Oct 2022 15:59:35.2236 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 4b161460-5a1c-4a55-a377-08dab2b415a9",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT006.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CY8PR12MB7681",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Erez Shitrit <erezsh@nvidia.com>\n\nAction objects are used for executing different HW actions\nover packets. Each action contains the HW resources and parameters\nneeded for action use over the HW when creating a rule.\n\nSigned-off-by: Erez Shitrit <erezsh@nvidia.com>\nSigned-off-by: Alex Vesker <valex@nvidia.com>\n---\n drivers/net/mlx5/hws/mlx5dr_action.c  | 2237 +++++++++++++++++++++++++\n drivers/net/mlx5/hws/mlx5dr_action.h  |  253 +++\n drivers/net/mlx5/hws/mlx5dr_pat_arg.c |  511 ++++++\n drivers/net/mlx5/hws/mlx5dr_pat_arg.h |   83 +\n 4 files changed, 3084 insertions(+)\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_action.c\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_action.h\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_pat_arg.c\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_pat_arg.h",
    "diff": "diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c\nnew file mode 100644\nindex 0000000000..755d5d09cf\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_action.c\n@@ -0,0 +1,2237 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#include \"mlx5dr_internal.h\"\n+\n+#define WIRE_PORT 0xFFFF\n+\n+#define MLX5DR_ACTION_METER_INIT_COLOR_OFFSET 1\n+\n+/* This is the maximum allowed action order for each table type:\n+ *\t TX: POP_VLAN, CTR, ASO_METER, AS_CT, PUSH_VLAN, MODIFY, ENCAP, Term\n+ *\t RX: TAG, DECAP, POP_VLAN, CTR, ASO_METER, ASO_CT, PUSH_VLAN, MODIFY,\n+ *\t     ENCAP, Term\n+ *\tFDB: DECAP, POP_VLAN, CTR, ASO_METER, ASO_CT, PUSH_VLAN, MODIFY,\n+ *\t     ENCAP, Term\n+ */\n+static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_MAX] = {\n+\t[MLX5DR_TABLE_TYPE_NIC_RX] = {\n+\t\tBIT(MLX5DR_ACTION_TYP_TAG),\n+\t\tBIT(MLX5DR_ACTION_TYP_TNL_L2_TO_L2) |\n+\t\tBIT(MLX5DR_ACTION_TYP_TNL_L3_TO_L2),\n+\t\tBIT(MLX5DR_ACTION_TYP_POP_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_POP_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_CTR),\n+\t\tBIT(MLX5DR_ACTION_TYP_ASO_METER),\n+\t\tBIT(MLX5DR_ACTION_TYP_ASO_CT),\n+\t\tBIT(MLX5DR_ACTION_TYP_PUSH_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_PUSH_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_MODIFY_HDR),\n+\t\tBIT(MLX5DR_ACTION_TYP_L2_TO_TNL_L2) |\n+\t\tBIT(MLX5DR_ACTION_TYP_L2_TO_TNL_L3),\n+\t\tBIT(MLX5DR_ACTION_TYP_FT) |\n+\t\tBIT(MLX5DR_ACTION_TYP_MISS) |\n+\t\tBIT(MLX5DR_ACTION_TYP_TIR) |\n+\t\tBIT(MLX5DR_ACTION_TYP_DROP),\n+\t\tBIT(MLX5DR_ACTION_TYP_LAST),\n+\t},\n+\t[MLX5DR_TABLE_TYPE_NIC_TX] = {\n+\t\tBIT(MLX5DR_ACTION_TYP_POP_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_POP_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_CTR),\n+\t\tBIT(MLX5DR_ACTION_TYP_ASO_METER),\n+\t\tBIT(MLX5DR_ACTION_TYP_ASO_CT),\n+\t\tBIT(MLX5DR_ACTION_TYP_PUSH_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_PUSH_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_MODIFY_HDR),\n+\t\tBIT(MLX5DR_ACTION_TYP_L2_TO_TNL_L2) |\n+\t\tBIT(MLX5DR_ACTION_TYP_L2_TO_TNL_L3),\n+\t\tBIT(MLX5DR_ACTION_TYP_FT) |\n+\t\tBIT(MLX5DR_ACTION_TYP_MISS) |\n+\t\tBIT(MLX5DR_ACTION_TYP_DROP),\n+\t\tBIT(MLX5DR_ACTION_TYP_LAST),\n+\t},\n+\t[MLX5DR_TABLE_TYPE_FDB] = {\n+\t\tBIT(MLX5DR_ACTION_TYP_TNL_L2_TO_L2) |\n+\t\tBIT(MLX5DR_ACTION_TYP_TNL_L3_TO_L2),\n+\t\tBIT(MLX5DR_ACTION_TYP_POP_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_POP_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_CTR),\n+\t\tBIT(MLX5DR_ACTION_TYP_ASO_METER),\n+\t\tBIT(MLX5DR_ACTION_TYP_ASO_CT),\n+\t\tBIT(MLX5DR_ACTION_TYP_PUSH_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_PUSH_VLAN),\n+\t\tBIT(MLX5DR_ACTION_TYP_MODIFY_HDR),\n+\t\tBIT(MLX5DR_ACTION_TYP_L2_TO_TNL_L2) |\n+\t\tBIT(MLX5DR_ACTION_TYP_L2_TO_TNL_L3),\n+\t\tBIT(MLX5DR_ACTION_TYP_FT) |\n+\t\tBIT(MLX5DR_ACTION_TYP_MISS) |\n+\t\tBIT(MLX5DR_ACTION_TYP_VPORT) |\n+\t\tBIT(MLX5DR_ACTION_TYP_DROP),\n+\t\tBIT(MLX5DR_ACTION_TYP_LAST),\n+\t},\n+};\n+\n+static int mlx5dr_action_get_shared_stc_nic(struct mlx5dr_context *ctx,\n+\t\t\t\t\t    enum mlx5dr_context_shared_stc_type stc_type,\n+\t\t\t\t\t    uint8_t tbl_type)\n+{\n+\tstruct mlx5dr_cmd_stc_modify_attr stc_attr = {0};\n+\tstruct mlx5dr_action_shared_stc *shared_stc;\n+\tint ret;\n+\n+\tpthread_spin_lock(&ctx->ctrl_lock);\n+\tif (ctx->common_res[tbl_type].shared_stc[stc_type]) {\n+\t\trte_atomic32_add(&ctx->common_res[tbl_type].shared_stc[stc_type]->refcount, 1);\n+\t\tpthread_spin_unlock(&ctx->ctrl_lock);\n+\t\treturn 0;\n+\t}\n+\n+\tshared_stc = simple_calloc(1, sizeof(*shared_stc));\n+\tif (!shared_stc) 
{\n+\t\tDR_LOG(ERR, \"Failed to allocate memory for shared STCs\");\n+\t\trte_errno = ENOMEM;\n+\t\tgoto unlock_and_out;\n+\t}\n+\tswitch (stc_type) {\n+\tcase MLX5DR_CONTEXT_SHARED_STC_DECAP:\n+\t\tstc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;\n+\t\tstc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW5;\n+\t\tstc_attr.remove_header.decap = 0;\n+\t\tstc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;\n+\t\tstc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;\n+\t\tbreak;\n+\tcase MLX5DR_CONTEXT_SHARED_STC_POP:\n+\t\tstc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;\n+\t\tstc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW5;\n+\t\tstc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;\n+\t\tstc_attr.remove_words.num_of_words = MLX5DR_ACTION_HDR_LEN_L2_VLAN;\n+\t\tbreak;\n+\tdefault:\n+\t\tDR_LOG(ERR, \"No such type : stc_type\\n\");\n+\t\tassert(false);\n+\t\trte_errno = EINVAL;\n+\t\tgoto unlock_and_out;\n+\t}\n+\n+\tret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type,\n+\t\t\t\t\t     &shared_stc->remove_header);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to allocate shared decap l2 STC\");\n+\t\tgoto free_shared_stc;\n+\t}\n+\n+\tctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;\n+\n+\trte_atomic32_init(&ctx->common_res[tbl_type].shared_stc[stc_type]->refcount);\n+\trte_atomic32_set(&ctx->common_res[tbl_type].shared_stc[stc_type]->refcount, 1);\n+\n+\tpthread_spin_unlock(&ctx->ctrl_lock);\n+\n+\treturn 0;\n+\n+free_shared_stc:\n+\tsimple_free(shared_stc);\n+unlock_and_out:\n+\tpthread_spin_unlock(&ctx->ctrl_lock);\n+\treturn rte_errno;\n+}\n+\n+static void mlx5dr_action_put_shared_stc_nic(struct mlx5dr_context *ctx,\n+\t\t\t\t\t     enum mlx5dr_context_shared_stc_type stc_type,\n+\t\t\t\t\t     uint8_t tbl_type)\n+{\n+\tstruct mlx5dr_action_shared_stc *shared_stc;\n+\n+\tpthread_spin_lock(&ctx->ctrl_lock);\n+\tif (!rte_atomic32_dec_and_test(&ctx->common_res[tbl_type].shared_stc[stc_type]->refcount)) {\n+\t\tpthread_spin_unlock(&ctx->ctrl_lock);\n+\t\treturn;\n+\t}\n+\n+\tshared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];\n+\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &shared_stc->remove_header);\n+\tsimple_free(shared_stc);\n+\tctx->common_res[tbl_type].shared_stc[stc_type] = NULL;\n+\tpthread_spin_unlock(&ctx->ctrl_lock);\n+}\n+\n+static int mlx5dr_action_get_shared_stc(struct mlx5dr_action *action,\n+\t\t\t\t\tenum mlx5dr_context_shared_stc_type stc_type)\n+{\n+\tstruct mlx5dr_context *ctx = action->ctx;\n+\tint ret;\n+\n+\tif (stc_type >= MLX5DR_CONTEXT_SHARED_STC_MAX) {\n+\t\tassert(false);\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) {\n+\t\tret = mlx5dr_action_get_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_RX);\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed to allocate memory for RX shared STCs (type: %d)\",\n+\t\t\t       stc_type);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) {\n+\t\tret = mlx5dr_action_get_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_TX);\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed to allocate memory for TX shared STCs(type: %d)\",\n+\t\t\t       stc_type);\n+\t\t\tgoto clean_nic_rx_stc;\n+\t\t}\n+\t}\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB) {\n+\t\tret = mlx5dr_action_get_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_FDB);\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed to allocate memory for FDB shared STCs (type: 
%d)\",\n+\t\t\t       stc_type);\n+\t\t\tgoto clean_nic_tx_stc;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+clean_nic_tx_stc:\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_TX)\n+\t\tmlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_TX);\n+clean_nic_rx_stc:\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_RX)\n+\t\tmlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_RX);\n+\n+\treturn ret;\n+}\n+\n+static void mlx5dr_action_put_shared_stc(struct mlx5dr_action *action,\n+\t\t\t\t\t enum mlx5dr_context_shared_stc_type stc_type)\n+{\n+\tstruct mlx5dr_context *ctx = action->ctx;\n+\n+\tif (stc_type >= MLX5DR_CONTEXT_SHARED_STC_MAX) {\n+\t\tassert(false);\n+\t\treturn;\n+\t}\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_RX)\n+\t\tmlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_RX);\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_TX)\n+\t\tmlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_NIC_TX);\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB)\n+\t\tmlx5dr_action_put_shared_stc_nic(ctx, stc_type, MLX5DR_TABLE_TYPE_FDB);\n+}\n+\n+static void mlx5dr_action_print_combo(enum mlx5dr_action_type *user_actions)\n+{\n+\tDR_LOG(ERR, \"Invalid action_type sequence\");\n+\twhile (*user_actions != MLX5DR_ACTION_TYP_LAST) {\n+\t\tDR_LOG(ERR, \"%s\", mlx5dr_debug_action_type_to_str(*user_actions));\n+\t\tuser_actions++;\n+\t}\n+}\n+\n+bool mlx5dr_action_check_combo(enum mlx5dr_action_type *user_actions,\n+\t\t\t       enum mlx5dr_table_type table_type)\n+{\n+\tconst uint32_t *order_arr = action_order_arr[table_type];\n+\tuint8_t order_idx = 0;\n+\tuint8_t user_idx = 0;\n+\tbool valid_combo;\n+\n+\twhile (order_arr[order_idx] != BIT(MLX5DR_ACTION_TYP_LAST)) {\n+\t\t/* User action order validated move to next user action */\n+\t\tif (BIT(user_actions[user_idx]) & order_arr[order_idx])\n+\t\t\tuser_idx++;\n+\n+\t\t/* Iterate to the next supported action in the order */\n+\t\torder_idx++;\n+\t}\n+\n+\t/* Combination is valid if all user action were processed */\n+\tvalid_combo = user_actions[user_idx] == MLX5DR_ACTION_TYP_LAST;\n+\tif (!valid_combo)\n+\t\tmlx5dr_action_print_combo(user_actions);\n+\n+\treturn valid_combo;\n+}\n+\n+int mlx5dr_action_root_build_attr(struct mlx5dr_rule_action rule_actions[],\n+\t\t\t\t  uint32_t num_actions,\n+\t\t\t\t  struct mlx5dv_flow_action_attr *attr)\n+{\n+\tstruct mlx5dr_action *action;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < num_actions; i++) {\n+\t\taction = rule_actions[i].action;\n+\n+\t\tswitch (action->type) {\n+\t\tcase MLX5DR_ACTION_TYP_FT:\n+\t\tcase MLX5DR_ACTION_TYP_TIR:\n+\t\t\tattr[i].type = MLX5DV_FLOW_ACTION_DEST_DEVX;\n+\t\t\tattr[i].obj = action->devx_obj;\n+\t\t\tbreak;\n+\t\tcase MLX5DR_ACTION_TYP_TAG:\n+\t\t\tattr[i].type = MLX5DV_FLOW_ACTION_TAG;\n+\t\t\tattr[i].tag_value = rule_actions[i].tag.value;\n+\t\t\tbreak;\n+#ifdef HAVE_MLX5_DR_CREATE_ACTION_DEFAULT_MISS\n+\t\tcase MLX5DR_ACTION_TYP_MISS:\n+\t\t\tattr[i].type = MLX5DV_FLOW_ACTION_DEFAULT_MISS;\n+\t\t\tbreak;\n+#endif\n+\t\tcase MLX5DR_ACTION_TYP_DROP:\n+\t\t\tattr[i].type = MLX5DV_FLOW_ACTION_DROP;\n+\t\t\tbreak;\n+\t\tcase MLX5DR_ACTION_TYP_TNL_L2_TO_L2:\n+\t\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L2:\n+\t\tcase MLX5DR_ACTION_TYP_TNL_L3_TO_L2:\n+\t\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L3:\n+\t\tcase MLX5DR_ACTION_TYP_MODIFY_HDR:\n+\t\t\tattr[i].type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;\n+\t\t\tattr[i].action = action->flow_action;\n+\t\t\tbreak;\n+#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS\n+\t\tcase 
MLX5DR_ACTION_TYP_CTR:\n+\t\t\tattr[i].type = MLX5DV_FLOW_ACTION_COUNTERS_DEVX;\n+\t\t\tattr[i].obj = action->devx_obj;\n+\n+\t\t\tif (rule_actions[i].counter.offset) {\n+\t\t\t\tDR_LOG(ERR, \"Counter offset not supported over root\");\n+\t\t\t\trte_errno = ENOTSUP;\n+\t\t\t\treturn rte_errno;\n+\t\t\t}\n+\t\t\tbreak;\n+#endif\n+\t\tdefault:\n+\t\t\tDR_LOG(ERR, \"Found unsupported action type: %d\", action->type);\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static bool mlx5dr_action_fixup_stc_attr(struct mlx5dr_cmd_stc_modify_attr *stc_attr,\n+\t\t\t\t\t struct mlx5dr_cmd_stc_modify_attr *fixup_stc_attr,\n+\t\t\t\t\t enum mlx5dr_table_type table_type,\n+\t\t\t\t\t bool is_mirror)\n+{\n+\tstruct mlx5dr_devx_obj *devx_obj;\n+\tbool use_fixup = false;\n+\tuint32_t fw_tbl_type;\n+\n+\tfw_tbl_type = mlx5dr_table_get_res_fw_ft_type(table_type, is_mirror);\n+\n+\tswitch (stc_attr->action_type) {\n+\tcase MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:\n+\t\tif (!is_mirror)\n+\t\t\tdevx_obj = mlx5dr_pool_chunk_get_base_devx_obj(stc_attr->ste_table.ste_pool,\n+\t\t\t\t\t\t\t\t       &stc_attr->ste_table.ste);\n+\t\telse\n+\t\t\tdevx_obj =\n+\t\t\tmlx5dr_pool_chunk_get_base_devx_obj_mirror(stc_attr->ste_table.ste_pool,\n+\t\t\t\t\t\t\t\t   &stc_attr->ste_table.ste);\n+\n+\t\t*fixup_stc_attr = *stc_attr;\n+\t\tfixup_stc_attr->ste_table.ste_obj_id = devx_obj->id;\n+\t\tuse_fixup = true;\n+\t\tbreak;\n+\n+\tcase MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:\n+\t\tif (stc_attr->vport.vport_num != WIRE_PORT)\n+\t\t\tbreak;\n+\n+\t\tif (fw_tbl_type == FS_FT_FDB_RX) {\n+\t\t\t/* The FW doesn't allow to go back to wire in RX, so change it to DROP */\n+\t\t\tfixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;\n+\t\t\tfixup_stc_attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;\n+\t\t\tfixup_stc_attr->stc_offset = stc_attr->stc_offset;\n+\t\t} else if (fw_tbl_type == FS_FT_FDB_TX) {\n+\t\t\t/*The FW doesn't allow to go to wire in the TX by JUMP_TO_VPORT*/\n+\t\t\tfixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK;\n+\t\t\tfixup_stc_attr->action_offset = stc_attr->action_offset;\n+\t\t\tfixup_stc_attr->stc_offset = stc_attr->stc_offset;\n+\t\t\tfixup_stc_attr->vport.vport_num = 0;\n+\t\t\tfixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id;\n+\t\t}\n+\t\tuse_fixup = true;\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\treturn use_fixup;\n+}\n+\n+int mlx5dr_action_alloc_single_stc(struct mlx5dr_context *ctx,\n+\t\t\t\t   struct mlx5dr_cmd_stc_modify_attr *stc_attr,\n+\t\t\t\t   uint32_t table_type,\n+\t\t\t\t   struct mlx5dr_pool_chunk *stc)\n+{\n+\tstruct mlx5dr_cmd_stc_modify_attr cleanup_stc_attr = {0};\n+\tstruct mlx5dr_pool *stc_pool = ctx->stc_pool[table_type];\n+\tstruct mlx5dr_cmd_stc_modify_attr fixup_stc_attr = {0};\n+\tstruct mlx5dr_devx_obj *devx_obj_0;\n+\tbool use_fixup;\n+\tint ret;\n+\n+\tret = mlx5dr_pool_chunk_alloc(stc_pool, stc);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to allocate single action STC\");\n+\t\treturn ret;\n+\t}\n+\n+\tstc_attr->stc_offset = stc->offset;\n+\tdevx_obj_0 = mlx5dr_pool_chunk_get_base_devx_obj(stc_pool, stc);\n+\n+\t/* According to table/action limitation change the stc_attr */\n+\tuse_fixup = mlx5dr_action_fixup_stc_attr(stc_attr, &fixup_stc_attr, table_type, false);\n+\tret = mlx5dr_cmd_stc_modify(devx_obj_0, use_fixup ? 
&fixup_stc_attr : stc_attr);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to modify STC action_type %d tbl_type %d\",\n+\t\t       stc_attr->action_type, table_type);\n+\t\tgoto free_chunk;\n+\t}\n+\n+\t/* Modify the FDB peer */\n+\tif (table_type == MLX5DR_TABLE_TYPE_FDB) {\n+\t\tstruct mlx5dr_devx_obj *devx_obj_1;\n+\n+\t\tdevx_obj_1 = mlx5dr_pool_chunk_get_base_devx_obj_mirror(stc_pool, stc);\n+\n+\t\tuse_fixup = mlx5dr_action_fixup_stc_attr(stc_attr, &fixup_stc_attr,\n+\t\t\t\t\t\t\t table_type, true);\n+\t\tret = mlx5dr_cmd_stc_modify(devx_obj_1, use_fixup ? &fixup_stc_attr : stc_attr);\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed to modify peer STC action_type %d tbl_type %d\",\n+\t\t\t       stc_attr->action_type, table_type);\n+\t\t\tgoto clean_devx_obj_0;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+clean_devx_obj_0:\n+\tcleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;\n+\tcleanup_stc_attr.action_offset = MLX5DR_ACTION_OFFSET_HIT;\n+\tcleanup_stc_attr.stc_offset = stc->offset;\n+\tmlx5dr_cmd_stc_modify(devx_obj_0, &cleanup_stc_attr);\n+free_chunk:\n+\tmlx5dr_pool_chunk_free(stc_pool, stc);\n+\treturn rte_errno;\n+}\n+\n+void mlx5dr_action_free_single_stc(struct mlx5dr_context *ctx,\n+\t\t\t\t   uint32_t table_type,\n+\t\t\t\t   struct mlx5dr_pool_chunk *stc)\n+{\n+\tstruct mlx5dr_pool *stc_pool = ctx->stc_pool[table_type];\n+\tstruct mlx5dr_cmd_stc_modify_attr stc_attr = {0};\n+\tstruct mlx5dr_devx_obj *devx_obj;\n+\n+\t/* Modify the STC not to point to an object */\n+\tstc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;\n+\tstc_attr.action_offset = MLX5DR_ACTION_OFFSET_HIT;\n+\tstc_attr.stc_offset = stc->offset;\n+\tdevx_obj = mlx5dr_pool_chunk_get_base_devx_obj(stc_pool, stc);\n+\tmlx5dr_cmd_stc_modify(devx_obj, &stc_attr);\n+\n+\tif (table_type == MLX5DR_TABLE_TYPE_FDB) {\n+\t\tdevx_obj = mlx5dr_pool_chunk_get_base_devx_obj_mirror(stc_pool, stc);\n+\t\tmlx5dr_cmd_stc_modify(devx_obj, &stc_attr);\n+\t}\n+\n+\tmlx5dr_pool_chunk_free(stc_pool, stc);\n+}\n+\n+static uint32_t mlx5dr_action_get_mh_stc_type(__be64 pattern)\n+{\n+\tuint8_t action_type = MLX5_GET(set_action_in, &pattern, action_type);\n+\n+\tswitch (action_type) {\n+\tcase MLX5_MODIFICATION_TYPE_SET:\n+\t\treturn MLX5_IFC_STC_ACTION_TYPE_SET;\n+\tcase MLX5_MODIFICATION_TYPE_ADD:\n+\t\treturn MLX5_IFC_STC_ACTION_TYPE_ADD;\n+\tcase MLX5_MODIFICATION_TYPE_COPY:\n+\t\treturn MLX5_IFC_STC_ACTION_TYPE_COPY;\n+\tdefault:\n+\t\tassert(false);\n+\t\tDR_LOG(ERR, \"Unsupported action type: 0x%x\\n\", action_type);\n+\t\trte_errno = ENOTSUP;\n+\t\treturn MLX5_IFC_STC_ACTION_TYPE_NOP;\n+\t}\n+}\n+\n+static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,\n+\t\t\t\t\tstruct mlx5dr_devx_obj *obj,\n+\t\t\t\t\tstruct mlx5dr_cmd_stc_modify_attr *attr)\n+{\n+\tswitch (action->type) {\n+\tcase MLX5DR_ACTION_TYP_TAG:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW5;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_DROP:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_HIT;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_MISS:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_HIT;\n+\t\t/* TODO Need to support default miss for FDB */\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_CTR:\n+\t\tattr->id = obj->id;\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW0;\n+\t\tbreak;\n+\tcase 
MLX5DR_ACTION_TYP_TIR:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_HIT;\n+\t\tattr->dest_tir_num = obj->id;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_TNL_L3_TO_L2:\n+\tcase MLX5DR_ACTION_TYP_MODIFY_HDR:\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW6;\n+\t\tif (action->modify_header.num_of_actions == 1) {\n+\t\t\tattr->modify_action.data = action->modify_header.single_action;\n+\t\t\tattr->action_type = mlx5dr_action_get_mh_stc_type(attr->modify_action.data);\n+\n+\t\t\tif (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD ||\n+\t\t\t    attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET)\n+\t\t\t\tMLX5_SET(set_action_in, &attr->modify_action.data, data, 0);\n+\t\t} else {\n+\t\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST;\n+\t\t\tattr->modify_header.arg_id = action->modify_header.arg_obj->id;\n+\t\t\tattr->modify_header.pattern_id = action->modify_header.pattern_obj->id;\n+\t\t}\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_FT:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_HIT;\n+\t\tattr->dest_table_id = obj->id;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_TNL_L2_TO_L2:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW5;\n+\t\tattr->remove_header.decap = 1;\n+\t\tattr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;\n+\t\tattr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L2:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW6;\n+\t\tattr->insert_header.encap = 1;\n+\t\tattr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;\n+\t\tattr->insert_header.arg_id = action->reformat.arg_obj->id;\n+\t\tattr->insert_header.header_size = action->reformat.header_size;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L3:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW6;\n+\t\tattr->insert_header.encap = 1;\n+\t\tattr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;\n+\t\tattr->insert_header.arg_id = action->reformat.arg_obj->id;\n+\t\tattr->insert_header.header_size = action->reformat.header_size;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_ASO_METER:\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW6;\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;\n+\t\tattr->aso.aso_type = ASO_OPC_MOD_POLICER;\n+\t\tattr->aso.devx_obj_id = obj->id;\n+\t\tattr->aso.return_reg_id = action->aso.return_reg_id;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_ASO_CT:\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW6;\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;\n+\t\tattr->aso.aso_type = ASO_OPC_MOD_CONNECTION_TRACKING;\n+\t\tattr->aso.devx_obj_id = obj->id;\n+\t\tattr->aso.return_reg_id = action->aso.return_reg_id;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_VPORT:\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_HIT;\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;\n+\t\tattr->vport.vport_num = action->vport.vport_num;\n+\t\tattr->vport.esw_owner_vhca_id =\taction->vport.esw_owner_vhca_id;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_POP_VLAN:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW5;\n+\t\tattr->remove_words.start_anchor = 
MLX5_HEADER_ANCHOR_FIRST_VLAN_START;\n+\t\tattr->remove_words.num_of_words = MLX5DR_ACTION_HDR_LEN_L2_VLAN / 2;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_PUSH_VLAN:\n+\t\tattr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;\n+\t\tattr->action_offset = MLX5DR_ACTION_OFFSET_DW6;\n+\t\tattr->insert_header.encap = 0;\n+\t\tattr->insert_header.is_inline = 1;\n+\t\tattr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;\n+\t\tattr->insert_header.insert_offset = MLX5DR_ACTION_HDR_LEN_L2_MACS;\n+\t\tattr->insert_header.header_size = MLX5DR_ACTION_HDR_LEN_L2_VLAN;\n+\t\tbreak;\n+\tdefault:\n+\t\tDR_LOG(ERR, \"Invalid action type %d\", action->type);\n+\t\tassert(false);\n+\t}\n+}\n+\n+static int\n+mlx5dr_action_create_stcs(struct mlx5dr_action *action,\n+\t\t\t  struct mlx5dr_devx_obj *obj)\n+{\n+\tstruct mlx5dr_cmd_stc_modify_attr stc_attr = {0};\n+\tstruct mlx5dr_context *ctx = action->ctx;\n+\tint ret;\n+\n+\tmlx5dr_action_fill_stc_attr(action, obj, &stc_attr);\n+\n+\t/* Block unsupported parallel devx obj modify over the same base */\n+\tpthread_spin_lock(&ctx->ctrl_lock);\n+\n+\t/* Allocate STC for RX */\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_RX) {\n+\t\tret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr,\n+\t\t\t\t\t\t     MLX5DR_TABLE_TYPE_NIC_RX,\n+\t\t\t\t\t\t     &action->stc[MLX5DR_TABLE_TYPE_NIC_RX]);\n+\t\tif (ret)\n+\t\t\tgoto out_err;\n+\t}\n+\n+\t/* Allocate STC for TX */\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_TX) {\n+\t\tret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr,\n+\t\t\t\t\t\t     MLX5DR_TABLE_TYPE_NIC_TX,\n+\t\t\t\t\t\t     &action->stc[MLX5DR_TABLE_TYPE_NIC_TX]);\n+\t\tif (ret)\n+\t\t\tgoto free_nic_rx_stc;\n+\t}\n+\n+\t/* Allocate STC for FDB */\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB) {\n+\t\tret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr,\n+\t\t\t\t\t\t     MLX5DR_TABLE_TYPE_FDB,\n+\t\t\t\t\t\t     &action->stc[MLX5DR_TABLE_TYPE_FDB]);\n+\t\tif (ret)\n+\t\t\tgoto free_nic_tx_stc;\n+\t}\n+\n+\tpthread_spin_unlock(&ctx->ctrl_lock);\n+\n+\treturn 0;\n+\n+free_nic_tx_stc:\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_TX)\n+\t\tmlx5dr_action_free_single_stc(ctx,\n+\t\t\t\t\t      MLX5DR_TABLE_TYPE_NIC_TX,\n+\t\t\t\t\t      &action->stc[MLX5DR_TABLE_TYPE_NIC_TX]);\n+free_nic_rx_stc:\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_RX)\n+\t\tmlx5dr_action_free_single_stc(ctx,\n+\t\t\t\t\t      MLX5DR_TABLE_TYPE_NIC_RX,\n+\t\t\t\t\t      &action->stc[MLX5DR_TABLE_TYPE_NIC_RX]);\n+out_err:\n+\tpthread_spin_unlock(&ctx->ctrl_lock);\n+\treturn rte_errno;\n+}\n+\n+static void\n+mlx5dr_action_destroy_stcs(struct mlx5dr_action *action)\n+{\n+\tstruct mlx5dr_context *ctx = action->ctx;\n+\n+\t/* Block unsupported parallel devx obj modify over the same base */\n+\tpthread_spin_lock(&ctx->ctrl_lock);\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_RX)\n+\t\tmlx5dr_action_free_single_stc(ctx, MLX5DR_TABLE_TYPE_NIC_RX,\n+\t\t\t\t\t      &action->stc[MLX5DR_TABLE_TYPE_NIC_RX]);\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_TX)\n+\t\tmlx5dr_action_free_single_stc(ctx, MLX5DR_TABLE_TYPE_NIC_TX,\n+\t\t\t\t\t      &action->stc[MLX5DR_TABLE_TYPE_NIC_TX]);\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_HWS_FDB)\n+\t\tmlx5dr_action_free_single_stc(ctx, MLX5DR_TABLE_TYPE_FDB,\n+\t\t\t\t\t      &action->stc[MLX5DR_TABLE_TYPE_FDB]);\n+\n+\tpthread_spin_unlock(&ctx->ctrl_lock);\n+}\n+\n+static bool\n+mlx5dr_action_is_root_flags(uint32_t flags)\n+{\n+\treturn flags & (MLX5DR_ACTION_FLAG_ROOT_RX |\n+\t\t\tMLX5DR_ACTION_FLAG_ROOT_TX 
|\n+\t\t\tMLX5DR_ACTION_FLAG_ROOT_FDB);\n+}\n+\n+static bool\n+mlx5dr_action_is_hws_flags(uint32_t flags)\n+{\n+\treturn flags & (MLX5DR_ACTION_FLAG_HWS_RX |\n+\t\t\tMLX5DR_ACTION_FLAG_HWS_TX |\n+\t\t\tMLX5DR_ACTION_FLAG_HWS_FDB);\n+}\n+\n+static struct mlx5dr_action *\n+mlx5dr_action_create_generic(struct mlx5dr_context *ctx,\n+\t\t\t     uint32_t flags,\n+\t\t\t     enum mlx5dr_action_type action_type)\n+{\n+\tstruct mlx5dr_action *action;\n+\n+\tif (!mlx5dr_action_is_root_flags(flags) &&\n+\t    !mlx5dr_action_is_hws_flags(flags)) {\n+\t\tDR_LOG(ERR, \"Action flags must specify root or non root (HWS)\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\n+\taction = simple_calloc(1, sizeof(*action));\n+\tif (!action) {\n+\t\tDR_LOG(ERR, \"Failed to allocate memory for action [%d]\", action_type);\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\taction->ctx = ctx;\n+\taction->flags = flags;\n+\taction->type = action_type;\n+\n+\treturn action;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,\n+\t\t\t\tstruct mlx5dr_table *tbl,\n+\t\t\t\tuint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\tif (mlx5dr_table_is_root(tbl)) {\n+\t\tDR_LOG(ERR, \"Root table cannot be set as destination\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\n+\tif (mlx5dr_action_is_hws_flags(flags) &&\n+\t    mlx5dr_action_is_root_flags(flags)) {\n+\t\tDR_LOG(ERR, \"Same action cannot be used for root and non root\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_FT);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tif (mlx5dr_action_is_root_flags(flags)) {\n+\t\taction->devx_obj = tbl->ft->obj;\n+\t} else {\n+\t\tret = mlx5dr_action_create_stcs(action, tbl->ft);\n+\t\tif (ret)\n+\t\t\tgoto free_action;\n+\t}\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_dest_tir(struct mlx5dr_context *ctx,\n+\t\t\t      struct mlx5dr_devx_obj *obj,\n+\t\t\t      uint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\tif (mlx5dr_action_is_hws_flags(flags) &&\n+\t    mlx5dr_action_is_root_flags(flags)) {\n+\t\tDR_LOG(ERR, \"Same action cannot be used for root and non root\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_TIR);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tif (mlx5dr_action_is_root_flags(flags)) {\n+\t\taction->devx_obj = obj->obj;\n+\t} else {\n+\t\tret = mlx5dr_action_create_stcs(action, obj);\n+\t\tif (ret)\n+\t\t\tgoto free_action;\n+\t}\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_dest_drop(struct mlx5dr_context *ctx,\n+\t\t\t       uint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DROP);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tif (mlx5dr_action_is_hws_flags(flags)) {\n+\t\tret = mlx5dr_action_create_stcs(action, NULL);\n+\t\tif (ret)\n+\t\t\tgoto free_action;\n+\t}\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_default_miss(struct mlx5dr_context *ctx,\n+\t\t\t\t  uint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, 
MLX5DR_ACTION_TYP_MISS);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tif (mlx5dr_action_is_hws_flags(flags)) {\n+\t\tret = mlx5dr_action_create_stcs(action, NULL);\n+\t\tif (ret)\n+\t\t\tgoto free_action;\n+\t}\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_tag(struct mlx5dr_context *ctx,\n+\t\t\t uint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_TAG);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tif (mlx5dr_action_is_hws_flags(flags)) {\n+\t\tret = mlx5dr_action_create_stcs(action, NULL);\n+\t\tif (ret)\n+\t\t\tgoto free_action;\n+\t}\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+static struct mlx5dr_action *\n+mlx5dr_action_create_aso(struct mlx5dr_context *ctx,\n+\t\t\t enum mlx5dr_action_type action_type,\n+\t\t\t struct mlx5dr_devx_obj *devx_obj,\n+\t\t\t uint8_t return_reg_id,\n+\t\t\t uint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\tif (mlx5dr_action_is_root_flags(flags)) {\n+\t\tDR_LOG(ERR, \"ASO action cannot be used over root table\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, action_type);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\taction->aso.devx_obj = devx_obj;\n+\taction->aso.return_reg_id = return_reg_id;\n+\n+\tret = mlx5dr_action_create_stcs(action, devx_obj);\n+\tif (ret)\n+\t\tgoto free_action;\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_aso_meter(struct mlx5dr_context *ctx,\n+\t\t\t       struct mlx5dr_devx_obj *devx_obj,\n+\t\t\t       uint8_t return_reg_id,\n+\t\t\t       uint32_t flags)\n+{\n+\treturn mlx5dr_action_create_aso(ctx, MLX5DR_ACTION_TYP_ASO_METER,\n+\t\t\t\t\tdevx_obj, return_reg_id, flags);\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_aso_ct(struct mlx5dr_context *ctx,\n+\t\t\t    struct mlx5dr_devx_obj *devx_obj,\n+\t\t\t    uint8_t return_reg_id,\n+\t\t\t    uint32_t flags)\n+{\n+\treturn mlx5dr_action_create_aso(ctx, MLX5DR_ACTION_TYP_ASO_CT,\n+\t\t\t\t\tdevx_obj, return_reg_id, flags);\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_counter(struct mlx5dr_context *ctx,\n+\t\t\t     struct mlx5dr_devx_obj *obj,\n+\t\t\t     uint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\tif (mlx5dr_action_is_hws_flags(flags) &&\n+\t    mlx5dr_action_is_root_flags(flags)) {\n+\t\tDR_LOG(ERR, \"Same action cannot be used for root and non root\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_CTR);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tif (mlx5dr_action_is_root_flags(flags)) {\n+\t\taction->devx_obj = obj->obj;\n+\t} else {\n+\t\tret = mlx5dr_action_create_stcs(action, obj);\n+\t\tif (ret)\n+\t\t\tgoto free_action;\n+\t}\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+static int mlx5dr_action_create_dest_vport_hws(struct mlx5dr_context *ctx,\n+\t\t\t\t\t       struct mlx5dr_action *action,\n+\t\t\t\t\t       uint32_t ib_port_num)\n+{\n+\tstruct mlx5dr_cmd_query_vport_caps vport_caps = {0};\n+\tint ret;\n+\n+\tret = mlx5dr_cmd_query_ib_port(ctx->ibv_ctx, &vport_caps, ib_port_num);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed querying port %d\\n\", ib_port_num);\n+\t\treturn 
ret;\n+\t}\n+\taction->vport.vport_num = vport_caps.vport_num;\n+\taction->vport.esw_owner_vhca_id = vport_caps.esw_owner_vhca_id;\n+\n+\tret = mlx5dr_action_create_stcs(action, NULL);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed creating stc for port %d\\n\", ib_port_num);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_dest_vport(struct mlx5dr_context *ctx,\n+\t\t\t\tuint32_t ib_port_num,\n+\t\t\t\tuint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\tif (!(flags & MLX5DR_ACTION_FLAG_HWS_FDB)) {\n+\t\tDR_LOG(ERR, \"Vport action is supported for FDB only\\n\");\n+\t\trte_errno = EINVAL;\n+\t\treturn NULL;\n+\t}\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_VPORT);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tret = mlx5dr_action_create_dest_vport_hws(ctx, action, ib_port_num);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to create vport action HWS\\n\");\n+\t\tgoto free_action;\n+\t}\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_push_vlan(struct mlx5dr_context *ctx, uint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\tif (mlx5dr_action_is_root_flags(flags)) {\n+\t\tDR_LOG(ERR, \"Push vlan action not supported for root\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_PUSH_VLAN);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tret = mlx5dr_action_create_stcs(action, NULL);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed creating stc for push vlan\\n\");\n+\t\tgoto free_action;\n+\t}\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\tif (mlx5dr_action_is_root_flags(flags)) {\n+\t\tDR_LOG(ERR, \"Pop vlan action not supported for root\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\taction = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_POP_VLAN);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tret = mlx5dr_action_get_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_POP);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to create remove stc for reformat\");\n+\t\tgoto free_action;\n+\t}\n+\n+\tret = mlx5dr_action_create_stcs(action, NULL);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed creating stc for pop vlan\\n\");\n+\t\tgoto free_shared;\n+\t}\n+\n+\treturn action;\n+\n+free_shared:\n+\tmlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_POP);\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+static int\n+mlx5dr_action_conv_reformat_type_to_action(uint32_t reformat_type,\n+\t\t\t\t\t   enum mlx5dr_action_type *action_type)\n+{\n+\tswitch (reformat_type) {\n+\tcase MLX5DR_ACTION_REFORMAT_TYPE_TNL_L2_TO_L2:\n+\t\t*action_type = MLX5DR_ACTION_TYP_TNL_L2_TO_L2;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L2:\n+\t\t*action_type = MLX5DR_ACTION_TYP_L2_TO_TNL_L2;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_REFORMAT_TYPE_TNL_L3_TO_L2:\n+\t\t*action_type = MLX5DR_ACTION_TYP_TNL_L3_TO_L2;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_REFORMAT_TYPE_L2_TO_TNL_L3:\n+\t\t*action_type = MLX5DR_ACTION_TYP_L2_TO_TNL_L3;\n+\t\tbreak;\n+\tdefault:\n+\t\tDR_LOG(ERR, \"Invalid reformat type requested\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\treturn 0;\n+}\n+\n+static 
void\n+mlx5dr_action_conv_reformat_to_verbs(uint32_t action_type,\n+\t\t\t\t     uint32_t *verb_reformat_type)\n+{\n+\tswitch (action_type) {\n+\tcase MLX5DR_ACTION_TYP_TNL_L2_TO_L2:\n+\t\t*verb_reformat_type =\n+\t\t\tMLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L2:\n+\t\t*verb_reformat_type =\n+\t\t\tMLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_TNL_L3_TO_L2:\n+\t\t*verb_reformat_type =\n+\t\t\tMLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L3:\n+\t\t*verb_reformat_type =\n+\t\t\tMLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;\n+\t\tbreak;\n+\t}\n+}\n+\n+static int\n+mlx5dr_action_conv_flags_to_ft_type(uint32_t flags, enum mlx5dv_flow_table_type *ft_type)\n+{\n+\tif (flags & MLX5DR_ACTION_FLAG_ROOT_RX) {\n+\t\t*ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;\n+\t} else if (flags & MLX5DR_ACTION_FLAG_ROOT_TX) {\n+\t\t*ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;\n+#ifdef HAVE_MLX5DV_FLOW_MATCHER_FT_TYPE\n+\t} else if (flags & MLX5DR_ACTION_FLAG_ROOT_FDB) {\n+\t\t*ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;\n+#endif\n+\t} else {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn 1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_action_create_reformat_root(struct mlx5dr_action *action,\n+\t\t\t\t   size_t data_sz,\n+\t\t\t\t   void *data)\n+{\n+\tenum mlx5dv_flow_table_type ft_type = 0; /*fix compilation warn*/\n+\tuint32_t verb_reformat_type = 0;\n+\tint ret;\n+\n+\t/* Convert action to FT type and verbs reformat type */\n+\tret = mlx5dr_action_conv_flags_to_ft_type(action->flags, &ft_type);\n+\tif (ret)\n+\t\treturn rte_errno;\n+\n+\tmlx5dr_action_conv_reformat_to_verbs(action->type, &verb_reformat_type);\n+\n+\t/* Create the reformat type for root table */\n+\taction->flow_action =\n+\t\tmlx5_glue->dv_create_flow_action_packet_reformat_root(action->ctx->ibv_ctx,\n+\t\t\t\t\t\t\t\t      data_sz,\n+\t\t\t\t\t\t\t\t      data,\n+\t\t\t\t\t\t\t\t      verb_reformat_type,\n+\t\t\t\t\t\t\t\t      ft_type);\n+\tif (!action->flow_action) {\n+\t\trte_errno = errno;\n+\t\treturn rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int mlx5dr_action_handle_reformat_args(struct mlx5dr_context *ctx,\n+\t\t\t\t\t      size_t data_sz,\n+\t\t\t\t\t      void *data,\n+\t\t\t\t\t      uint32_t bulk_size,\n+\t\t\t\t\t      struct mlx5dr_action *action)\n+{\n+\tuint32_t args_log_size;\n+\tint ret;\n+\n+\tif (data_sz % 2 != 0) {\n+\t\tDR_LOG(ERR, \"Data size should be multiply of 2\");\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\taction->reformat.header_size = data_sz;\n+\n+\targs_log_size = mlx5dr_arg_data_size_to_arg_log_size(data_sz);\n+\tif (args_log_size >= MLX5DR_ARG_CHUNK_SIZE_MAX) {\n+\t\tDR_LOG(ERR, \"Data size is bigger than supported\");\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\targs_log_size += bulk_size;\n+\n+\tif (!mlx5dr_arg_is_valid_arg_request_size(ctx, args_log_size)) {\n+\t\tDR_LOG(ERR, \"Arg size %d does not fit FW requests\",\n+\t\t       args_log_size);\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\taction->reformat.arg_obj = mlx5dr_cmd_arg_create(ctx->ibv_ctx,\n+\t\t\t\t\t\t\t args_log_size,\n+\t\t\t\t\t\t\t ctx->pd_num);\n+\tif (!action->reformat.arg_obj) {\n+\t\tDR_LOG(ERR, \"Failed to create arg for reformat\");\n+\t\treturn rte_errno;\n+\t}\n+\n+\t/* When INLINE need to write the arg data */\n+\tif (action->flags & MLX5DR_ACTION_FLAG_SHARED) {\n+\t\tret = 
mlx5dr_arg_write_inline_arg_data(ctx,\n+\t\t\t\t\t\t       action->reformat.arg_obj->id,\n+\t\t\t\t\t\t       data,\n+\t\t\t\t\t\t       data_sz);\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed to write inline arg for reformat\");\n+\t\t\tgoto free_arg;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+free_arg:\n+\tmlx5dr_cmd_destroy_obj(action->reformat.arg_obj);\n+\treturn ret;\n+}\n+\n+static int mlx5dr_action_handle_l2_to_tunnel_l2(struct mlx5dr_context *ctx,\n+\t\t\t\t\t\tsize_t data_sz,\n+\t\t\t\t\t\tvoid *data,\n+\t\t\t\t\t\tuint32_t bulk_size,\n+\t\t\t\t\t\tstruct mlx5dr_action *action)\n+{\n+\tint ret;\n+\n+\tret = mlx5dr_action_handle_reformat_args(ctx, data_sz, data, bulk_size,\n+\t\t\t\t\t\t action);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to create args for reformat\");\n+\t\treturn ret;\n+\t}\n+\n+\tret = mlx5dr_action_create_stcs(action, NULL);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to create stc for reformat\");\n+\t\tgoto free_arg;\n+\t}\n+\n+\treturn 0;\n+\n+free_arg:\n+\tmlx5dr_cmd_destroy_obj(action->reformat.arg_obj);\n+\treturn ret;\n+}\n+\n+static int mlx5dr_action_get_shared_stc_offset(struct mlx5dr_context_common_res *common_res,\n+\t\t\t\t\t       enum mlx5dr_context_shared_stc_type stc_type)\n+{\n+\treturn common_res->shared_stc[stc_type]->remove_header.offset;\n+}\n+\n+static int mlx5dr_action_handle_l2_to_tunnel_l3(struct mlx5dr_context *ctx,\n+\t\t\t\t\t\tsize_t data_sz,\n+\t\t\t\t\t\tvoid *data,\n+\t\t\t\t\t\tuint32_t bulk_size,\n+\t\t\t\t\t\tstruct mlx5dr_action *action)\n+{\n+\tint ret;\n+\n+\tret = mlx5dr_action_handle_reformat_args(ctx, data_sz, data, bulk_size,\n+\t\t\t\t\t\t action);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to create args for reformat\");\n+\t\treturn ret;\n+\t}\n+\n+\t/* The action is remove-l2-header + insert-l3-header */\n+\tret = mlx5dr_action_get_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to create remove stc for reformat\");\n+\t\tgoto free_arg;\n+\t}\n+\n+\tret = mlx5dr_action_create_stcs(action, NULL);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to create insert stc for reformat\");\n+\t\tgoto down_shared;\n+\t}\n+\n+\treturn 0;\n+\n+down_shared:\n+\tmlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP);\n+free_arg:\n+\tmlx5dr_cmd_destroy_obj(action->reformat.arg_obj);\n+\treturn ret;\n+}\n+\n+static void mlx5dr_action_prepare_decap_l3_actions(size_t data_sz,\n+\t\t\t\t\t\t   uint8_t *mh_data,\n+\t\t\t\t\t\t   int *num_of_actions)\n+{\n+\tint actions;\n+\tuint32_t i;\n+\n+\t/* Remove L2L3 outer headers */\n+\tMLX5_SET(stc_ste_param_remove, mh_data, action_type,\n+\t\t MLX5_MODIFICATION_TYPE_REMOVE);\n+\tMLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1);\n+\tMLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor,\n+\t\t MLX5_HEADER_ANCHOR_PACKET_START);\n+\tMLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor,\n+\t\t MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4);\n+\tmh_data += MLX5DR_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */\n+\tactions = 1;\n+\n+\t/* Add the new header using inline action 4Byte at a time, the header\n+\t * is added in reversed order to the beginning of the packet to avoid\n+\t * incorrect parsing by the HW. 
Since header is 14B or 18B an extra\n+\t * two bytes are padded and later removed.\n+\t */\n+\tfor (i = 0; i < data_sz / MLX5DR_ACTION_INLINE_DATA_SIZE + 1; i++) {\n+\t\tMLX5_SET(stc_ste_param_insert, mh_data, action_type,\n+\t\t\t MLX5_MODIFICATION_TYPE_INSERT);\n+\t\tMLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1);\n+\t\tMLX5_SET(stc_ste_param_insert, mh_data, insert_anchor,\n+\t\t\t MLX5_HEADER_ANCHOR_PACKET_START);\n+\t\tMLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2);\n+\t\tmh_data += MLX5DR_ACTION_DOUBLE_SIZE;\n+\t\tactions++;\n+\t}\n+\n+\t/* Remove first 2 extra bytes */\n+\tMLX5_SET(stc_ste_param_remove_words, mh_data, action_type,\n+\t\t MLX5_MODIFICATION_TYPE_REMOVE_WORDS);\n+\tMLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor,\n+\t\t MLX5_HEADER_ANCHOR_PACKET_START);\n+\t/* The hardware expects here size in words (2 bytes) */\n+\tMLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1);\n+\tactions++;\n+\n+\t*num_of_actions = actions;\n+}\n+\n+static int\n+mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_context *ctx,\n+\t\t\t\t     size_t data_sz,\n+\t\t\t\t     void *data,\n+\t\t\t\t     uint32_t bulk_size,\n+\t\t\t\t     struct mlx5dr_action *action)\n+{\n+\tuint8_t mh_data[MLX5DR_ACTION_REFORMAT_DATA_SIZE] = {0};\n+\tint num_of_actions;\n+\tint mh_data_size;\n+\tint ret;\n+\n+\tif (data_sz != MLX5DR_ACTION_HDR_LEN_L2 &&\n+\t    data_sz != MLX5DR_ACTION_HDR_LEN_L2_W_VLAN) {\n+\t\tDR_LOG(ERR, \"Data size is not supported for decap-l3\\n\");\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tmlx5dr_action_prepare_decap_l3_actions(data_sz, mh_data, &num_of_actions);\n+\n+\tmh_data_size = num_of_actions * MLX5DR_MODIFY_ACTION_SIZE;\n+\n+\tret = mlx5dr_pat_arg_create_modify_header(ctx, action, mh_data_size,\n+\t\t\t\t\t\t  (__be64 *)mh_data, bulk_size);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed allocating modify-header for decap-l3\\n\");\n+\t\treturn ret;\n+\t}\n+\n+\tret = mlx5dr_action_create_stcs(action, NULL);\n+\tif (ret)\n+\t\tgoto free_mh_obj;\n+\n+\tif (action->flags & MLX5DR_ACTION_FLAG_SHARED) {\n+\t\tmlx5dr_action_prepare_decap_l3_data(data, mh_data, num_of_actions);\n+\t\tret = mlx5dr_arg_write_inline_arg_data(ctx,\n+\t\t\t\t\t\t       action->modify_header.arg_obj->id,\n+\t\t\t\t\t\t       (uint8_t *)mh_data,\n+\t\t\t\t\t\t       num_of_actions *\n+\t\t\t\t\t\t       MLX5DR_MODIFY_ACTION_SIZE);\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed writing INLINE arg decap_l3\");\n+\t\t\tgoto clean_stc;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+clean_stc:\n+\tmlx5dr_action_destroy_stcs(action);\n+free_mh_obj:\n+\tmlx5dr_pat_arg_destroy_modify_header(ctx, action);\n+\treturn ret;\n+}\n+\n+static int\n+mlx5dr_action_create_reformat_hws(struct mlx5dr_context *ctx,\n+\t\t\t\t  size_t data_sz,\n+\t\t\t\t  void *data,\n+\t\t\t\t  uint32_t bulk_size,\n+\t\t\t\t  struct mlx5dr_action *action)\n+{\n+\tint ret;\n+\n+\tswitch (action->type) {\n+\tcase MLX5DR_ACTION_TYP_TNL_L2_TO_L2:\n+\t\tret = mlx5dr_action_create_stcs(action, NULL);\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L2:\n+\t\tret = mlx5dr_action_handle_l2_to_tunnel_l2(ctx, data_sz, data, bulk_size, action);\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L3:\n+\t\tret = mlx5dr_action_handle_l2_to_tunnel_l3(ctx, data_sz, data, bulk_size, action);\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_TNL_L3_TO_L2:\n+\t\tret = mlx5dr_action_handle_tunnel_l3_to_l2(ctx, data_sz, data, bulk_size, action);\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tassert(false);\n+\t\trte_errno = 
ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_reformat(struct mlx5dr_context *ctx,\n+\t\t\t      enum mlx5dr_action_reformat_type reformat_type,\n+\t\t\t      size_t data_sz,\n+\t\t\t      void *inline_data,\n+\t\t\t      uint32_t log_bulk_size,\n+\t\t\t      uint32_t flags)\n+{\n+\tenum mlx5dr_action_type action_type;\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\tret = mlx5dr_action_conv_reformat_type_to_action(reformat_type, &action_type);\n+\tif (ret)\n+\t\treturn NULL;\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, action_type);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tif (mlx5dr_action_is_root_flags(flags)) {\n+\t\tif (log_bulk_size) {\n+\t\t\tDR_LOG(ERR, \"Bulk reformat not supported over root\");\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\tgoto free_action;\n+\t\t}\n+\n+\t\tret = mlx5dr_action_create_reformat_root(action, data_sz, inline_data);\n+\t\tif (ret)\n+\t\t\tgoto free_action;\n+\n+\t\treturn action;\n+\t}\n+\n+\tif (!mlx5dr_action_is_hws_flags(flags) ||\n+\t    ((flags & MLX5DR_ACTION_FLAG_SHARED) && log_bulk_size)) {\n+\t\tDR_LOG(ERR, \"Reformat flags don't fit HWS (flags: 0x%x)\\n\",\n+\t\t\tflags);\n+\t\trte_errno = EINVAL;\n+\t\tgoto free_action;\n+\t}\n+\n+\tret = mlx5dr_action_create_reformat_hws(ctx, data_sz, inline_data, log_bulk_size, action);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to create reformat.\\n\");\n+\t\trte_errno = EINVAL;\n+\t\tgoto free_action;\n+\t}\n+\n+\treturn action;\n+\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+static int\n+mlx5dr_action_create_modify_header_root(struct mlx5dr_action *action,\n+\t\t\t\t\tsize_t actions_sz,\n+\t\t\t\t\t__be64 *actions)\n+{\n+\tenum mlx5dv_flow_table_type ft_type = 0;\n+\tint ret;\n+\n+\tret = mlx5dr_action_conv_flags_to_ft_type(action->flags, &ft_type);\n+\tif (ret)\n+\t\treturn rte_errno;\n+\n+\taction->flow_action =\n+\t\tmlx5_glue->dv_create_flow_action_modify_header_root(action->ctx->ibv_ctx,\n+\t\t\t\t\t\t\t\t    actions_sz,\n+\t\t\t\t\t\t\t\t    (uint64_t *)actions,\n+\t\t\t\t\t\t\t\t    ft_type);\n+\tif (!action->flow_action) {\n+\t\trte_errno = errno;\n+\t\treturn rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+struct mlx5dr_action *\n+mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,\n+\t\t\t\t   size_t pattern_sz,\n+\t\t\t\t   __be64 pattern[],\n+\t\t\t\t   uint32_t log_bulk_size,\n+\t\t\t\t   uint32_t flags)\n+{\n+\tstruct mlx5dr_action *action;\n+\tint ret;\n+\n+\taction = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_MODIFY_HDR);\n+\tif (!action)\n+\t\treturn NULL;\n+\n+\tif (mlx5dr_action_is_root_flags(flags)) {\n+\t\tif (log_bulk_size) {\n+\t\t\tDR_LOG(ERR, \"Bulk modify-header not supported over root\");\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\tgoto free_action;\n+\t\t}\n+\t\tret = mlx5dr_action_create_modify_header_root(action, pattern_sz, pattern);\n+\t\tif (ret)\n+\t\t\tgoto free_action;\n+\n+\t\treturn action;\n+\t}\n+\n+\tif (!mlx5dr_action_is_hws_flags(flags) ||\n+\t    ((flags & MLX5DR_ACTION_FLAG_SHARED) && log_bulk_size)) {\n+\t\tDR_LOG(ERR, \"Flags don't fit hws (flags: 0x%x, log_bulk_size: %d)\\n\",\n+\t\t\tflags, log_bulk_size);\n+\t\trte_errno = EINVAL;\n+\t\tgoto free_action;\n+\t}\n+\n+\tif (pattern_sz / MLX5DR_MODIFY_ACTION_SIZE == 1) {\n+\t\t/* Optimize single modify action to be used inline */\n+\t\taction->modify_header.single_action = pattern[0];\n+\t\taction->modify_header.num_of_actions = 1;\n+\t\taction->modify_header.single_action_type 
=\n+\t\t\tMLX5_GET(set_action_in, pattern, action_type);\n+\t} else {\n+\t\t/* Use multi action pattern and argument */\n+\t\tret = mlx5dr_pat_arg_create_modify_header(ctx, action, pattern_sz,\n+\t\t\t\t\t\t\t  pattern, log_bulk_size);\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed allocating modify-header\\n\");\n+\t\t\tgoto free_action;\n+\t\t}\n+\t}\n+\n+\tret = mlx5dr_action_create_stcs(action, NULL);\n+\tif (ret)\n+\t\tgoto free_mh_obj;\n+\n+\treturn action;\n+\n+free_mh_obj:\n+\tif (action->modify_header.num_of_actions > 1)\n+\t\tmlx5dr_pat_arg_destroy_modify_header(ctx, action);\n+free_action:\n+\tsimple_free(action);\n+\treturn NULL;\n+}\n+\n+static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)\n+{\n+\tswitch (action->type) {\n+\tcase MLX5DR_ACTION_TYP_TIR:\n+\tcase MLX5DR_ACTION_TYP_MISS:\n+\tcase MLX5DR_ACTION_TYP_TAG:\n+\tcase MLX5DR_ACTION_TYP_DROP:\n+\tcase MLX5DR_ACTION_TYP_CTR:\n+\tcase MLX5DR_ACTION_TYP_FT:\n+\tcase MLX5DR_ACTION_TYP_TNL_L2_TO_L2:\n+\tcase MLX5DR_ACTION_TYP_ASO_METER:\n+\tcase MLX5DR_ACTION_TYP_ASO_CT:\n+\tcase MLX5DR_ACTION_TYP_PUSH_VLAN:\n+\t\tmlx5dr_action_destroy_stcs(action);\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_POP_VLAN:\n+\t\tmlx5dr_action_destroy_stcs(action);\n+\t\tmlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_POP);\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_TNL_L3_TO_L2:\n+\tcase MLX5DR_ACTION_TYP_MODIFY_HDR:\n+\t\tmlx5dr_action_destroy_stcs(action);\n+\t\tif (action->modify_header.num_of_actions > 1)\n+\t\t\tmlx5dr_pat_arg_destroy_modify_header(action->ctx, action);\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L3:\n+\t\tmlx5dr_action_destroy_stcs(action);\n+\t\tmlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_DECAP);\n+\t\tmlx5dr_cmd_destroy_obj(action->reformat.arg_obj);\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L2:\n+\t\tmlx5dr_action_destroy_stcs(action);\n+\t\tmlx5dr_cmd_destroy_obj(action->reformat.arg_obj);\n+\t\tbreak;\n+\t}\n+}\n+\n+static void mlx5dr_action_destroy_root(struct mlx5dr_action *action)\n+{\n+\tswitch (action->type) {\n+\tcase MLX5DR_ACTION_TYP_TNL_L2_TO_L2:\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L2:\n+\tcase MLX5DR_ACTION_TYP_TNL_L3_TO_L2:\n+\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L3:\n+\tcase MLX5DR_ACTION_TYP_MODIFY_HDR:\n+\t\tibv_destroy_flow_action(action->flow_action);\n+\t\tbreak;\n+\t}\n+}\n+\n+int mlx5dr_action_destroy(struct mlx5dr_action *action)\n+{\n+\tif (mlx5dr_action_is_root_flags(action->flags))\n+\t\tmlx5dr_action_destroy_root(action);\n+\telse\n+\t\tmlx5dr_action_destroy_hws(action);\n+\n+\tsimple_free(action);\n+\treturn 0;\n+}\n+\n+/* Called under pthread_spin_lock(&ctx->ctrl_lock) */\n+int mlx5dr_action_get_default_stc(struct mlx5dr_context *ctx,\n+\t\t\t\t  uint8_t tbl_type)\n+{\n+\tstruct mlx5dr_cmd_stc_modify_attr stc_attr = {0};\n+\tstruct mlx5dr_action_default_stc *default_stc;\n+\tint ret;\n+\n+\tif (ctx->common_res[tbl_type].default_stc) {\n+\t\tctx->common_res[tbl_type].default_stc->refcount++;\n+\t\treturn 0;\n+\t}\n+\n+\tdefault_stc = simple_calloc(1, sizeof(*default_stc));\n+\tif (!default_stc) {\n+\t\tDR_LOG(ERR, \"Failed to allocate memory for default STCs\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tstc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;\n+\tstc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW0;\n+\tret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type,\n+\t\t\t\t\t     &default_stc->nop_ctr);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to allocate default counter STC\");\n+\t\tgoto 
free_default_stc;\n+\t}\n+\n+\tstc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW5;\n+\tret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type,\n+\t\t\t\t\t     &default_stc->nop_dw5);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to allocate default NOP DW5 STC\");\n+\t\tgoto free_nop_ctr;\n+\t}\n+\n+\tstc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW6;\n+\tret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type,\n+\t\t\t\t\t     &default_stc->nop_dw6);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to allocate default NOP DW6 STC\");\n+\t\tgoto free_nop_dw5;\n+\t}\n+\n+\tstc_attr.action_offset = MLX5DR_ACTION_OFFSET_DW7;\n+\tret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type,\n+\t\t\t\t\t     &default_stc->nop_dw7);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to allocate default NOP DW7 STC\");\n+\t\tgoto free_nop_dw6;\n+\t}\n+\n+\tstc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;\n+\tstc_attr.action_offset = MLX5DR_ACTION_OFFSET_HIT;\n+\tret = mlx5dr_action_alloc_single_stc(ctx, &stc_attr, tbl_type,\n+\t\t\t\t\t     &default_stc->default_hit);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to allocate default allow STC\");\n+\t\tgoto free_nop_dw7;\n+\t}\n+\n+\tctx->common_res[tbl_type].default_stc = default_stc;\n+\tctx->common_res[tbl_type].default_stc->refcount++;\n+\n+\treturn 0;\n+\n+free_nop_dw7:\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);\n+free_nop_dw6:\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);\n+free_nop_dw5:\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);\n+free_nop_ctr:\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);\n+free_default_stc:\n+\tsimple_free(default_stc);\n+\treturn rte_errno;\n+}\n+\n+void mlx5dr_action_put_default_stc(struct mlx5dr_context *ctx,\n+\t\t\t\t   uint8_t tbl_type)\n+{\n+\tstruct mlx5dr_action_default_stc *default_stc;\n+\n+\tdefault_stc = ctx->common_res[tbl_type].default_stc;\n+\n+\tdefault_stc = ctx->common_res[tbl_type].default_stc;\n+\tif (--default_stc->refcount)\n+\t\treturn;\n+\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);\n+\tmlx5dr_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);\n+\tsimple_free(default_stc);\n+\tctx->common_res[tbl_type].default_stc = NULL;\n+}\n+\n+static void mlx5dr_action_modify_write(struct mlx5dr_send_engine *queue,\n+\t\t\t\t       uint32_t arg_idx,\n+\t\t\t\t       uint8_t *arg_data,\n+\t\t\t\t       uint16_t num_of_actions)\n+{\n+\tmlx5dr_arg_write(queue, NULL, arg_idx, arg_data,\n+\t\t\t num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);\n+}\n+\n+void\n+mlx5dr_action_prepare_decap_l3_data(uint8_t *src, uint8_t *dst,\n+\t\t\t\t    uint16_t num_of_actions)\n+{\n+\tuint8_t *e_src;\n+\tint i;\n+\n+\t/* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes\n+\t * copy from end of src to the start of dst.\n+\t * move to the end, 2 is the leftover from 14B or 18B\n+\t */\n+\tif (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)\n+\t\te_src = src + MLX5DR_ACTION_HDR_LEN_L2;\n+\telse\n+\t\te_src = src + MLX5DR_ACTION_HDR_LEN_L2_W_VLAN;\n+\n+\t/* Move dst over the first remove action + zero data */\n+\tdst += MLX5DR_ACTION_DOUBLE_SIZE;\n+\t/* Move dst over the first insert ctrl action */\n+\tdst += MLX5DR_ACTION_DOUBLE_SIZE / 2;\n+\t/* 
Actions:\n+\t * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.\n+\t * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.\n+\t * the loop is without the last insertion.\n+\t */\n+\tfor (i = 0; i < num_of_actions - 3; i++) {\n+\t\te_src -= MLX5DR_ACTION_INLINE_DATA_SIZE;\n+\t\tmemcpy(dst, e_src, MLX5DR_ACTION_INLINE_DATA_SIZE); /* data */\n+\t\tdst += MLX5DR_ACTION_DOUBLE_SIZE;\n+\t}\n+\t/* Copy the last 2 bytes after a gap of 2 bytes which will be removed */\n+\te_src -= MLX5DR_ACTION_INLINE_DATA_SIZE / 2;\n+\tdst += MLX5DR_ACTION_INLINE_DATA_SIZE / 2;\n+\tmemcpy(dst, e_src, 2);\n+}\n+\n+static struct mlx5dr_actions_wqe_setter *\n+mlx5dr_action_setter_find_first(struct mlx5dr_actions_wqe_setter *setter,\n+\t\t\t\tuint8_t req_flags)\n+{\n+\t/* Use a new setter if requested flags are taken */\n+\twhile (setter->flags & req_flags)\n+\t\tsetter++;\n+\n+\t/* Use current setter if required flags are not used */\n+\treturn setter;\n+}\n+\n+static void\n+mlx5dr_action_apply_stc(struct mlx5dr_actions_apply_data *apply,\n+\t\t\tenum mlx5dr_action_stc_idx stc_idx,\n+\t\t\tuint8_t action_idx)\n+{\n+\tstruct mlx5dr_action *action = apply->rule_action[action_idx].action;\n+\n+\tapply->wqe_ctrl->stc_ix[stc_idx] =\n+\t\thtobe32(action->stc[apply->tbl_type].offset);\n+}\n+\n+static void\n+mlx5dr_action_setter_push_vlan(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t       struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tstruct mlx5dr_rule_action *rule_action;\n+\n+\trule_action = &apply->rule_action[setter->idx_double];\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;\n+\n+\tmlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double);\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;\n+}\n+\n+static void\n+mlx5dr_action_setter_modify_header(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\t   struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tstruct mlx5dr_rule_action *rule_action;\n+\tstruct mlx5dr_action *action;\n+\tuint32_t arg_sz, arg_idx;\n+\tuint8_t *single_action;\n+\n+\trule_action = &apply->rule_action[setter->idx_double];\n+\taction = rule_action->action;\n+\tmlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double);\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;\n+\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;\n+\n+\tif (action->modify_header.num_of_actions == 1) {\n+\t\tif (action->modify_header.single_action_type ==\n+\t\t    MLX5_MODIFICATION_TYPE_COPY) {\n+\t\t\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0;\n+\t\t\treturn;\n+\t\t}\n+\n+\t\tif (action->flags & MLX5DR_ACTION_FLAG_SHARED)\n+\t\t\tsingle_action = (uint8_t *)&action->modify_header.single_action;\n+\t\telse\n+\t\t\tsingle_action = rule_action->modify_header.data;\n+\n+\t\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] =\n+\t\t\t*(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);\n+\t} else {\n+\t\t/* Argument offset multiple with number of args per these actions */\n+\t\targ_sz = mlx5dr_arg_get_arg_size(action->modify_header.num_of_actions);\n+\t\targ_idx = rule_action->modify_header.offset * arg_sz;\n+\n+\t\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx);\n+\n+\t\tif (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) {\n+\t\t\tapply->require_dep = 1;\n+\t\t\tmlx5dr_action_modify_write(apply->queue,\n+\t\t\t\t\t\t   action->modify_header.arg_obj->id + arg_idx,\n+\t\t\t\t\t\t   rule_action->modify_header.data,\n+\t\t\t\t\t\t   
action->modify_header.num_of_actions);\n+\t\t}\n+\t}\n+}\n+\n+static void\n+mlx5dr_action_setter_insert_ptr(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\tstruct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tstruct mlx5dr_rule_action *rule_action;\n+\tuint32_t arg_idx, arg_sz;\n+\n+\trule_action = &apply->rule_action[setter->idx_double];\n+\n+\t/* Argument offset multiple on args required for header size */\n+\targ_sz = mlx5dr_arg_data_size_to_arg_size(rule_action->action->reformat.header_size);\n+\targ_idx = rule_action->reformat.offset * arg_sz;\n+\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx);\n+\n+\tmlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double);\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;\n+\n+\tif (!(rule_action->action->flags & MLX5DR_ACTION_FLAG_SHARED)) {\n+\t\tapply->require_dep = 1;\n+\t\tmlx5dr_arg_write(apply->queue, NULL,\n+\t\t\t\t rule_action->action->reformat.arg_obj->id + arg_idx,\n+\t\t\t\t rule_action->reformat.data,\n+\t\t\t\t rule_action->action->reformat.header_size);\n+\t}\n+}\n+\n+static void\n+mlx5dr_action_setter_tnl_l3_to_l2(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\t  struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tstruct mlx5dr_rule_action *rule_action;\n+\tstruct mlx5dr_action *action;\n+\tuint32_t arg_sz, arg_idx;\n+\n+\trule_action = &apply->rule_action[setter->idx_double];\n+\taction = rule_action->action;\n+\n+\t/* Argument offset multiple on args required for num of actions */\n+\targ_sz = mlx5dr_arg_get_arg_size(action->modify_header.num_of_actions);\n+\targ_idx = rule_action->reformat.offset * arg_sz;\n+\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(arg_idx);\n+\n+\tmlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double);\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;\n+\n+\tif (!(action->flags & MLX5DR_ACTION_FLAG_SHARED)) {\n+\t\tapply->require_dep = 1;\n+\t\tmlx5dr_arg_decapl3_write(apply->queue,\n+\t\t\t\t\t action->modify_header.arg_obj->id + arg_idx,\n+\t\t\t\t\t rule_action->reformat.data,\n+\t\t\t\t\t action->modify_header.num_of_actions);\n+\t}\n+}\n+\n+static void\n+mlx5dr_action_setter_aso(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tstruct mlx5dr_rule_action *rule_action;\n+\tuint32_t exe_aso_ctrl;\n+\tuint32_t offset;\n+\n+\trule_action = &apply->rule_action[setter->idx_double];\n+\n+\tswitch (rule_action->action->type) {\n+\tcase MLX5DR_ACTION_TYP_ASO_METER:\n+\t\t/* exe_aso_ctrl format:\n+\t\t * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]\n+\t\t */\n+\t\toffset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ;\n+\t\texe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ;\n+\t\texe_aso_ctrl |= rule_action->aso_meter.init_color <<\n+\t\t\t\tMLX5DR_ACTION_METER_INIT_COLOR_OFFSET;\n+\t\tbreak;\n+\tcase MLX5DR_ACTION_TYP_ASO_CT:\n+\t\t/* exe_aso_ctrl CT format:\n+\t\t * [STC only and reserved bits 31b][direction 1b]\n+\t\t */\n+\t\toffset = rule_action->aso_ct.offset / MLX5_ASO_CT_NUM_PER_OBJ;\n+\t\texe_aso_ctrl = rule_action->aso_ct.direction;\n+\t\tbreak;\n+\tdefault:\n+\t\tDR_LOG(ERR, \"Unsupported ASO action type: %d\", rule_action->action->type);\n+\t\trte_errno = ENOTSUP;\n+\t\treturn;\n+\t}\n+\n+\t/* aso_object_offset format: [24B] */\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 
htobe32(offset);\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = htobe32(exe_aso_ctrl);\n+\n+\tmlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW6, setter->idx_double);\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;\n+}\n+\n+static void\n+mlx5dr_action_setter_tag(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tstruct mlx5dr_rule_action *rule_action;\n+\n+\trule_action = &apply->rule_action[setter->idx_single];\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = htobe32(rule_action->tag.value);\n+\tmlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW5, setter->idx_single);\n+}\n+\n+static void\n+mlx5dr_action_setter_ctrl_ctr(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t      struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tstruct mlx5dr_rule_action *rule_action;\n+\n+\trule_action = &apply->rule_action[setter->idx_ctr];\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW0] = htobe32(rule_action->counter.offset);\n+\tmlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_CTRL, setter->idx_ctr);\n+}\n+\n+static void\n+mlx5dr_action_setter_single(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t    struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;\n+\tmlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_DW5, setter->idx_single);\n+}\n+\n+static void\n+mlx5dr_action_setter_single_double_pop(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\t       __rte_unused struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] =\n+\t\thtobe32(mlx5dr_action_get_shared_stc_offset(apply->common_res,\n+\t\t\t\t\t\t    MLX5DR_CONTEXT_SHARED_STC_POP));\n+}\n+\n+static void\n+mlx5dr_action_setter_hit(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = 0;\n+\tmlx5dr_action_apply_stc(apply, MLX5DR_ACTION_STC_IDX_HIT, setter->idx_hit);\n+}\n+\n+static void\n+mlx5dr_action_setter_default_hit(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\t __rte_unused struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = 0;\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_HIT] =\n+\t\thtobe32(apply->common_res->default_stc->default_hit.offset);\n+}\n+\n+static void\n+mlx5dr_action_setter_hit_next_action(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\t     __rte_unused struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_HIT_LSB] = htobe32(apply->next_direct_idx << 6);\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_HIT] = htobe32(apply->jump_to_action_stc);\n+}\n+\n+static void\n+mlx5dr_action_setter_common_decap(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\t  __rte_unused struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] =\n+\t\thtobe32(mlx5dr_action_get_shared_stc_offset(apply->common_res,\n+\t\t\t\t\t\t\t    MLX5DR_CONTEXT_SHARED_STC_DECAP));\n+}\n+\n+int mlx5dr_action_template_process(struct mlx5dr_action_template *at)\n+{\n+\tstruct mlx5dr_actions_wqe_setter *start_setter = at->setters + 1;\n+\tenum mlx5dr_action_type *action_type = at->action_type_arr;\n+\tstruct mlx5dr_actions_wqe_setter *setter = at->setters;\n+\tstruct mlx5dr_actions_wqe_setter *pop_setter = NULL;\n+\tstruct mlx5dr_actions_wqe_setter *last_setter;\n+\tint i;\n+\n+\t/* 
Note: Given action combination must be valid */\n+\n+\t/* Check if actions were already processed */\n+\tif (at->num_of_action_stes)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < MLX5DR_ACTION_MAX_STE; i++)\n+\t\tsetter[i].set_hit = &mlx5dr_action_setter_hit_next_action;\n+\n+\t/* The same action template setters can be used with jumbo or match\n+\t * STE, to support both cases we reserve the first setter for cases\n+\t * with jumbo STE to allow jump to the first action STE.\n+\t * This extra setter can be reduced in some cases on rule creation.\n+\t */\n+\tsetter = start_setter;\n+\tlast_setter = start_setter;\n+\n+\tfor (i = 0; i < at->num_actions; i++) {\n+\t\tswitch (action_type[i]) {\n+\t\tcase MLX5DR_ACTION_TYP_DROP:\n+\t\tcase MLX5DR_ACTION_TYP_TIR:\n+\t\tcase MLX5DR_ACTION_TYP_FT:\n+\t\tcase MLX5DR_ACTION_TYP_VPORT:\n+\t\tcase MLX5DR_ACTION_TYP_MISS:\n+\t\t\t/* Hit action */\n+\t\t\tlast_setter->flags |= ASF_HIT;\n+\t\t\tlast_setter->set_hit = &mlx5dr_action_setter_hit;\n+\t\t\tlast_setter->idx_hit = i;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_POP_VLAN:\n+\t\t\t/* Single remove header to header */\n+\t\t\tif (pop_setter) {\n+\t\t\t\t/* We have 2 pops, use the shared */\n+\t\t\t\tpop_setter->set_single = &mlx5dr_action_setter_single_double_pop;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tsetter = mlx5dr_action_setter_find_first(last_setter, ASF_SINGLE1 | ASF_MODIFY);\n+\t\t\tsetter->flags |= ASF_SINGLE1 | ASF_REPARSE | ASF_REMOVE;\n+\t\t\tsetter->set_single = &mlx5dr_action_setter_single;\n+\t\t\tsetter->idx_single = i;\n+\t\t\tpop_setter = setter;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_PUSH_VLAN:\n+\t\t\t/* Double insert inline */\n+\t\t\tsetter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);\n+\t\t\tsetter->flags |= ASF_DOUBLE | ASF_REPARSE | ASF_MODIFY;\n+\t\t\tsetter->set_double = &mlx5dr_action_setter_push_vlan;\n+\t\t\tsetter->idx_double = i;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_MODIFY_HDR:\n+\t\t\t/* Double modify header list */\n+\t\t\tsetter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);\n+\t\t\tsetter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_REPARSE;\n+\t\t\tsetter->set_double = &mlx5dr_action_setter_modify_header;\n+\t\t\tsetter->idx_double = i;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_ASO_METER:\n+\t\tcase MLX5DR_ACTION_TYP_ASO_CT:\n+\t\t\tsetter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE);\n+\t\t\tsetter->flags |= ASF_DOUBLE;\n+\t\t\tsetter->set_double = &mlx5dr_action_setter_aso;\n+\t\t\tsetter->idx_double = i;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_TNL_L2_TO_L2:\n+\t\t\t/* Single remove header to header */\n+\t\t\tsetter = mlx5dr_action_setter_find_first(last_setter, ASF_SINGLE1 | ASF_MODIFY);\n+\t\t\tsetter->flags |= ASF_SINGLE1 | ASF_REMOVE | ASF_REPARSE;\n+\t\t\tsetter->set_single = &mlx5dr_action_setter_single;\n+\t\t\tsetter->idx_single = i;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L2:\n+\t\t\t/* Double insert header with pointer */\n+\t\t\tsetter = mlx5dr_action_setter_find_first(last_setter, ASF_DOUBLE);\n+\t\t\tsetter->flags |= ASF_DOUBLE | ASF_REPARSE;\n+\t\t\tsetter->set_double = &mlx5dr_action_setter_insert_ptr;\n+\t\t\tsetter->idx_double = i;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_L2_TO_TNL_L3:\n+\t\t\t/* Single remove + Double insert header with pointer */\n+\t\t\tsetter = mlx5dr_action_setter_find_first(last_setter, ASF_SINGLE1 | ASF_DOUBLE);\n+\t\t\tsetter->flags |= ASF_SINGLE1 | ASF_DOUBLE | ASF_REPARSE | ASF_REMOVE;\n+\t\t\tsetter->set_double = 
&mlx5dr_action_setter_insert_ptr;\n+\t\t\tsetter->idx_double = i;\n+\t\t\tsetter->set_single = &mlx5dr_action_setter_common_decap;\n+\t\t\tsetter->idx_single = i;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_TNL_L3_TO_L2:\n+\t\t\t/* Double modify header list with remove and push inline */\n+\t\t\tsetter = mlx5dr_action_setter_find_first(last_setter,\n+\t\t\t\t\t\t\t\t ASF_DOUBLE | ASF_REMOVE);\n+\t\t\tsetter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_REPARSE;\n+\t\t\tsetter->set_double = &mlx5dr_action_setter_tnl_l3_to_l2;\n+\t\t\tsetter->idx_double = i;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_TAG:\n+\t\t\t/* Single TAG action, search for any room from the start */\n+\t\t\tsetter = mlx5dr_action_setter_find_first(start_setter, ASF_SINGLE1);\n+\t\t\tsetter->flags |= ASF_SINGLE1;\n+\t\t\tsetter->set_single = &mlx5dr_action_setter_tag;\n+\t\t\tsetter->idx_single = i;\n+\t\t\tbreak;\n+\n+\t\tcase MLX5DR_ACTION_TYP_CTR:\n+\t\t\t/* Control counter action\n+\t\t\t * TODO: Current counter executed first. Support is needed\n+\t\t\t *\t for single action counter action which is done last.\n+\t\t\t *\t Example: Decap + CTR\n+\t\t\t */\n+\t\t\tsetter = mlx5dr_action_setter_find_first(start_setter, ASF_CTR);\n+\t\t\tsetter->flags |= ASF_CTR;\n+\t\t\tsetter->set_ctr = &mlx5dr_action_setter_ctrl_ctr;\n+\t\t\tsetter->idx_ctr = i;\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\tDR_LOG(ERR, \"Unsupported action type: %d\", action_type[i]);\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\tassert(false);\n+\t\t\treturn rte_errno;\n+\t\t}\n+\n+\t\tlast_setter = RTE_MAX(setter, last_setter);\n+\t}\n+\n+\t/* Set default hit on the last STE if no hit action provided */\n+\tif (!(last_setter->flags & ASF_HIT))\n+\t\tlast_setter->set_hit = &mlx5dr_action_setter_default_hit;\n+\n+\tat->num_of_action_stes = last_setter - start_setter + 1;\n+\n+\t/* Check if action template doesn't require any action DWs */\n+\tat->only_term = (at->num_of_action_stes == 1) &&\n+\t\t!(last_setter->flags & ~(ASF_CTR | ASF_HIT));\n+\n+\treturn 0;\n+}\n+\n+struct mlx5dr_action_template *\n+mlx5dr_action_template_create(const enum mlx5dr_action_type action_type[])\n+{\n+\tstruct mlx5dr_action_template *at;\n+\tuint8_t num_actions = 0;\n+\tint i;\n+\n+\tat = simple_calloc(1, sizeof(*at));\n+\tif (!at) {\n+\t\tDR_LOG(ERR, \"Failed to allocate action template\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\twhile (action_type[num_actions++] != MLX5DR_ACTION_TYP_LAST)\n+\t\t;\n+\n+\tat->num_actions = num_actions - 1;\n+\tat->action_type_arr = simple_calloc(num_actions, sizeof(*action_type));\n+\tif (!at->action_type_arr) {\n+\t\tDR_LOG(ERR, \"Failed to allocate action type array\");\n+\t\trte_errno = ENOMEM;\n+\t\tgoto free_at;\n+\t}\n+\n+\tfor (i = 0; i < num_actions; i++)\n+\t\tat->action_type_arr[i] = action_type[i];\n+\n+\treturn at;\n+\n+free_at:\n+\tsimple_free(at);\n+\treturn NULL;\n+}\n+\n+int mlx5dr_action_template_destroy(struct mlx5dr_action_template *at)\n+{\n+\tsimple_free(at->action_type_arr);\n+\tsimple_free(at);\n+\treturn 0;\n+}\ndiff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h\nnew file mode 100644\nindex 0000000000..f14d91f994\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_action.h\n@@ -0,0 +1,253 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#ifndef MLX5DR_ACTION_H_\n+#define MLX5DR_ACTION_H_\n+\n+/* Max number of STEs needed for a rule (including match) */\n+#define MLX5DR_ACTION_MAX_STE 7\n+\n+enum 
mlx5dr_action_stc_idx {\n+\tMLX5DR_ACTION_STC_IDX_CTRL = 0,\n+\tMLX5DR_ACTION_STC_IDX_HIT = 1,\n+\tMLX5DR_ACTION_STC_IDX_DW5 = 2,\n+\tMLX5DR_ACTION_STC_IDX_DW6 = 3,\n+\tMLX5DR_ACTION_STC_IDX_DW7 = 4,\n+\tMLX5DR_ACTION_STC_IDX_MAX = 5,\n+\t/* STC Jumbo STE combo: CTR, Hit */\n+\tMLX5DR_ACTION_STC_IDX_LAST_JUMBO_STE = 1,\n+\t/* STC combo1: CTR, SINGLE, DOUBLE, Hit */\n+\tMLX5DR_ACTION_STC_IDX_LAST_COMBO1 = 3,\n+\t/* STC combo2: CTR, 3 x SINGLE, Hit */\n+\tMLX5DR_ACTION_STC_IDX_LAST_COMBO2 = 4,\n+};\n+\n+enum mlx5dr_action_offset {\n+\tMLX5DR_ACTION_OFFSET_DW0 = 0,\n+\tMLX5DR_ACTION_OFFSET_DW5 = 5,\n+\tMLX5DR_ACTION_OFFSET_DW6 = 6,\n+\tMLX5DR_ACTION_OFFSET_DW7 = 7,\n+\tMLX5DR_ACTION_OFFSET_HIT = 3,\n+\tMLX5DR_ACTION_OFFSET_HIT_LSB = 4,\n+};\n+\n+enum {\n+\tMLX5DR_ACTION_DOUBLE_SIZE = 8,\n+\tMLX5DR_ACTION_INLINE_DATA_SIZE = 4,\n+\tMLX5DR_ACTION_HDR_LEN_L2_MACS = 12,\n+\tMLX5DR_ACTION_HDR_LEN_L2_VLAN = 4,\n+\tMLX5DR_ACTION_HDR_LEN_L2_ETHER = 2,\n+\tMLX5DR_ACTION_HDR_LEN_L2 = (MLX5DR_ACTION_HDR_LEN_L2_MACS +\n+\t\t\t\t    MLX5DR_ACTION_HDR_LEN_L2_ETHER),\n+\tMLX5DR_ACTION_HDR_LEN_L2_W_VLAN = (MLX5DR_ACTION_HDR_LEN_L2 +\n+\t\t\t\t\t   MLX5DR_ACTION_HDR_LEN_L2_VLAN),\n+\tMLX5DR_ACTION_REFORMAT_DATA_SIZE = 64,\n+\tDECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,\n+\tDECAP_L3_NUM_ACTIONS_W_VLAN = 7,\n+};\n+\n+enum mlx5dr_action_setter_flag {\n+\tASF_SINGLE1 = 1 << 0,\n+\tASF_SINGLE2 = 1 << 1,\n+\tASF_SINGLE3 = 1 << 2,\n+\tASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,\n+\tASF_REPARSE = 1 << 3,\n+\tASF_REMOVE = 1 << 4,\n+\tASF_MODIFY = 1 << 5,\n+\tASF_CTR = 1 << 6,\n+\tASF_HIT = 1 << 7,\n+};\n+\n+struct mlx5dr_action_default_stc {\n+\tstruct mlx5dr_pool_chunk nop_ctr;\n+\tstruct mlx5dr_pool_chunk nop_dw5;\n+\tstruct mlx5dr_pool_chunk nop_dw6;\n+\tstruct mlx5dr_pool_chunk nop_dw7;\n+\tstruct mlx5dr_pool_chunk default_hit;\n+\tuint32_t refcount;\n+};\n+\n+struct mlx5dr_action_shared_stc {\n+\tstruct mlx5dr_pool_chunk remove_header;\n+\trte_atomic32_t refcount;\n+};\n+\n+struct mlx5dr_actions_apply_data {\n+\tstruct mlx5dr_send_engine *queue;\n+\tstruct mlx5dr_rule_action *rule_action;\n+\tuint32_t *wqe_data;\n+\tstruct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;\n+\tuint32_t jump_to_action_stc;\n+\tstruct mlx5dr_context_common_res *common_res;\n+\tenum mlx5dr_table_type tbl_type;\n+\tuint32_t next_direct_idx;\n+\tuint8_t require_dep;\n+};\n+\n+struct mlx5dr_actions_wqe_setter;\n+\n+typedef void (*mlx5dr_action_setter_fp)\n+\t(struct mlx5dr_actions_apply_data *apply,\n+\t struct mlx5dr_actions_wqe_setter *setter);\n+\n+struct mlx5dr_actions_wqe_setter {\n+\tmlx5dr_action_setter_fp set_single;\n+\tmlx5dr_action_setter_fp set_double;\n+\tmlx5dr_action_setter_fp set_hit;\n+\tmlx5dr_action_setter_fp set_ctr;\n+\tuint8_t idx_single;\n+\tuint8_t idx_double;\n+\tuint8_t idx_ctr;\n+\tuint8_t idx_hit;\n+\tuint8_t flags;\n+};\n+\n+struct mlx5dr_action_template {\n+\tstruct mlx5dr_actions_wqe_setter setters[MLX5DR_ACTION_MAX_STE];\n+\tenum mlx5dr_action_type *action_type_arr;\n+\tuint8_t num_of_action_stes;\n+\tuint8_t num_actions;\n+\tuint8_t only_term;\n+};\n+\n+struct mlx5dr_action {\n+\tuint8_t type;\n+\tuint8_t flags;\n+\tstruct mlx5dr_context *ctx;\n+\tunion {\n+\t\tstruct {\n+\t\t\tstruct mlx5dr_pool_chunk stc[MLX5DR_TABLE_TYPE_MAX];\n+\t\t\tunion {\n+\t\t\t\tstruct {\n+\t\t\t\t\tstruct mlx5dr_devx_obj *pattern_obj;\n+\t\t\t\t\tstruct mlx5dr_devx_obj *arg_obj;\n+\t\t\t\t\t__be64 single_action;\n+\t\t\t\t\tuint8_t single_action_type;\n+\t\t\t\t\tuint16_t num_of_actions;\n+\t\t\t\t} modify_header;\n+\t\t\t\tstruct {\n+\t\t\t\t\tstruct 
mlx5dr_devx_obj *arg_obj;\n+\t\t\t\t\tuint32_t header_size;\n+\t\t\t\t} reformat;\n+\t\t\t\tstruct {\n+\t\t\t\t\tstruct mlx5dr_devx_obj *devx_obj;\n+\t\t\t\t\tuint8_t return_reg_id;\n+\t\t\t\t} aso;\n+\t\t\t\tstruct {\n+\t\t\t\t\tuint16_t vport_num;\n+\t\t\t\t\tuint16_t esw_owner_vhca_id;\n+\t\t\t\t} vport;\n+\t\t\t};\n+\t\t};\n+\n+\t\tstruct ibv_flow_action *flow_action;\n+\t\tstruct mlx5dv_devx_obj *devx_obj;\n+\t\tstruct ibv_qp *qp;\n+\t};\n+};\n+\n+int mlx5dr_action_root_build_attr(struct mlx5dr_rule_action rule_actions[],\n+\t\t\t\t  uint32_t num_actions,\n+\t\t\t\t  struct mlx5dv_flow_action_attr *attr);\n+\n+int mlx5dr_action_get_default_stc(struct mlx5dr_context *ctx,\n+\t\t\t\t  uint8_t tbl_type);\n+\n+void mlx5dr_action_put_default_stc(struct mlx5dr_context *ctx,\n+\t\t\t\t   uint8_t tbl_type);\n+\n+void mlx5dr_action_prepare_decap_l3_data(uint8_t *src, uint8_t *dst,\n+\t\t\t\t\t uint16_t num_of_actions);\n+\n+int mlx5dr_action_template_process(struct mlx5dr_action_template *at);\n+\n+bool mlx5dr_action_check_combo(enum mlx5dr_action_type *user_actions,\n+\t\t\t       enum mlx5dr_table_type table_type);\n+\n+int mlx5dr_action_alloc_single_stc(struct mlx5dr_context *ctx,\n+\t\t\t\t   struct mlx5dr_cmd_stc_modify_attr *stc_attr,\n+\t\t\t\t   uint32_t table_type,\n+\t\t\t\t   struct mlx5dr_pool_chunk *stc);\n+\n+void mlx5dr_action_free_single_stc(struct mlx5dr_context *ctx,\n+\t\t\t\t   uint32_t table_type,\n+\t\t\t\t   struct mlx5dr_pool_chunk *stc);\n+\n+static inline void\n+mlx5dr_action_setter_default_single(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\t    __rte_unused struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] =\n+\t\thtobe32(apply->common_res->default_stc->nop_dw5.offset);\n+}\n+\n+static inline void\n+mlx5dr_action_setter_default_double(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\t    __rte_unused struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0;\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] =\n+\t\thtobe32(apply->common_res->default_stc->nop_dw6.offset);\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] =\n+\t\thtobe32(apply->common_res->default_stc->nop_dw7.offset);\n+}\n+\n+static inline void\n+mlx5dr_action_setter_default_ctr(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t\t __rte_unused struct mlx5dr_actions_wqe_setter *setter)\n+{\n+\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW0] = 0;\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_CTRL] =\n+\t\thtobe32(apply->common_res->default_stc->nop_ctr.offset);\n+}\n+\n+static inline void\n+mlx5dr_action_apply_setter(struct mlx5dr_actions_apply_data *apply,\n+\t\t\t   struct mlx5dr_actions_wqe_setter *setter,\n+\t\t\t   bool is_jumbo)\n+{\n+\tuint8_t num_of_actions;\n+\n+\t/* Set control counter */\n+\tif (setter->flags & ASF_CTR)\n+\t\tsetter->set_ctr(apply, setter);\n+\telse\n+\t\tmlx5dr_action_setter_default_ctr(apply, setter);\n+\n+\t/* Set single and double on match */\n+\tif (!is_jumbo) {\n+\t\tif (setter->flags & ASF_SINGLE1)\n+\t\t\tsetter->set_single(apply, setter);\n+\t\telse\n+\t\t\tmlx5dr_action_setter_default_single(apply, setter);\n+\n+\t\tif (setter->flags & ASF_DOUBLE)\n+\t\t\tsetter->set_double(apply, setter);\n+\t\telse\n+\t\t\tmlx5dr_action_setter_default_double(apply, setter);\n+\n+\t\tnum_of_actions = setter->flags & ASF_DOUBLE ?\n+\t\t\tMLX5DR_ACTION_STC_IDX_LAST_COMBO1 
:\n+\t\t\tMLX5DR_ACTION_STC_IDX_LAST_COMBO2;\n+\t} else {\n+\t\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW5] = 0;\n+\t\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW6] = 0;\n+\t\tapply->wqe_data[MLX5DR_ACTION_OFFSET_DW7] = 0;\n+\t\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW5] = 0;\n+\t\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW6] = 0;\n+\t\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_DW7] = 0;\n+\t\tnum_of_actions = MLX5DR_ACTION_STC_IDX_LAST_JUMBO_STE;\n+\t}\n+\n+\t/* Set next/final hit action */\n+\tsetter->set_hit(apply, setter);\n+\n+\t/* Set number of actions */\n+\tapply->wqe_ctrl->stc_ix[MLX5DR_ACTION_STC_IDX_CTRL] |=\n+\t\thtobe32(num_of_actions << 29);\n+}\n+\n+#endif /* MLX5DR_ACTION_H_ */\ndiff --git a/drivers/net/mlx5/hws/mlx5dr_pat_arg.c b/drivers/net/mlx5/hws/mlx5dr_pat_arg.c\nnew file mode 100644\nindex 0000000000..46fdc8ce68\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_pat_arg.c\n@@ -0,0 +1,511 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#include \"mlx5dr_internal.h\"\n+\n+enum mlx5dr_arg_chunk_size\n+mlx5dr_arg_data_size_to_arg_log_size(uint16_t data_size)\n+{\n+\t/* Return the roundup of log2(data_size) */\n+\tif (data_size <= MLX5DR_ARG_DATA_SIZE)\n+\t\treturn MLX5DR_ARG_CHUNK_SIZE_1;\n+\tif (data_size <= MLX5DR_ARG_DATA_SIZE * 2)\n+\t\treturn MLX5DR_ARG_CHUNK_SIZE_2;\n+\tif (data_size <= MLX5DR_ARG_DATA_SIZE * 4)\n+\t\treturn MLX5DR_ARG_CHUNK_SIZE_3;\n+\tif (data_size <= MLX5DR_ARG_DATA_SIZE * 8)\n+\t\treturn MLX5DR_ARG_CHUNK_SIZE_4;\n+\n+\treturn MLX5DR_ARG_CHUNK_SIZE_MAX;\n+}\n+\n+uint32_t mlx5dr_arg_data_size_to_arg_size(uint16_t data_size)\n+{\n+\treturn BIT(mlx5dr_arg_data_size_to_arg_log_size(data_size));\n+}\n+\n+enum mlx5dr_arg_chunk_size\n+mlx5dr_arg_get_arg_log_size(uint16_t num_of_actions)\n+{\n+\treturn mlx5dr_arg_data_size_to_arg_log_size(num_of_actions *\n+\t\t\t\t\t\t    MLX5DR_MODIFY_ACTION_SIZE);\n+}\n+\n+uint32_t mlx5dr_arg_get_arg_size(uint16_t num_of_actions)\n+{\n+\treturn BIT(mlx5dr_arg_get_arg_log_size(num_of_actions));\n+}\n+\n+/* Cache and cache element handling */\n+int mlx5dr_pat_init_pattern_cache(struct mlx5dr_pattern_cache **cache)\n+{\n+\tstruct mlx5dr_pattern_cache *new_cache;\n+\n+\tnew_cache = simple_calloc(1, sizeof(*new_cache));\n+\tif (!new_cache) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn rte_errno;\n+\t}\n+\tLIST_INIT(&new_cache->head);\n+\tpthread_spin_init(&new_cache->lock, PTHREAD_PROCESS_PRIVATE);\n+\n+\t*cache = new_cache;\n+\n+\treturn 0;\n+}\n+\n+void mlx5dr_pat_uninit_pattern_cache(struct mlx5dr_pattern_cache *cache)\n+{\n+\tsimple_free(cache);\n+}\n+\n+static bool mlx5dr_pat_compare_pattern(enum mlx5dr_action_type cur_type,\n+\t\t\t\t       int cur_num_of_actions,\n+\t\t\t\t       __be64 cur_actions[],\n+\t\t\t\t       enum mlx5dr_action_type type,\n+\t\t\t\t       int num_of_actions,\n+\t\t\t\t       __be64 actions[])\n+{\n+\tint i;\n+\n+\tif (cur_num_of_actions != num_of_actions || cur_type != type)\n+\t\treturn false;\n+\n+\t /* All decap-l3 look the same, only change is the num of actions */\n+\tif (type == MLX5DR_ACTION_TYP_TNL_L3_TO_L2)\n+\t\treturn true;\n+\n+\tfor (i = 0; i < num_of_actions; i++) {\n+\t\tu8 action_id =\n+\t\t\tMLX5_GET(set_action_in, &actions[i], action_type);\n+\n+\t\tif (action_id == MLX5_MODIFICATION_TYPE_COPY) {\n+\t\t\tif (actions[i] != cur_actions[i])\n+\t\t\t\treturn false;\n+\t\t} else {\n+\t\t\t/* Compare just the control, not the values */\n+\t\t\tif ((__be32)actions[i] !=\n+\t\t\t    
(__be32)cur_actions[i])\n+\t\t\t\treturn false;\n+\t\t}\n+\t}\n+\n+\treturn true;\n+}\n+\n+static struct mlx5dr_pat_cached_pattern *\n+mlx5dr_pat_find_cached_pattern(struct mlx5dr_pattern_cache *cache,\n+\t\t\t       struct mlx5dr_action *action,\n+\t\t\t       uint16_t num_of_actions,\n+\t\t\t       __be64 *actions)\n+{\n+\tstruct mlx5dr_pat_cached_pattern *cached_pat;\n+\n+\tLIST_FOREACH(cached_pat, &cache->head, next) {\n+\t\tif (mlx5dr_pat_compare_pattern(cached_pat->type,\n+\t\t\t\t\t       cached_pat->mh_data.num_of_actions,\n+\t\t\t\t\t       (__be64 *)cached_pat->mh_data.data,\n+\t\t\t\t\t       action->type,\n+\t\t\t\t\t       num_of_actions,\n+\t\t\t\t\t       actions))\n+\t\t\treturn cached_pat;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static struct mlx5dr_pat_cached_pattern *\n+mlx5dr_pat_get_existing_cached_pattern(struct mlx5dr_pattern_cache *cache,\n+\t\t\t\t       struct mlx5dr_action *action,\n+\t\t\t\t       uint16_t num_of_actions,\n+\t\t\t\t       __be64 *actions)\n+{\n+\tstruct mlx5dr_pat_cached_pattern *cached_pattern;\n+\n+\tcached_pattern = mlx5dr_pat_find_cached_pattern(cache, action, num_of_actions, actions);\n+\tif (cached_pattern) {\n+\t\t/* LRU: move it to be first in the list */\n+\t\tLIST_REMOVE(cached_pattern, next);\n+\t\tLIST_INSERT_HEAD(&cache->head, cached_pattern, next);\n+\t\trte_atomic32_add(&cached_pattern->refcount, 1);\n+\t}\n+\n+\treturn cached_pattern;\n+}\n+\n+static struct mlx5dr_pat_cached_pattern *\n+mlx5dr_pat_get_cached_pattern_by_action(struct mlx5dr_pattern_cache *cache,\n+\t\t\t\t\tstruct mlx5dr_action *action)\n+{\n+\tstruct mlx5dr_pat_cached_pattern *cached_pattern;\n+\n+\tLIST_FOREACH(cached_pattern, &cache->head, next) {\n+\t\tif (cached_pattern->mh_data.pattern_obj->id == action->modify_header.pattern_obj->id)\n+\t\t\treturn cached_pattern;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static struct mlx5dr_pat_cached_pattern *\n+mlx5dr_pat_add_pattern_to_cache(struct mlx5dr_pattern_cache *cache,\n+\t\t\t\tstruct mlx5dr_devx_obj *pattern_obj,\n+\t\t\t\tenum mlx5dr_action_type type,\n+\t\t\t\tuint16_t num_of_actions,\n+\t\t\t\t__be64 *actions)\n+{\n+\tstruct mlx5dr_pat_cached_pattern *cached_pattern;\n+\n+\tcached_pattern = simple_calloc(1, sizeof(*cached_pattern));\n+\tif (!cached_pattern) {\n+\t\tDR_LOG(ERR, \"Failed to allocate cached_pattern\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\tcached_pattern->type = type;\n+\tcached_pattern->mh_data.num_of_actions = num_of_actions;\n+\tcached_pattern->mh_data.pattern_obj = pattern_obj;\n+\tcached_pattern->mh_data.data =\n+\t\tsimple_malloc(num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);\n+\tif (!cached_pattern->mh_data.data) {\n+\t\tDR_LOG(ERR, \"Failed to allocate mh_data.data\");\n+\t\trte_errno = ENOMEM;\n+\t\tgoto free_cached_obj;\n+\t}\n+\n+\tmemcpy(cached_pattern->mh_data.data, actions,\n+\t       num_of_actions * MLX5DR_MODIFY_ACTION_SIZE);\n+\n+\tLIST_INSERT_HEAD(&cache->head, cached_pattern, next);\n+\n+\trte_atomic32_init(&cached_pattern->refcount);\n+\trte_atomic32_set(&cached_pattern->refcount, 1);\n+\n+\treturn cached_pattern;\n+\n+free_cached_obj:\n+\tsimple_free(cached_pattern);\n+\treturn NULL;\n+}\n+\n+static void\n+mlx5dr_pat_remove_pattern(struct mlx5dr_pat_cached_pattern *cached_pattern)\n+{\n+\tLIST_REMOVE(cached_pattern, next);\n+\tsimple_free(cached_pattern->mh_data.data);\n+\tsimple_free(cached_pattern);\n+}\n+\n+static void\n+mlx5dr_pat_put_pattern(struct mlx5dr_pattern_cache *cache,\n+\t\t       struct mlx5dr_action *action)\n+{\n+\tstruct mlx5dr_pat_cached_pattern 
*cached_pattern;\n+\n+\tpthread_spin_lock(&cache->lock);\n+\tcached_pattern = mlx5dr_pat_get_cached_pattern_by_action(cache, action);\n+\tif (!cached_pattern) {\n+\t\tDR_LOG(ERR, \"Failed to find pattern according to action with pt\");\n+\t\tassert(false);\n+\t\tgoto out;\n+\t}\n+\n+\tif (!rte_atomic32_dec_and_test(&cached_pattern->refcount))\n+\t\tgoto out;\n+\n+\tmlx5dr_pat_remove_pattern(cached_pattern);\n+\n+out:\n+\tpthread_spin_unlock(&cache->lock);\n+}\n+\n+static int mlx5dr_pat_get_pattern(struct mlx5dr_context *ctx,\n+\t\t\t\t  struct mlx5dr_action *action,\n+\t\t\t\t  uint16_t num_of_actions,\n+\t\t\t\t  size_t pattern_sz,\n+\t\t\t\t  __be64 *pattern)\n+{\n+\tstruct mlx5dr_pat_cached_pattern *cached_pattern;\n+\tint ret = 0;\n+\n+\tpthread_spin_lock(&ctx->pattern_cache->lock);\n+\n+\tcached_pattern = mlx5dr_pat_get_existing_cached_pattern(ctx->pattern_cache,\n+\t\t\t\t\t\t\t\taction,\n+\t\t\t\t\t\t\t\tnum_of_actions,\n+\t\t\t\t\t\t\t\tpattern);\n+\tif (cached_pattern) {\n+\t\taction->modify_header.pattern_obj = cached_pattern->mh_data.pattern_obj;\n+\t\tgoto out_unlock;\n+\t}\n+\n+\taction->modify_header.pattern_obj =\n+\t\tmlx5dr_cmd_header_modify_pattern_create(ctx->ibv_ctx,\n+\t\t\t\t\t\t\tpattern_sz,\n+\t\t\t\t\t\t\t(uint8_t *)pattern);\n+\tif (!action->modify_header.pattern_obj) {\n+\t\tDR_LOG(ERR, \"Failed to create pattern FW object\");\n+\n+\t\tret = rte_errno;\n+\t\tgoto out_unlock;\n+\t}\n+\n+\tcached_pattern =\n+\t\tmlx5dr_pat_add_pattern_to_cache(ctx->pattern_cache,\n+\t\t\t\t\t\taction->modify_header.pattern_obj,\n+\t\t\t\t\t\taction->type,\n+\t\t\t\t\t\tnum_of_actions,\n+\t\t\t\t\t\tpattern);\n+\tif (!cached_pattern) {\n+\t\tDR_LOG(ERR, \"Failed to add pattern to cache\");\n+\t\tret = rte_errno;\n+\t\tgoto clean_pattern;\n+\t}\n+\n+out_unlock:\n+\tpthread_spin_unlock(&ctx->pattern_cache->lock);\n+\treturn ret;\n+\n+clean_pattern:\n+\tmlx5dr_cmd_destroy_obj(action->modify_header.pattern_obj);\n+\tpthread_spin_unlock(&ctx->pattern_cache->lock);\n+\treturn ret;\n+}\n+\n+static void\n+mlx5d_arg_init_send_attr(struct mlx5dr_send_engine_post_attr *send_attr,\n+\t\t\t void *comp_data,\n+\t\t\t uint32_t arg_idx)\n+{\n+\tsend_attr->opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;\n+\tsend_attr->opmod = MLX5DR_WQE_GTA_OPMOD_MOD_ARG;\n+\tsend_attr->len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;\n+\tsend_attr->id = arg_idx;\n+\tsend_attr->user_data = comp_data;\n+}\n+\n+void mlx5dr_arg_decapl3_write(struct mlx5dr_send_engine *queue,\n+\t\t\t      uint32_t arg_idx,\n+\t\t\t      uint8_t *arg_data,\n+\t\t\t      uint16_t num_of_actions)\n+{\n+\tstruct mlx5dr_send_engine_post_attr send_attr = {0};\n+\tstruct mlx5dr_wqe_gta_data_seg_arg *wqe_arg;\n+\tstruct mlx5dr_send_engine_post_ctrl ctrl;\n+\tstruct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;\n+\tsize_t wqe_len;\n+\n+\tmlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);\n+\n+\tctrl = mlx5dr_send_engine_post_start(queue);\n+\tmlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);\n+\tmemset(wqe_ctrl, 0, wqe_len);\n+\tmlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);\n+\tmlx5dr_action_prepare_decap_l3_data(arg_data, (uint8_t *)wqe_arg,\n+\t\t\t\t\t    num_of_actions);\n+\tmlx5dr_send_engine_post_end(&ctrl, &send_attr);\n+}\n+\n+static int\n+mlx5dr_arg_poll_for_comp(struct mlx5dr_context *ctx, uint16_t queue_id)\n+{\n+\tstruct rte_flow_op_result comp[1];\n+\tint ret;\n+\n+\twhile (true) {\n+\t\tret = mlx5dr_send_queue_poll(ctx, queue_id, comp, 1);\n+\t\tif (ret) {\n+\t\t\tif (ret < 0) {\n+\t\t\t\tDR_LOG(ERR, 
\"Failed mlx5dr_send_queue_poll\");\n+\t\t\t} else if (comp[0].status == RTE_FLOW_OP_ERROR) {\n+\t\t\t\tDR_LOG(ERR, \"Got comp with error\");\n+\t\t\t\trte_errno = ENOENT;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\treturn (ret == 1 ? 0 : ret);\n+}\n+\n+void mlx5dr_arg_write(struct mlx5dr_send_engine *queue,\n+\t\t      void *comp_data,\n+\t\t      uint32_t arg_idx,\n+\t\t      uint8_t *arg_data,\n+\t\t      size_t data_size)\n+{\n+\tstruct mlx5dr_send_engine_post_attr send_attr = {0};\n+\tstruct mlx5dr_wqe_gta_data_seg_arg *wqe_arg;\n+\tstruct mlx5dr_send_engine_post_ctrl ctrl;\n+\tstruct mlx5dr_wqe_gta_ctrl_seg *wqe_ctrl;\n+\tint i, full_iter, leftover;\n+\tsize_t wqe_len;\n+\n+\tmlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);\n+\n+\t/* Each WQE can hold 64B of data, it might require multiple iteration */\n+\tfull_iter = data_size / MLX5DR_ARG_DATA_SIZE;\n+\tleftover = data_size & (MLX5DR_ARG_DATA_SIZE - 1);\n+\n+\tfor (i = 0; i < full_iter; i++) {\n+\t\tctrl = mlx5dr_send_engine_post_start(queue);\n+\t\tmlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);\n+\t\tmemset(wqe_ctrl, 0, wqe_len);\n+\t\tmlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);\n+\t\tmemcpy(wqe_arg, arg_data, wqe_len);\n+\t\tsend_attr.id = arg_idx++;\n+\t\tmlx5dr_send_engine_post_end(&ctrl, &send_attr);\n+\n+\t\t/* Move to next argument data */\n+\t\targ_data += MLX5DR_ARG_DATA_SIZE;\n+\t}\n+\n+\tif (leftover) {\n+\t\tctrl = mlx5dr_send_engine_post_start(queue);\n+\t\tmlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);\n+\t\tmemset(wqe_ctrl, 0, wqe_len);\n+\t\tmlx5dr_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);\n+\t\tmemcpy(wqe_arg, arg_data, leftover);\n+\t\tsend_attr.id = arg_idx;\n+\t\tmlx5dr_send_engine_post_end(&ctrl, &send_attr);\n+\t}\n+}\n+\n+int mlx5dr_arg_write_inline_arg_data(struct mlx5dr_context *ctx,\n+\t\t\t\t     uint32_t arg_idx,\n+\t\t\t\t     uint8_t *arg_data,\n+\t\t\t\t     size_t data_size)\n+{\n+\tstruct mlx5dr_send_engine *queue;\n+\tint ret;\n+\n+\tpthread_spin_lock(&ctx->ctrl_lock);\n+\n+\t/* Get the control queue */\n+\tqueue = &ctx->send_queue[ctx->queues - 1];\n+\n+\tmlx5dr_arg_write(queue, arg_data, arg_idx, arg_data, data_size);\n+\n+\tmlx5dr_send_engine_flush_queue(queue);\n+\n+\t/* Poll for completion */\n+\tret = mlx5dr_arg_poll_for_comp(ctx, ctx->queues - 1);\n+\tif (ret)\n+\t\tDR_LOG(ERR, \"Failed to get completions for shared action\");\n+\n+\tpthread_spin_unlock(&ctx->ctrl_lock);\n+\n+\treturn ret;\n+}\n+\n+bool mlx5dr_arg_is_valid_arg_request_size(struct mlx5dr_context *ctx,\n+\t\t\t\t\t  uint32_t arg_size)\n+{\n+\tif (arg_size < ctx->caps->log_header_modify_argument_granularity ||\n+\t    arg_size > ctx->caps->log_header_modify_argument_max_alloc) {\n+\t\treturn false;\n+\t}\n+\treturn true;\n+}\n+\n+static int\n+mlx5dr_arg_create_modify_header_arg(struct mlx5dr_context *ctx,\n+\t\t\t\t    struct mlx5dr_action *action,\n+\t\t\t\t    uint16_t num_of_actions,\n+\t\t\t\t    __be64 *pattern,\n+\t\t\t\t    uint32_t bulk_size)\n+{\n+\tuint32_t flags = action->flags;\n+\tuint16_t args_log_size;\n+\tint ret = 0;\n+\n+\t/* Alloc bulk of args */\n+\targs_log_size = mlx5dr_arg_get_arg_log_size(num_of_actions);\n+\tif (args_log_size >= MLX5DR_ARG_CHUNK_SIZE_MAX) {\n+\t\tDR_LOG(ERR, \"Exceed number of allowed actions %u\",\n+\t\t\tnum_of_actions);\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (!mlx5dr_arg_is_valid_arg_request_size(ctx, args_log_size + bulk_size)) {\n+\t\tDR_LOG(ERR, \"Arg size %d 
does not fit FW capability\",\n+\t\t       args_log_size + bulk_size);\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\taction->modify_header.arg_obj =\n+\t\tmlx5dr_cmd_arg_create(ctx->ibv_ctx, args_log_size + bulk_size,\n+\t\t\t\t      ctx->pd_num);\n+\tif (!action->modify_header.arg_obj) {\n+\t\tDR_LOG(ERR, \"Failed allocating arg in order: %d\",\n+\t\t\targs_log_size + bulk_size);\n+\t\treturn rte_errno;\n+\t}\n+\n+\t/* When INLINE need to write the arg data */\n+\tif (flags & MLX5DR_ACTION_FLAG_SHARED)\n+\t\tret = mlx5dr_arg_write_inline_arg_data(ctx,\n+\t\t\t\t\t\t       action->modify_header.arg_obj->id,\n+\t\t\t\t\t\t       (uint8_t *)pattern,\n+\t\t\t\t\t\t       num_of_actions *\n+\t\t\t\t\t\t       MLX5DR_MODIFY_ACTION_SIZE);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed writing INLINE arg in order: %d\",\n+\t\t\targs_log_size + bulk_size);\n+\t\tmlx5dr_cmd_destroy_obj(action->modify_header.arg_obj);\n+\t\treturn rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int mlx5dr_pat_arg_create_modify_header(struct mlx5dr_context *ctx,\n+\t\t\t\t\tstruct mlx5dr_action *action,\n+\t\t\t\t\tsize_t pattern_sz,\n+\t\t\t\t\t__be64 pattern[],\n+\t\t\t\t\tuint32_t bulk_size)\n+{\n+\tuint16_t num_of_actions;\n+\tint ret;\n+\n+\tnum_of_actions = pattern_sz / MLX5DR_MODIFY_ACTION_SIZE;\n+\tif (num_of_actions == 0) {\n+\t\tDR_LOG(ERR, \"Invalid number of actions %u\\n\", num_of_actions);\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\taction->modify_header.num_of_actions = num_of_actions;\n+\n+\tret = mlx5dr_arg_create_modify_header_arg(ctx, action,\n+\t\t\t\t\t\t  num_of_actions,\n+\t\t\t\t\t\t  pattern,\n+\t\t\t\t\t\t  bulk_size);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to allocate arg\");\n+\t\treturn ret;\n+\t}\n+\n+\tret = mlx5dr_pat_get_pattern(ctx, action, num_of_actions, pattern_sz,\n+\t\t\t\t     pattern);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to allocate pattern\");\n+\t\tgoto free_arg;\n+\t}\n+\n+\treturn 0;\n+\n+free_arg:\n+\tmlx5dr_cmd_destroy_obj(action->modify_header.arg_obj);\n+\treturn rte_errno;\n+}\n+\n+void mlx5dr_pat_arg_destroy_modify_header(struct mlx5dr_context *ctx,\n+\t\t\t\t\t  struct mlx5dr_action *action)\n+{\n+\tmlx5dr_cmd_destroy_obj(action->modify_header.arg_obj);\n+\tmlx5dr_pat_put_pattern(ctx->pattern_cache, action);\n+}\ndiff --git a/drivers/net/mlx5/hws/mlx5dr_pat_arg.h b/drivers/net/mlx5/hws/mlx5dr_pat_arg.h\nnew file mode 100644\nindex 0000000000..8a4670427f\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_pat_arg.h\n@@ -0,0 +1,83 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#ifndef MLX5DR_PAT_ARG_H_\n+#define MLX5DR_PAT_ARG_H_\n+\n+/* Modify-header arg pool */\n+enum mlx5dr_arg_chunk_size {\n+\tMLX5DR_ARG_CHUNK_SIZE_1,\n+\t/* Keep MIN updated when changing */\n+\tMLX5DR_ARG_CHUNK_SIZE_MIN = MLX5DR_ARG_CHUNK_SIZE_1,\n+\tMLX5DR_ARG_CHUNK_SIZE_2,\n+\tMLX5DR_ARG_CHUNK_SIZE_3,\n+\tMLX5DR_ARG_CHUNK_SIZE_4,\n+\tMLX5DR_ARG_CHUNK_SIZE_MAX,\n+};\n+\n+enum {\n+\tMLX5DR_MODIFY_ACTION_SIZE = 8,\n+\tMLX5DR_ARG_DATA_SIZE = 64,\n+};\n+\n+struct mlx5dr_pattern_cache {\n+\t/* Protect pattern list */\n+\tpthread_spinlock_t lock;\n+\tLIST_HEAD(pattern_head, mlx5dr_pat_cached_pattern) head;\n+};\n+\n+struct mlx5dr_pat_cached_pattern {\n+\tenum mlx5dr_action_type type;\n+\tstruct {\n+\t\tstruct mlx5dr_devx_obj *pattern_obj;\n+\t\tstruct dr_icm_chunk *chunk;\n+\t\tuint8_t *data;\n+\t\tuint16_t num_of_actions;\n+\t} mh_data;\n+\trte_atomic32_t 
refcount;\n+\tLIST_ENTRY(mlx5dr_pat_cached_pattern) next;\n+};\n+\n+enum mlx5dr_arg_chunk_size\n+mlx5dr_arg_get_arg_log_size(uint16_t num_of_actions);\n+\n+uint32_t mlx5dr_arg_get_arg_size(uint16_t num_of_actions);\n+\n+enum mlx5dr_arg_chunk_size\n+mlx5dr_arg_data_size_to_arg_log_size(uint16_t data_size);\n+\n+uint32_t mlx5dr_arg_data_size_to_arg_size(uint16_t data_size);\n+\n+int mlx5dr_pat_init_pattern_cache(struct mlx5dr_pattern_cache **cache);\n+\n+void mlx5dr_pat_uninit_pattern_cache(struct mlx5dr_pattern_cache *cache);\n+\n+int mlx5dr_pat_arg_create_modify_header(struct mlx5dr_context *ctx,\n+\t\t\t\t\tstruct mlx5dr_action *action,\n+\t\t\t\t\tsize_t pattern_sz,\n+\t\t\t\t\t__be64 pattern[],\n+\t\t\t\t\tuint32_t bulk_size);\n+\n+void mlx5dr_pat_arg_destroy_modify_header(struct mlx5dr_context *ctx,\n+\t\t\t\t\t  struct mlx5dr_action *action);\n+\n+bool mlx5dr_arg_is_valid_arg_request_size(struct mlx5dr_context *ctx,\n+\t\t\t\t\t  uint32_t arg_size);\n+\n+void mlx5dr_arg_write(struct mlx5dr_send_engine *queue,\n+\t\t      void *comp_data,\n+\t\t      uint32_t arg_idx,\n+\t\t      uint8_t *arg_data,\n+\t\t      size_t data_size);\n+\n+void mlx5dr_arg_decapl3_write(struct mlx5dr_send_engine *queue,\n+\t\t\t      uint32_t arg_idx,\n+\t\t\t      uint8_t *arg_data,\n+\t\t\t      uint16_t num_of_actions);\n+\n+int mlx5dr_arg_write_inline_arg_data(struct mlx5dr_context *ctx,\n+\t\t\t\t     uint32_t arg_idx,\n+\t\t\t\t     uint8_t *arg_data,\n+\t\t\t\t     size_t data_size);\n+#endif /* MLX5DR_PAT_ARG_H_ */\n",
    "prefixes": [
        "v6",
        "16/18"
    ]
}