get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body are changed).

put:
Update a patch (the full writable representation is expected).
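
The exchange below shows a sample GET against this endpoint. As a minimal
sketch, the same record can also be fetched programmatically; this example uses
Python's requests library (an assumption, any HTTP client works) and relies on
read access needing no authentication:

    import requests

    # Fetch the JSON representation of patch 118823 (the record shown below).
    resp = requests.get("http://patches.dpdk.org/api/patches/118823/")
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields visible in the response below.
    print(patch["name"])   # "[v6,15/18] net/mlx5/hws: Add HWS rule object"
    print(patch["state"])  # "accepted"
    print(patch["mbox"])   # mbox download URL

The companion URLs embedded in the record (mbox, comments, checks) are plain
GETs as well; the mbox form in particular returns the patch as an email that
can be piped straight into git am.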

GET /api/patches/118823/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118823,
    "url": "http://patches.dpdk.org/api/patches/118823/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-16-valex@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221020155749.16643-16-valex@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221020155749.16643-16-valex@nvidia.com",
    "date": "2022-10-20T15:57:45",
    "name": "[v6,15/18] net/mlx5/hws: Add HWS rule object",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "ca9e2be2b4c955e959abc3144ab710fc7e21da7e",
    "submitter": {
        "id": 2858,
        "url": "http://patches.dpdk.org/api/people/2858/?format=api",
        "name": "Alex Vesker",
        "email": "valex@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-16-valex@nvidia.com/mbox/",
    "series": [
        {
            "id": 25345,
            "url": "http://patches.dpdk.org/api/series/25345/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=25345",
            "date": "2022-10-20T15:57:30",
            "name": "net/mlx5: Add HW steering low level support",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/25345/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/118823/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/118823/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 1F5FFA0553;\n\tThu, 20 Oct 2022 18:01:13 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 6333D42BEE;\n\tThu, 20 Oct 2022 17:59:43 +0200 (CEST)",
            "from NAM10-BN7-obe.outbound.protection.outlook.com\n (mail-bn7nam10on2052.outbound.protection.outlook.com [40.107.92.52])\n by mails.dpdk.org (Postfix) with ESMTP id 4765E42C0C\n for <dev@dpdk.org>; Thu, 20 Oct 2022 17:59:42 +0200 (CEST)",
            "from BN8PR15CA0045.namprd15.prod.outlook.com (2603:10b6:408:80::22)\n by LV2PR12MB5990.namprd12.prod.outlook.com (2603:10b6:408:170::16)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.30; Thu, 20 Oct\n 2022 15:59:40 +0000",
            "from BN8NAM11FT068.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:80:cafe::53) by BN8PR15CA0045.outlook.office365.com\n (2603:10b6:408:80::22) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.30 via Frontend\n Transport; Thu, 20 Oct 2022 15:59:40 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n BN8NAM11FT068.mail.protection.outlook.com (10.13.177.69) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5746.16 via Frontend Transport; Thu, 20 Oct 2022 15:59:39 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Thu, 20 Oct\n 2022 08:59:25 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Thu, 20 Oct\n 2022 08:59:23 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=fpFdeYmmSuaFSO5NblizK+WK+Wj/YejUKoT1dlB7DPs+6GZDP5UEcdrHSDBZXb80O8wuFPCmodEombi8x6u5XVSAMwyiJdg/f7tvR7bOHTChhqFSwl/J0dbkmOqSxe8+6yOjbKn4S3JwwT4HR2kFBuudqXBtnKH8MpfT7zmTFdoW32z3cjPUGf371jj6Z/F7gOFo69+o3AZQ+zK0NSjo5Msxgrvb0Bb6+mwVYGYv4e/44XPhKnSVAH07LGYTMWYObkYjFXxiJT8/k1+pXnx6CYZQktMTnphz0quTdv0HnJVEgrbQN78eZJoJKbCCBFU1GbOV8SjOQTVlzZfLqX85xQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=4HqD1MIWwdQT7E+hwE7RB5/wQLAm3Ixp7wiF+Asevco=;\n b=g3vsWXtfUQ+Gfbc6fXytgxxxMpUEl4BXrnwpCvr0bXPcj3xUmRU8vjPUeVYu9JeLsdyxTeXilX1ZHbrIbGLqfgl44G7SZZ3np9NNxb0ZYKs5UHF4/vIRACvmx0CqtyUSpn8nvGhfSCuzwFiqlY18vyPhE8/8H5hsmqqq6B0SV2I+qxks9BM+Shy1d/cPfiGqcYD3ualCgnGTumg2J19FjeajeHykDE00AUs5ZGRF5TNHQK5u28uQaYoC1FfLNLH5Awqp+5sLLYSs7lRDrM9alI5nT3SGdCYeULwGUhO6CuhZd7rvEIZZZA26q/iI7DHuzpCCNAy50rS47ev80wuLKg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=none (sender ip is\n 216.228.117.161) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=fail (p=reject sp=reject pct=100) action=oreject\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=4HqD1MIWwdQT7E+hwE7RB5/wQLAm3Ixp7wiF+Asevco=;\n b=Rx/mO3FavXmKVrDAxo/C6hSkT40oQsL+d1ZGhswnxrDFWLl7Sp8X1csRRa/rK9b9TdLMfEkGf/QTlrLS2rGNzq3NDpPuCgkfhX8AuGU92U92mTniscEMOgA31vG57O5uNYwo0iDCXoFnaOzCX5n4dw9bXESuIkMLI+4wS44eJcWlDcNi0QA9bYwKct8zDa1rh34yksD0P189J6LfrnVcOrUhxV0BcOeGgsUfdCv5FSnn1M08+aDURENTmu5W0yHSwzGYOCkO16FZysa78ud/AcV6IVCQ4d1RLfX8ARuvsI/ju/kZDCfByqQISU5wmyrWvYZfkwE0TAuRL30b7gup4g==",
        "X-MS-Exchange-Authentication-Results": "spf=none (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=fail action=oreject header.from=nvidia.com;",
        "Received-SPF": "None (protection.outlook.com: nvidia.com does not designate\n permitted sender hosts)",
        "From": "Alex Vesker <valex@nvidia.com>",
        "To": "<valex@nvidia.com>, <viacheslavo@nvidia.com>, <thomas@monjalon.net>,\n <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, <orika@nvidia.com>, Erez Shitrit <erezsh@nvidia.com>",
        "Subject": "[v6 15/18] net/mlx5/hws: Add HWS rule object",
        "Date": "Thu, 20 Oct 2022 18:57:45 +0300",
        "Message-ID": "<20221020155749.16643-16-valex@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20221020155749.16643-1-valex@nvidia.com>",
        "References": "<20220922190345.394-1-valex@nvidia.com>\n <20221020155749.16643-1-valex@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "BN8NAM11FT068:EE_|LV2PR12MB5990:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "d574112f-da02-46b4-fe7b-08dab2b41886",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n cpam2UuHxFcGHns4JV+b7cNyf3AydorZoD3hyx1/L30NdKxwhfhoi7MOisBPmuEl8ydKPLLVoqSZR3d2TRIvJf4ahxeijwb5cM1Fm84C8/Lo3z/Qmk3JuEtwfhJFTonNCh3yWhcBWvbOtwrieJeUZnSY83NOdPoNT3CmYncBnCzCvhPXArf7g3fQX2td9n4X7YS4Kn9TKUUzZfpDonqr/ZUxp0IibQMhH+pCbD2UQ73koo5B0CWSyHx1Tv5tl5Va0SsgBnZCmiyLb3+nd6x1oF11aIiJDOLey2b359shThXuotrDllk1Gq0xIOaldASl7uj47M23dEdvTOPvkXe4ji1OBliWIQT7umM0JCKoezFcQ6BzVEM/TK9WyiVHns+OV7VL2r9MMk7xCqEkn/uiZRtHydUZqJGRP39YApW4GJmAPoaXRGIzgrVD6mJKAU0xo527LqzFszJE6z4finTVPt+PB+4DNeYugjH7nakYEzoJojosCmZnasWaaNa6GITqBcoHVeeguTx9VxbIno0SqnJCW3BYo13ABq06uyUcH0ZoSjwM/I4DOgE1os6LI+kESLllmHkIFq9NffKULmQWZ2QAKPnT31XBsuwZCqdGUneEiisUPEtWpDB/4UTYotaZZvFj3EqW6L64vpPrjg0jYtOytOyFLBZkIjMQjAqvuvxsYPrT4QqwrDbUswRs75DHRcdqOYecnf+YsWH582wxg5gSQ4xsb37wYoKUuie6D5nz3Y7LPGG+uvdbWopidMVpALEuhi83wx6+x9kRTXG4Hg==",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(396003)(376002)(346002)(39860400002)(136003)(451199015)(46966006)(36840700001)(40470700004)(41300700001)(70586007)(30864003)(5660300002)(2906002)(4326008)(54906003)(8676002)(36756003)(107886003)(7696005)(36860700001)(110136005)(478600001)(6666004)(8936002)(186003)(6636002)(40480700001)(40460700003)(6286002)(26005)(70206006)(55016003)(316002)(83380400001)(426003)(356005)(47076005)(82310400005)(16526019)(1076003)(7636003)(336012)(2616005)(82740400003)(86362001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "20 Oct 2022 15:59:39.9360 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n d574112f-da02-46b4-fe7b-08dab2b41886",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT068.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "LV2PR12MB5990",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "HWS rule objects reside under the matcher, each rule holds\nthe configuration for the packet fields to match on and the\nset of actions to execute over the packet that has the requested\nfields. Rules can be created asynchronously in parallel over multiple\nqueues to different matchers. Each rule is configured to the HW.\n\nSigned-off-by: Erez Shitrit <erezsh@nvidia.com>\nSigned-off-by: Alex Vesker <valex@nvidia.com>\n---\n drivers/net/mlx5/hws/mlx5dr_rule.c | 528 +++++++++++++++++++++++++++++\n drivers/net/mlx5/hws/mlx5dr_rule.h |  50 +++\n 2 files changed, 578 insertions(+)\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_rule.c\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_rule.h",
    "diff": "diff --git a/drivers/net/mlx5/hws/mlx5dr_rule.c b/drivers/net/mlx5/hws/mlx5dr_rule.c\nnew file mode 100644\nindex 0000000000..b27318e6d4\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_rule.c\n@@ -0,0 +1,528 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#include \"mlx5dr_internal.h\"\n+\n+static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher,\n+\t\t\t     const struct rte_flow_item *items,\n+\t\t\t     bool *skip_rx, bool *skip_tx)\n+{\n+\tstruct mlx5dr_match_template *mt = matcher->mt[0];\n+\tconst struct flow_hw_port_info *vport;\n+\tconst struct rte_flow_item_ethdev *v;\n+\n+\t/* Flow_src is the 1st priority */\n+\tif (matcher->attr.optimize_flow_src) {\n+\t\t*skip_tx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_WIRE;\n+\t\t*skip_rx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_VPORT;\n+\t\treturn;\n+\t}\n+\n+\t/* By default FDB rules are added to both RX and TX */\n+\t*skip_rx = false;\n+\t*skip_tx = false;\n+\n+\tif (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) {\n+\t\tv = items[mt->vport_item_id].spec;\n+\t\tvport = flow_hw_conv_port_id(v->port_id);\n+\t\tif (unlikely(!vport)) {\n+\t\t\tDR_LOG(ERR, \"Fail to map port ID %d, ignoring\", v->port_id);\n+\t\t\treturn;\n+\t\t}\n+\n+\t\tif (!vport->is_wire)\n+\t\t\t/* Match vport ID is not WIRE -> Skip RX */\n+\t\t\t*skip_rx = true;\n+\t\telse\n+\t\t\t/* Match vport ID is WIRE -> Skip TX */\n+\t\t\t*skip_tx = true;\n+\t}\n+}\n+\n+static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe,\n+\t\t\t\t     struct mlx5dr_rule *rule,\n+\t\t\t\t     const struct rte_flow_item *items,\n+\t\t\t\t     void *user_data)\n+{\n+\tstruct mlx5dr_matcher *matcher = rule->matcher;\n+\tstruct mlx5dr_table *tbl = matcher->tbl;\n+\tbool skip_rx, skip_tx;\n+\n+\tdep_wqe->rule = rule;\n+\tdep_wqe->user_data = user_data;\n+\n+\tswitch (tbl->type) {\n+\tcase MLX5DR_TABLE_TYPE_NIC_RX:\n+\tcase MLX5DR_TABLE_TYPE_NIC_TX:\n+\t\tdep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;\n+\t\tdep_wqe->retry_rtc_0 = matcher->col_matcher ?\n+\t\t\t\t       matcher->col_matcher->match_ste.rtc_0->id : 0;\n+\t\tdep_wqe->rtc_1 = 0;\n+\t\tdep_wqe->retry_rtc_1 = 0;\n+\t\tbreak;\n+\n+\tcase MLX5DR_TABLE_TYPE_FDB:\n+\t\tmlx5dr_rule_skip(matcher, items, &skip_rx, &skip_tx);\n+\n+\t\tif (!skip_rx) {\n+\t\t\tdep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;\n+\t\t\tdep_wqe->retry_rtc_0 = matcher->col_matcher ?\n+\t\t\t\t\t       matcher->col_matcher->match_ste.rtc_0->id : 0;\n+\t\t} else {\n+\t\t\tdep_wqe->rtc_0 = 0;\n+\t\t\tdep_wqe->retry_rtc_0 = 0;\n+\t\t}\n+\n+\t\tif (!skip_tx) {\n+\t\t\tdep_wqe->rtc_1 = matcher->match_ste.rtc_1->id;\n+\t\t\tdep_wqe->retry_rtc_1 = matcher->col_matcher ?\n+\t\t\t\t\t       matcher->col_matcher->match_ste.rtc_1->id : 0;\n+\t\t} else {\n+\t\t\tdep_wqe->rtc_1 = 0;\n+\t\t\tdep_wqe->retry_rtc_1 = 0;\n+\t\t}\n+\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tassert(false);\n+\t\tbreak;\n+\t}\n+}\n+\n+static void mlx5dr_rule_gen_comp(struct mlx5dr_send_engine *queue,\n+\t\t\t\t struct mlx5dr_rule *rule,\n+\t\t\t\t bool err,\n+\t\t\t\t void *user_data,\n+\t\t\t\t enum mlx5dr_rule_status rule_status_on_succ)\n+{\n+\tenum rte_flow_op_status comp_status;\n+\n+\tif (!err) {\n+\t\tcomp_status = RTE_FLOW_OP_SUCCESS;\n+\t\trule->status = rule_status_on_succ;\n+\t} else {\n+\t\tcomp_status = RTE_FLOW_OP_ERROR;\n+\t\trule->status = MLX5DR_RULE_STATUS_FAILED;\n+\t}\n+\n+\tmlx5dr_send_engine_inc_rule(queue);\n+\tmlx5dr_send_engine_gen_comp(queue, 
user_data, comp_status);\n+}\n+\n+static int mlx5dr_rule_alloc_action_ste(struct mlx5dr_rule *rule,\n+\t\t\t\t\tstruct mlx5dr_rule_attr *attr)\n+{\n+\tstruct mlx5dr_matcher *matcher = rule->matcher;\n+\tint ret;\n+\n+\t/* Use rule_idx for locking optimzation, otherwise allocate from pool */\n+\tif (matcher->attr.optimize_using_rule_idx) {\n+\t\trule->action_ste_idx = attr->rule_idx * matcher->action_ste.max_stes;\n+\t} else {\n+\t\tstruct mlx5dr_pool_chunk ste = {0};\n+\n+\t\tste.order = rte_log2_u32(matcher->action_ste.max_stes);\n+\t\tret = mlx5dr_pool_chunk_alloc(matcher->action_ste.pool, &ste);\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed to allocate STE for rule actions\");\n+\t\t\treturn ret;\n+\t\t}\n+\t\trule->action_ste_idx = ste.offset;\n+\t}\n+\treturn 0;\n+}\n+\n+void mlx5dr_rule_free_action_ste_idx(struct mlx5dr_rule *rule)\n+{\n+\tstruct mlx5dr_matcher *matcher = rule->matcher;\n+\n+\tif (rule->action_ste_idx > -1 && !matcher->attr.optimize_using_rule_idx) {\n+\t\tstruct mlx5dr_pool_chunk ste = {0};\n+\n+\t\t/* This release is safe only when the rule match part was deleted */\n+\t\tste.order = rte_log2_u32(matcher->action_ste.max_stes);\n+\t\tste.offset = rule->action_ste_idx;\n+\t\tmlx5dr_pool_chunk_free(matcher->action_ste.pool, &ste);\n+\t}\n+}\n+\n+static void mlx5dr_rule_create_init(struct mlx5dr_rule *rule,\n+\t\t\t\t    struct mlx5dr_send_ste_attr *ste_attr,\n+\t\t\t\t    struct mlx5dr_actions_apply_data *apply)\n+{\n+\tstruct mlx5dr_matcher *matcher = rule->matcher;\n+\tstruct mlx5dr_table *tbl = matcher->tbl;\n+\tstruct mlx5dr_context *ctx = tbl->ctx;\n+\n+\t/* Init rule before reuse */\n+\trule->rtc_0 = 0;\n+\trule->rtc_1 = 0;\n+\trule->pending_wqes = 0;\n+\trule->action_ste_idx = -1;\n+\trule->status = MLX5DR_RULE_STATUS_CREATING;\n+\n+\t/* Init default send STE attributes */\n+\tste_attr->gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;\n+\tste_attr->send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;\n+\tste_attr->send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;\n+\tste_attr->send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;\n+\n+\t/* Init default action apply */\n+\tapply->tbl_type = tbl->type;\n+\tapply->common_res = &ctx->common_res[tbl->type];\n+\tapply->jump_to_action_stc = matcher->action_ste.stc.offset;\n+\tapply->require_dep = 0;\n+}\n+\n+static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,\n+\t\t\t\t  struct mlx5dr_rule_attr *attr,\n+\t\t\t\t  uint8_t mt_idx,\n+\t\t\t\t  const struct rte_flow_item items[],\n+\t\t\t\t  uint8_t at_idx,\n+\t\t\t\t  struct mlx5dr_rule_action rule_actions[])\n+{\n+\tstruct mlx5dr_action_template *at = rule->matcher->at[at_idx];\n+\tstruct mlx5dr_match_template *mt = rule->matcher->mt[mt_idx];\n+\tbool is_jumbo = mlx5dr_definer_is_jumbo(mt->definer);\n+\tstruct mlx5dr_matcher *matcher = rule->matcher;\n+\tstruct mlx5dr_context *ctx = matcher->tbl->ctx;\n+\tstruct mlx5dr_send_ste_attr ste_attr = {0};\n+\tstruct mlx5dr_send_ring_dep_wqe *dep_wqe;\n+\tstruct mlx5dr_actions_wqe_setter *setter;\n+\tstruct mlx5dr_actions_apply_data apply;\n+\tstruct mlx5dr_send_engine *queue;\n+\tuint8_t total_stes, action_stes;\n+\tint i, ret;\n+\n+\tqueue = &ctx->send_queue[attr->queue_id];\n+\tif (unlikely(mlx5dr_send_engine_err(queue))) {\n+\t\trte_errno = EIO;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tmlx5dr_rule_create_init(rule, &ste_attr, &apply);\n+\n+\t/* Allocate dependent match WQE since rule might have dependent writes.\n+\t * The queued dependent WQE can be later aborted or kept as a dependency.\n+\t * dep_wqe buffers (ctrl, data) 
are also reused for all STE writes.\n+\t */\n+\tdep_wqe = mlx5dr_send_add_new_dep_wqe(queue);\n+\tmlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, attr->user_data);\n+\n+\tste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;\n+\tste_attr.wqe_data = &dep_wqe->wqe_data;\n+\tapply.wqe_ctrl = &dep_wqe->wqe_ctrl;\n+\tapply.wqe_data = (uint32_t *)&dep_wqe->wqe_data;\n+\tapply.rule_action = rule_actions;\n+\tapply.queue = queue;\n+\n+\tsetter = &at->setters[at->num_of_action_stes];\n+\ttotal_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);\n+\taction_stes = total_stes - 1;\n+\n+\tif (action_stes) {\n+\t\t/* Allocate action STEs for complex rules */\n+\t\tret = mlx5dr_rule_alloc_action_ste(rule, attr);\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed to allocate action memory %d\", ret);\n+\t\t\tmlx5dr_send_abort_new_dep_wqe(queue);\n+\t\t\treturn ret;\n+\t\t}\n+\t\t/* Skip RX/TX based on the dep_wqe init */\n+\t\tste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0->id : 0;\n+\t\tste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1->id : 0;\n+\t\t/* Action STEs are written to a specific index last to first */\n+\t\tste_attr.direct_index = rule->action_ste_idx + action_stes;\n+\t\tapply.next_direct_idx = ste_attr.direct_index;\n+\t} else {\n+\t\tapply.next_direct_idx = 0;\n+\t}\n+\n+\tfor (i = total_stes; i-- > 0;) {\n+\t\tmlx5dr_action_apply_setter(&apply, setter--, !i && is_jumbo);\n+\n+\t\tif (i == 0) {\n+\t\t\t/* Handle last match STE */\n+\t\t\tmlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz,\n+\t\t\t\t\t\t  (uint8_t *)dep_wqe->wqe_data.action);\n+\n+\t\t\t/* Rule has dependent WQEs, match dep_wqe is queued */\n+\t\t\tif (action_stes || apply.require_dep)\n+\t\t\t\tbreak;\n+\n+\t\t\t/* Rule has no dependencies, abort dep_wqe and send WQE now */\n+\t\t\tmlx5dr_send_abort_new_dep_wqe(queue);\n+\t\t\tste_attr.wqe_tag_is_jumbo = is_jumbo;\n+\t\t\tste_attr.send_attr.notify_hw = !attr->burst;\n+\t\t\tste_attr.send_attr.user_data = dep_wqe->user_data;\n+\t\t\tste_attr.send_attr.rule = dep_wqe->rule;\n+\t\t\tste_attr.direct_index = 0;\n+\t\t\tste_attr.rtc_0 = dep_wqe->rtc_0;\n+\t\t\tste_attr.rtc_1 = dep_wqe->rtc_1;\n+\t\t\tste_attr.used_id_rtc_0 = &rule->rtc_0;\n+\t\t\tste_attr.used_id_rtc_1 = &rule->rtc_1;\n+\t\t\tste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;\n+\t\t\tste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;\n+\t\t} else {\n+\t\t\tapply.next_direct_idx = --ste_attr.direct_index;\n+\t\t}\n+\n+\t\tmlx5dr_send_ste(queue, &ste_attr);\n+\t}\n+\n+\t/* Backup TAG on the rule for deletion */\n+\tif (is_jumbo)\n+\t\tmemcpy(rule->tag.jumbo, dep_wqe->wqe_data.action, MLX5DR_JUMBO_TAG_SZ);\n+\telse\n+\t\tmemcpy(rule->tag.match, dep_wqe->wqe_data.tag, MLX5DR_MATCH_TAG_SZ);\n+\n+\tmlx5dr_send_engine_inc_rule(queue);\n+\n+\t/* Send dependent WQEs */\n+\tif (!attr->burst)\n+\t\tmlx5dr_send_all_dep_wqe(queue);\n+\n+\treturn 0;\n+}\n+\n+static void mlx5dr_rule_destroy_failed_hws(struct mlx5dr_rule *rule,\n+\t\t\t\t\t   struct mlx5dr_rule_attr *attr)\n+{\n+\tstruct mlx5dr_context *ctx = rule->matcher->tbl->ctx;\n+\tstruct mlx5dr_send_engine *queue;\n+\n+\tqueue = &ctx->send_queue[attr->queue_id];\n+\n+\tmlx5dr_rule_gen_comp(queue, rule, false,\n+\t\t\t     attr->user_data, MLX5DR_RULE_STATUS_DELETED);\n+\n+\t/* Rule failed now we can safely release action STEs */\n+\tmlx5dr_rule_free_action_ste_idx(rule);\n+\n+\t/* If a rule that was indicated as burst (need to trigger HW) has failed\n+\t * insertion we won't ring the HW as nothing is being written to the WQ.\n+\t * In such case update the last WQE and 
ring the HW with that work\n+\t */\n+\tif (attr->burst)\n+\t\treturn;\n+\n+\tmlx5dr_send_all_dep_wqe(queue);\n+\tmlx5dr_send_engine_flush_queue(queue);\n+}\n+\n+static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,\n+\t\t\t\t   struct mlx5dr_rule_attr *attr)\n+{\n+\tstruct mlx5dr_context *ctx = rule->matcher->tbl->ctx;\n+\tstruct mlx5dr_matcher *matcher = rule->matcher;\n+\tstruct mlx5dr_wqe_gta_ctrl_seg wqe_ctrl = {0};\n+\tstruct mlx5dr_send_ste_attr ste_attr = {0};\n+\tstruct mlx5dr_send_engine *queue;\n+\n+\tqueue = &ctx->send_queue[attr->queue_id];\n+\n+\t/* Rule is not completed yet */\n+\tif (rule->status == MLX5DR_RULE_STATUS_CREATING) {\n+\t\trte_errno = EBUSY;\n+\t\treturn rte_errno;\n+\t}\n+\n+\t/* Rule failed and doesn't require cleanup */\n+\tif (rule->status == MLX5DR_RULE_STATUS_FAILED) {\n+\t\tmlx5dr_rule_destroy_failed_hws(rule, attr);\n+\t\treturn 0;\n+\t}\n+\n+\tif (unlikely(mlx5dr_send_engine_err(queue))) {\n+\t\tmlx5dr_rule_destroy_failed_hws(rule, attr);\n+\t\treturn 0;\n+\t}\n+\n+\tmlx5dr_send_engine_inc_rule(queue);\n+\n+\t/* Send dependent WQE */\n+\tif (!attr->burst)\n+\t\tmlx5dr_send_all_dep_wqe(queue);\n+\n+\trule->status = MLX5DR_RULE_STATUS_DELETING;\n+\n+\tste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;\n+\tste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;\n+\tste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;\n+\n+\tste_attr.send_attr.rule = rule;\n+\tste_attr.send_attr.notify_hw = !attr->burst;\n+\tste_attr.send_attr.user_data = attr->user_data;\n+\n+\tste_attr.rtc_0 = rule->rtc_0;\n+\tste_attr.rtc_1 = rule->rtc_1;\n+\tste_attr.used_id_rtc_0 = &rule->rtc_0;\n+\tste_attr.used_id_rtc_1 = &rule->rtc_1;\n+\tste_attr.wqe_ctrl = &wqe_ctrl;\n+\tste_attr.wqe_tag = &rule->tag;\n+\tste_attr.wqe_tag_is_jumbo = mlx5dr_definer_is_jumbo(matcher->mt[0]->definer);\n+\tste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;\n+\n+\tmlx5dr_send_ste(queue, &ste_attr);\n+\n+\treturn 0;\n+}\n+\n+static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule,\n+\t\t\t\t   struct mlx5dr_rule_attr *rule_attr,\n+\t\t\t\t   const struct rte_flow_item items[],\n+\t\t\t\t   uint8_t at_idx,\n+\t\t\t\t   struct mlx5dr_rule_action rule_actions[])\n+{\n+\tstruct mlx5dv_flow_matcher *dv_matcher = rule->matcher->dv_matcher;\n+\tuint8_t num_actions = rule->matcher->at[at_idx]->num_actions;\n+\tstruct mlx5dr_context *ctx = rule->matcher->tbl->ctx;\n+\tstruct mlx5dv_flow_match_parameters *value;\n+\tstruct mlx5_flow_attr flow_attr = {0};\n+\tstruct mlx5dv_flow_action_attr *attr;\n+\tstruct rte_flow_error error;\n+\tuint8_t match_criteria;\n+\tint ret;\n+\n+\tattr = simple_calloc(num_actions, sizeof(*attr));\n+\tif (!attr) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tvalue = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) +\n+\t\t\t      offsetof(struct mlx5dv_flow_match_parameters, match_buf));\n+\tif (!value) {\n+\t\trte_errno = ENOMEM;\n+\t\tgoto free_attr;\n+\t}\n+\n+\tflow_attr.tbl_type = rule->matcher->tbl->type;\n+\n+\tret = flow_dv_translate_items_hws(items, &flow_attr, value->match_buf,\n+\t\t\t\t\t  MLX5_SET_MATCHER_HS_V, NULL,\n+\t\t\t\t\t  &match_criteria,\n+\t\t\t\t\t  &error);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to convert items to PRM [%s]\", error.message);\n+\t\tgoto free_value;\n+\t}\n+\n+\t/* Convert actions to verb action attr */\n+\tret = mlx5dr_action_root_build_attr(rule_actions, num_actions, attr);\n+\tif (ret)\n+\t\tgoto free_value;\n+\n+\t/* Create verb flow */\n+\tvalue->match_sz = 
MLX5_ST_SZ_BYTES(fte_match_param);\n+\trule->flow = mlx5_glue->dv_create_flow_root(dv_matcher,\n+\t\t\t\t\t\t    value,\n+\t\t\t\t\t\t    num_actions,\n+\t\t\t\t\t\t    attr);\n+\n+\tmlx5dr_rule_gen_comp(&ctx->send_queue[rule_attr->queue_id], rule, !rule->flow,\n+\t\t\t     rule_attr->user_data, MLX5DR_RULE_STATUS_CREATED);\n+\n+\tsimple_free(value);\n+\tsimple_free(attr);\n+\n+\treturn 0;\n+\n+free_value:\n+\tsimple_free(value);\n+free_attr:\n+\tsimple_free(attr);\n+\n+\treturn -rte_errno;\n+}\n+\n+static int mlx5dr_rule_destroy_root(struct mlx5dr_rule *rule,\n+\t\t\t\t    struct mlx5dr_rule_attr *attr)\n+{\n+\tstruct mlx5dr_context *ctx = rule->matcher->tbl->ctx;\n+\tint err = 0;\n+\n+\tif (rule->flow)\n+\t\terr = ibv_destroy_flow(rule->flow);\n+\n+\tmlx5dr_rule_gen_comp(&ctx->send_queue[attr->queue_id], rule, err,\n+\t\t\t     attr->user_data, MLX5DR_RULE_STATUS_DELETED);\n+\n+\treturn 0;\n+}\n+\n+int mlx5dr_rule_create(struct mlx5dr_matcher *matcher,\n+\t\t       uint8_t mt_idx,\n+\t\t       const struct rte_flow_item items[],\n+\t\t       uint8_t at_idx,\n+\t\t       struct mlx5dr_rule_action rule_actions[],\n+\t\t       struct mlx5dr_rule_attr *attr,\n+\t\t       struct mlx5dr_rule *rule_handle)\n+{\n+\tstruct mlx5dr_context *ctx;\n+\tint ret;\n+\n+\trule_handle->matcher = matcher;\n+\tctx = matcher->tbl->ctx;\n+\n+\tif (unlikely(!attr->user_data)) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Check if there is room in queue */\n+\tif (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {\n+\t\trte_errno = EBUSY;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tassert(matcher->num_of_mt >= mt_idx);\n+\tassert(matcher->num_of_at >= at_idx);\n+\n+\tif (unlikely(mlx5dr_table_is_root(matcher->tbl)))\n+\t\tret = mlx5dr_rule_create_root(rule_handle,\n+\t\t\t\t\t      attr,\n+\t\t\t\t\t      items,\n+\t\t\t\t\t      at_idx,\n+\t\t\t\t\t      rule_actions);\n+\telse\n+\t\tret = mlx5dr_rule_create_hws(rule_handle,\n+\t\t\t\t\t     attr,\n+\t\t\t\t\t     mt_idx,\n+\t\t\t\t\t     items,\n+\t\t\t\t\t     at_idx,\n+\t\t\t\t\t     rule_actions);\n+\treturn -ret;\n+}\n+\n+int mlx5dr_rule_destroy(struct mlx5dr_rule *rule,\n+\t\t\tstruct mlx5dr_rule_attr *attr)\n+{\n+\tstruct mlx5dr_context *ctx = rule->matcher->tbl->ctx;\n+\tint ret;\n+\n+\tif (unlikely(!attr->user_data)) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Check if there is room in queue */\n+\tif (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {\n+\t\trte_errno = EBUSY;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (unlikely(mlx5dr_table_is_root(rule->matcher->tbl)))\n+\t\tret = mlx5dr_rule_destroy_root(rule, attr);\n+\telse\n+\t\tret = mlx5dr_rule_destroy_hws(rule, attr);\n+\n+\treturn -ret;\n+}\n+\n+size_t mlx5dr_rule_get_handle_size(void)\n+{\n+\treturn sizeof(struct mlx5dr_rule);\n+}\ndiff --git a/drivers/net/mlx5/hws/mlx5dr_rule.h b/drivers/net/mlx5/hws/mlx5dr_rule.h\nnew file mode 100644\nindex 0000000000..96c85674f2\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_rule.h\n@@ -0,0 +1,50 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#ifndef MLX5DR_RULE_H_\n+#define MLX5DR_RULE_H_\n+\n+enum {\n+\tMLX5DR_STE_CTRL_SZ = 20,\n+\tMLX5DR_ACTIONS_SZ = 12,\n+\tMLX5DR_MATCH_TAG_SZ = 32,\n+\tMLX5DR_JUMBO_TAG_SZ = 44,\n+};\n+\n+enum mlx5dr_rule_status 
{\n+\tMLX5DR_RULE_STATUS_UNKNOWN,\n+\tMLX5DR_RULE_STATUS_CREATING,\n+\tMLX5DR_RULE_STATUS_CREATED,\n+\tMLX5DR_RULE_STATUS_DELETING,\n+\tMLX5DR_RULE_STATUS_DELETED,\n+\tMLX5DR_RULE_STATUS_FAILING,\n+\tMLX5DR_RULE_STATUS_FAILED,\n+};\n+\n+struct mlx5dr_rule_match_tag {\n+\tunion {\n+\t\tuint8_t jumbo[MLX5DR_JUMBO_TAG_SZ];\n+\t\tstruct {\n+\t\t\tuint8_t reserved[MLX5DR_ACTIONS_SZ];\n+\t\t\tuint8_t match[MLX5DR_MATCH_TAG_SZ];\n+\t\t};\n+\t};\n+};\n+\n+struct mlx5dr_rule {\n+\tstruct mlx5dr_matcher *matcher;\n+\tunion {\n+\t\tstruct mlx5dr_rule_match_tag tag;\n+\t\tstruct ibv_flow *flow;\n+\t};\n+\tuint32_t rtc_0; /* The RTC into which the STE was inserted */\n+\tuint32_t rtc_1; /* The RTC into which the STE was inserted */\n+\tint action_ste_idx; /* Action STE pool ID */\n+\tuint8_t status; /* enum mlx5dr_rule_status */\n+\tuint8_t pending_wqes;\n+};\n+\n+void mlx5dr_rule_free_action_ste_idx(struct mlx5dr_rule *rule);\n+\n+#endif /* MLX5DR_RULE_H_ */\n",
    "prefixes": [
        "v6",
        "15/18"
    ]
}
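
The patch and put operations described at the top modify this record and are
restricted to project maintainers. A hedged sketch of a partial update, again
with Python's requests and assuming Patchwork's token authentication (the
token below is a placeholder, not a real credential):

    import requests

    # Placeholder token; real tokens are generated from the Patchwork
    # user profile page.
    headers = {"Authorization": "Token 0123456789abcdef"}

    # PATCH performs a partial update: only the fields in the body change.
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/118823/",
        headers=headers,
        json={"state": "accepted", "archived": True},
    )
    resp.raise_for_status()
    print(resp.json()["state"])  # "accepted" on success

A PUT to the same URL behaves like PATCH except that it expects the full
writable representation of the patch rather than a subset of fields.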